xen-vtx-unstable

annotate xen/arch/x86/x86_32/entry.S @ 6774:4d899a738d59

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 13 15:05:49 2005 +0000 (2005-09-13)
parents f752e0c873a6 dd668f7527cb
children e7c7196fa329 8ca0f98ba8e2
rev   line source
kaf24@1672 1 /*
kaf24@1672 2 * Hypercall and fault low-level handling routines.
kaf24@1672 3 *
kaf24@1672 4 * Copyright (c) 2002-2004, K A Fraser
kaf24@1672 5 * Copyright (c) 1991, 1992 Linus Torvalds
kaf24@3089 6 *
kaf24@3089 7 * Calling back to a guest OS:
kaf24@3089 8 * ===========================
kaf24@3089 9 *
kaf24@1672 10 * First, we require that all callbacks (either via a supplied
kaf24@1672 11 * interrupt-descriptor-table, or via the special event or failsafe callbacks
kaf24@1672 12 * in the shared-info-structure) are to ring 1. This just makes life easier,
kaf24@1672 13 * in that it means we don't have to do messy GDT/LDT lookups to find
kaf24@1672 14 * out the privilege level of the return code-selector. That code
kaf24@1672 15 * would just be a hassle to write, and would need to account for running
kaf24@1672 16 * off the end of the GDT/LDT, for example. For all callbacks we check
kaf24@3089 17 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
kaf24@3089 18 * we're safe as we don't allow a guest OS to install ring-0 privileges into the
kaf24@3089 19 * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
kaf24@3089 20 * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
kaf24@3089 21 * rather than the correct ring) and bad things are bound to ensue -- IRET is
kaf24@1672 22 * likely to fault, and we may end up killing the domain (no harm can
kaf24@1672 23 * come to Xen, though).
kaf24@1672 24 *
kaf24@1672 25 * When doing a callback, we check if the return CS is in ring 0. If so,
kaf24@1672 26 * callback is delayed until next return to ring != 0.
kaf24@1672 27 * If return CS is in ring 1, then we create a callback frame
kaf24@1672 28 * starting at return SS/ESP. The base of the frame does an intra-privilege
kaf24@1672 29 * interrupt-return.
kaf24@1672 30 * If return CS is in ring > 1, we create a callback frame starting
kaf24@1672 31 * at SS/ESP taken from appropriate section of the current TSS. The base
kaf24@1672 32 * of the frame does an inter-privilege interrupt-return.
kaf24@1672 33 *
kaf24@1672 34 * Note that the "failsafe callback" uses a special stackframe:
kaf24@1672 35 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
kaf24@1672 36 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
kaf24@1672 37 * That is, original values for DS/ES/FS/GS are placed on stack rather than
kaf24@1672 38 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
kaf24@1672 39 * saved/restored in guest OS. Furthermore, if we load them we may cause
kaf24@1672 40 * a fault if they are invalid, which is a hassle to deal with. We avoid
kaf24@1672 41 * that problem if we don't load them :-) This property allows us to use
kaf24@1672 42 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
kaf24@1672 43 * on return to ring != 0, we can simply package it up as a return via
kaf24@1672 44 * the failsafe callback, and let the guest OS sort it out (perhaps by
kaf24@1672 45 * killing an application process). Note that we also do this for any
kaf24@1672 46 * faulting IRET -- just let the guest OS handle it via the event
kaf24@1672 47 * callback.
kaf24@1672 48 *
kaf24@1672 49 * We terminate a domain in the following cases:
kaf24@1672 50 * - creating a callback stack frame (due to bad ring-1 stack).
kaf24@1672 51 * - faulting IRET on entry to failsafe callback handler.
kaf24@1672 52 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
kaf24@1672 53 * handler in good order (absolutely no faults allowed!).
kaf24@1672 54 */
kaf24@1672 55
kaf24@1672 56 #include <xen/config.h>
kaf24@1672 57 #include <xen/errno.h>
kaf24@2047 58 #include <xen/softirq.h>
kaf24@3276 59 #include <asm/asm_defns.h>
kaf24@3595 60 #include <asm/apicdef.h>
kaf24@3754 61 #include <asm/page.h>
kaf24@2789 62 #include <public/xen.h>
kaf24@1672 63
kaf24@6452 64 #define GET_GUEST_REGS(reg) \
kaf24@6452 65 movl $~(STACK_SIZE-1),reg; \
kaf24@6452 66 andl %esp,reg; \
kaf24@6452 67 orl $(STACK_SIZE-CPUINFO_sizeof),reg;
kaf24@6452 68
kaf24@3754 69 #define GET_CURRENT(reg) \
kaf24@3754 70 movl $STACK_SIZE-4, reg; \
kaf24@3754 71 orl %esp, reg; \
kaf24@3754 72 andl $~3,reg; \
kaf24@1672 73 movl (reg),reg;
kaf24@1672 74
iap10@3290 75 #ifdef CONFIG_VMX
iap10@3290 76 /*
iap10@3290 77 * At VMExit time the processor saves the guest selectors, esp, eip,
iap10@3290 78 * and eflags. Therefore we don't save them, but simply decrement
iap10@3290 79 * the kernel stack pointer to make it consistent with the stack frame
iap10@3290 80 * at usual interruption time. The eflags of the host is not saved by VMX,
iap10@3290 81 * and we set it to the fixed value.
iap10@3290 82 *
iap10@3290 83 * We also need the room, especially because orig_eax field is used
kaf24@4683 84 * by do_IRQ(). Compared to cpu_user_regs, we skip pushing for the following:
iap10@3388 85 * (10) u32 gs;
iap10@3388 86 * (9) u32 fs;
iap10@3388 87 * (8) u32 ds;
iap10@3388 88 * (7) u32 es;
kaf24@3761 89 * <- get_stack_bottom() (= HOST_ESP)
iap10@3388 90 * (6) u32 ss;
iap10@3388 91 * (5) u32 esp;
iap10@3388 92 * (4) u32 eflags;
iap10@3388 93 * (3) u32 cs;
iap10@3290 94 * (2) u32 eip;
iap10@3388 95 * (2/1) u16 entry_vector;
iap10@3388 96 * (1/1) u16 error_code;
kaf24@3761 97 * However, get_stack_bottom() actually returns 20 bytes before the real
kaf24@3761 98 * bottom of the stack to allow space for:
iap10@3388 99 * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
iap10@3290 100 */
iap10@3290 101 #define VMX_MONITOR_EFLAGS 0x202 /* IF on */
iap10@3388 102 #define NR_SKIPPED_REGS 6 /* See the above explanation */
iap10@3290 103 #define VMX_SAVE_ALL_NOSEGREGS \
iap10@3290 104 pushl $VMX_MONITOR_EFLAGS; \
iap10@3290 105 popf; \
iap10@3290 106 subl $(NR_SKIPPED_REGS*4), %esp; \
kaf24@4683 107 movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \
iap10@3290 108 pushl %eax; \
iap10@3290 109 pushl %ebp; \
iap10@3290 110 pushl %edi; \
iap10@3290 111 pushl %esi; \
iap10@3290 112 pushl %edx; \
iap10@3290 113 pushl %ecx; \
iap10@3290 114 pushl %ebx;
iap10@3290 115
kaf24@6113 116 #define VMX_RESTORE_ALL_NOSEGREGS \
kaf24@6113 117 popl %ebx; \
kaf24@6113 118 popl %ecx; \
kaf24@6113 119 popl %edx; \
kaf24@6113 120 popl %esi; \
kaf24@6113 121 popl %edi; \
kaf24@6113 122 popl %ebp; \
kaf24@6113 123 popl %eax; \
kaf24@6113 124 addl $(NR_SKIPPED_REGS*4), %esp
kaf24@6113 125
iap10@3290 126 ENTRY(vmx_asm_vmexit_handler)
iap10@3290 127 /* selectors are restored/saved by VMX */
iap10@3290 128 VMX_SAVE_ALL_NOSEGREGS
adsharma@6535 129 #ifdef TRACE_BUFFER
adsharma@6535 130 call trace_vmexit
adsharma@6535 131 #endif
kaf24@4700 132 call vmx_vmexit_handler
iap10@3290 133 jmp vmx_asm_do_resume
iap10@3290 134
kaf24@6113 135 .macro vmx_asm_common launch initialized
kaf24@6113 136 1:
kaf24@6113 137 /* vmx_test_all_events */
kaf24@6113 138 .if \initialized
iap10@3290 139 GET_CURRENT(%ebx)
kaf24@4138 140 /*test_all_events:*/
iap10@3290 141 xorl %ecx,%ecx
iap10@3290 142 notl %ecx
iap10@3290 143 cli # tests must not race interrupts
iap10@3290 144 /*test_softirqs:*/
kaf24@5289 145 movl VCPU_processor(%ebx),%eax
kaf24@4593 146 shl $IRQSTAT_shift,%eax
kaf24@4700 147 test %ecx,irq_stat(%eax,1)
kaf24@6113 148 jnz 2f
iap10@3290 149
kaf24@6113 150 /* vmx_restore_all_guest */
kaf24@6326 151 call vmx_intr_assist
kaf24@4700 152 call load_cr2
adsharma@6535 153 #ifdef TRACE_BUFFER
adsharma@6535 154 call trace_vmentry
adsharma@6535 155 #endif
kaf24@6113 156 .endif
kaf24@6113 157 VMX_RESTORE_ALL_NOSEGREGS
iap10@3290 158 /*
iap10@3290 159 * Check if we are going back to VMX-based VM
iap10@3290 160 * By this time, all the setups in the VMCS must be complete.
iap10@3290 161 */
kaf24@6113 162 .if \launch
kaf24@6113 163 /* VMLAUNCH */
kaf24@6113 164 .byte 0x0f,0x01,0xc2
kaf24@6113 165 pushf
kaf24@6113 166 call vm_launch_fail
kaf24@6113 167 .else
iap10@3290 168 /* VMRESUME */
iap10@3290 169 .byte 0x0f,0x01,0xc3
iap10@3290 170 pushf
kaf24@4700 171 call vm_resume_fail
kaf24@6113 172 .endif
iap10@3290 173 /* Should never reach here */
iap10@3290 174 hlt
iap10@3290 175
iap10@3290 176 ALIGN
kaf24@6113 177 .if \initialized
kaf24@6113 178 2:
kaf24@6113 179 /* vmx_process_softirqs */
iap10@3290 180 sti
kaf24@4700 181 call do_softirq
kaf24@6113 182 jmp 1b
kaf24@6113 183 ALIGN
kaf24@6113 184 .endif
kaf24@6113 185 .endm
kaf24@6113 186
kaf24@6113 187 ENTRY(vmx_asm_do_launch)
kaf24@6113 188 vmx_asm_common 1 0
kaf24@6113 189
kaf24@6113 190 ENTRY(vmx_asm_do_resume)
kaf24@6113 191 vmx_asm_common 0 1
kaf24@6113 192
kaf24@6113 193 ENTRY(vmx_asm_do_relaunch)
kaf24@6113 194 vmx_asm_common 1 1
kaf24@6113 195
iap10@3290 196 #endif
iap10@3290 197
kaf24@1672 198 ALIGN
kaf24@1672 199 restore_all_guest:
kaf24@4683 200 testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
kaf24@3169 201 jnz restore_all_vm86
kaf24@5431 202 FLT1: mov UREGS_ds(%esp),%ds
kaf24@5431 203 FLT2: mov UREGS_es(%esp),%es
kaf24@5431 204 FLT3: mov UREGS_fs(%esp),%fs
kaf24@5431 205 FLT4: mov UREGS_gs(%esp),%gs
kaf24@3169 206 restore_all_vm86:
kaf24@1672 207 popl %ebx
kaf24@1672 208 popl %ecx
kaf24@1672 209 popl %edx
kaf24@1672 210 popl %esi
kaf24@1672 211 popl %edi
kaf24@1672 212 popl %ebp
kaf24@1672 213 popl %eax
kaf24@2916 214 addl $4,%esp
kaf24@3089 215 FLT5: iret
kaf24@3089 216 .section .fixup,"ax"
kaf24@3089 217 FIX5: subl $28,%esp
kaf24@3089 218 pushl 28(%esp) # error_code/entry_vector
kaf24@4683 219 movl %eax,UREGS_eax+4(%esp)
kaf24@4683 220 movl %ebp,UREGS_ebp+4(%esp)
kaf24@4683 221 movl %edi,UREGS_edi+4(%esp)
kaf24@4683 222 movl %esi,UREGS_esi+4(%esp)
kaf24@4683 223 movl %edx,UREGS_edx+4(%esp)
kaf24@4683 224 movl %ecx,UREGS_ecx+4(%esp)
kaf24@4683 225 movl %ebx,UREGS_ebx+4(%esp)
kaf24@3089 226 FIX1: SET_XEN_SEGMENTS(a)
kaf24@3089 227 movl %eax,%fs
kaf24@3089 228 movl %eax,%gs
kaf24@3089 229 sti
kaf24@3089 230 popl %esi
kaf24@3089 231 pushfl # EFLAGS
kaf24@3089 232 movl $__HYPERVISOR_CS,%eax
kaf24@3089 233 pushl %eax # CS
kaf24@3089 234 movl $DBLFLT1,%eax
kaf24@3089 235 pushl %eax # EIP
kaf24@3089 236 pushl %esi # error_code/entry_vector
kaf24@3089 237 jmp error_code
kaf24@3089 238 DBLFLT1:GET_CURRENT(%ebx)
kaf24@3089 239 jmp test_all_events
kaf24@4138 240 failsafe_callback:
kaf24@4138 241 GET_CURRENT(%ebx)
kaf24@5289 242 leal VCPU_trap_bounce(%ebx),%edx
kaf24@5289 243 movl VCPU_failsafe_addr(%ebx),%eax
kaf24@4350 244 movl %eax,TRAPBOUNCE_eip(%edx)
kaf24@5289 245 movl VCPU_failsafe_sel(%ebx),%eax
kaf24@4138 246 movw %ax,TRAPBOUNCE_cs(%edx)
kaf24@4138 247 movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
kaf24@4138 248 call create_bounce_frame
kaf24@4138 249 xorl %eax,%eax
kaf24@4683 250 movl %eax,UREGS_ds(%esp)
kaf24@4683 251 movl %eax,UREGS_es(%esp)
kaf24@4683 252 movl %eax,UREGS_fs(%esp)
kaf24@4683 253 movl %eax,UREGS_gs(%esp)
kaf24@4138 254 jmp test_all_events
kaf24@3089 255 .previous
kaf24@3089 256 .section __pre_ex_table,"a"
kaf24@3089 257 .long FLT1,FIX1
kaf24@3089 258 .long FLT2,FIX1
kaf24@3089 259 .long FLT3,FIX1
kaf24@3089 260 .long FLT4,FIX1
kaf24@3089 261 .long FLT5,FIX5
kaf24@3089 262 .previous
kaf24@3089 263 .section __ex_table,"a"
kaf24@4138 264 .long DBLFLT1,failsafe_callback
kaf24@3089 265 .previous
kaf24@1672 266
kaf24@1672 267 ALIGN
kaf24@1672 268 restore_all_xen:
kaf24@1672 269 popl %ebx
kaf24@1672 270 popl %ecx
kaf24@1672 271 popl %edx
kaf24@1672 272 popl %esi
kaf24@1672 273 popl %edi
kaf24@1672 274 popl %ebp
kaf24@1672 275 popl %eax
kaf24@1672 276 addl $4,%esp
kaf24@1672 277 iret
kaf24@1672 278
kaf24@1672 279 ALIGN
kaf24@1672 280 ENTRY(hypercall)
kaf24@3089 281 subl $4,%esp
kaf24@2917 282 SAVE_ALL(b)
kaf24@2916 283 sti
kaf24@2916 284 GET_CURRENT(%ebx)
kaf24@3958 285 andl $(NR_hypercalls-1),%eax
kaf24@3958 286 PERFC_INCR(PERFC_hypercalls, %eax)
kaf24@6452 287 #ifndef NDEBUG
kaf24@6452 288 /* Deliberately corrupt parameter regs not used by this hypercall. */
kaf24@6452 289 pushl %eax
kaf24@6452 290 pushl UREGS_eip+4(%esp)
kaf24@6452 291 pushl 28(%esp) # EBP
kaf24@6452 292 pushl 28(%esp) # EDI
kaf24@6452 293 pushl 28(%esp) # ESI
kaf24@6452 294 pushl 28(%esp) # EDX
kaf24@6452 295 pushl 28(%esp) # ECX
kaf24@6452 296 pushl 28(%esp) # EBX
kaf24@6452 297 movzb hypercall_args_table(,%eax,1),%ecx
kaf24@6452 298 leal (%esp,%ecx,4),%edi
kaf24@6452 299 subl $6,%ecx
kaf24@6452 300 negl %ecx
kaf24@6452 301 movl %eax,%esi
kaf24@6452 302 movl $0xDEADBEEF,%eax
kaf24@6452 303 rep stosl
kaf24@6452 304 movl %esi,%eax
kaf24@6452 305 #endif
kaf24@4700 306 call *hypercall_table(,%eax,4)
kaf24@6452 307 #ifndef NDEBUG
kaf24@6452 308 /* Deliberately corrupt parameter regs used by this hypercall. */
kaf24@6452 309 addl $24,%esp # Shadow parameters
kaf24@6452 310 popl %ecx # Shadow EIP
kaf24@6452 311 cmpl %ecx,UREGS_eip(%esp)
kaf24@6452 312 popl %ecx # Shadow hypercall index
kaf24@6452 313 jne skip_clobber # If EIP has changed then don't clobber
kaf24@6452 314 movzb hypercall_args_table(,%ecx,1),%ecx
kaf24@6452 315 movl %esp,%edi
kaf24@6452 316 movl %eax,%esi
kaf24@6452 317 movl $0xDEADBEEF,%eax
kaf24@6452 318 rep stosl
kaf24@6452 319 movl %esi,%eax
kaf24@6452 320 skip_clobber:
kaf24@6452 321 #endif
kaf24@4683 322 movl %eax,UREGS_eax(%esp) # save the return value
kaf24@1672 323
kaf24@1672 324 test_all_events:
kaf24@1672 325 xorl %ecx,%ecx
kaf24@1672 326 notl %ecx
kaf24@1672 327 cli # tests must not race interrupts
kaf24@1672 328 /*test_softirqs:*/
kaf24@5289 329 movl VCPU_processor(%ebx),%eax
kaf24@4593 330 shl $IRQSTAT_shift,%eax
kaf24@4700 331 test %ecx,irq_stat(%eax,1)
kaf24@1672 332 jnz process_softirqs
kaf24@1672 333 /*test_guest_events:*/
kaf24@5289 334 movl VCPU_vcpu_info(%ebx),%eax
cl349@2921 335 testb $0xFF,VCPUINFO_upcall_mask(%eax)
kaf24@1672 336 jnz restore_all_guest
cl349@2921 337 testb $0xFF,VCPUINFO_upcall_pending(%eax)
kaf24@1672 338 jz restore_all_guest
kaf24@1672 339 /*process_guest_events:*/
kaf24@4138 340 sti
kaf24@5289 341 leal VCPU_trap_bounce(%ebx),%edx
kaf24@5289 342 movl VCPU_event_addr(%ebx),%eax
kaf24@3043 343 movl %eax,TRAPBOUNCE_eip(%edx)
kaf24@5289 344 movl VCPU_event_sel(%ebx),%eax
kaf24@3043 345 movw %ax,TRAPBOUNCE_cs(%edx)
kaf24@3089 346 movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
kaf24@1672 347 call create_bounce_frame
kaf24@4138 348 jmp test_all_events
kaf24@1672 349
kaf24@1672 350 ALIGN
kaf24@1672 351 process_softirqs:
kaf24@1672 352 sti
kaf24@4700 353 call do_softirq
kaf24@1672 354 jmp test_all_events
kaf24@1672 355
kaf24@2916 356 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
kaf24@2916 357 /* {EIP, CS, EFLAGS, [ESP, SS]} */
kaf24@5289 358 /* %edx == trap_bounce, %ebx == struct vcpu */
kaf24@4683 359 /* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
kaf24@3169 360 create_bounce_frame:
kaf24@4683 361 movl UREGS_eflags+4(%esp),%ecx
kaf24@4683 362 movb UREGS_cs+4(%esp),%cl
kaf24@3169 363 testl $(2|X86_EFLAGS_VM),%ecx
kaf24@3169 364 jz ring1 /* jump if returning to an existing ring-1 activation */
kaf24@5289 365 movl VCPU_kernel_sp(%ebx),%esi
kaf24@5431 366 FLT6: mov VCPU_kernel_ss(%ebx),%gs
kaf24@4683 367 testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
kaf24@3169 368 jz nvm86_1
kaf24@4138 369 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
kaf24@4683 370 movl UREGS_es+4(%esp),%eax
kaf24@4138 371 FLT7: movl %eax,%gs:(%esi)
kaf24@4683 372 movl UREGS_ds+4(%esp),%eax
kaf24@4138 373 FLT8: movl %eax,%gs:4(%esi)
kaf24@4683 374 movl UREGS_fs+4(%esp),%eax
kaf24@4138 375 FLT9: movl %eax,%gs:8(%esi)
kaf24@4683 376 movl UREGS_gs+4(%esp),%eax
kaf24@4138 377 FLT10: movl %eax,%gs:12(%esi)
kaf24@3169 378 nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */
kaf24@4683 379 movl UREGS_esp+4(%esp),%eax
kaf24@4138 380 FLT11: movl %eax,%gs:(%esi)
kaf24@4683 381 movl UREGS_ss+4(%esp),%eax
kaf24@4138 382 FLT12: movl %eax,%gs:4(%esi)
kaf24@3169 383 jmp 1f
kaf24@3169 384 ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
kaf24@4683 385 movl UREGS_esp+4(%esp),%esi
kaf24@5431 386 FLT13: mov UREGS_ss+4(%esp),%gs
kaf24@3169 387 1: /* Construct a stack frame: EFLAGS, CS/EIP */
kaf24@4949 388 movb TRAPBOUNCE_flags(%edx),%cl
kaf24@1672 389 subl $12,%esi
kaf24@4683 390 movl UREGS_eip+4(%esp),%eax
kaf24@4138 391 FLT14: movl %eax,%gs:(%esi)
kaf24@5289 392 movl VCPU_vcpu_info(%ebx),%eax
kaf24@4949 393 pushl VCPUINFO_upcall_mask(%eax)
kaf24@4949 394 testb $TBF_INTERRUPT,%cl
kaf24@6017 395 setnz %ch # TBF_INTERRUPT -> set upcall mask
kaf24@6017 396 orb %ch,VCPUINFO_upcall_mask(%eax)
kaf24@4949 397 popl %eax
kaf24@4949 398 shll $16,%eax # Bits 16-23: saved_upcall_mask
kaf24@4949 399 movw UREGS_cs+4(%esp),%ax # Bits 0-15: CS
kaf24@4138 400 FLT15: movl %eax,%gs:4(%esi)
kaf24@4683 401 movl UREGS_eflags+4(%esp),%eax
kaf24@4138 402 FLT16: movl %eax,%gs:8(%esi)
kaf24@3089 403 test $TBF_EXCEPTION_ERRCODE,%cl
kaf24@3089 404 jz 1f
kaf24@3089 405 subl $4,%esi # push error_code onto guest frame
kaf24@3089 406 movl TRAPBOUNCE_error_code(%edx),%eax
kaf24@4138 407 FLT17: movl %eax,%gs:(%esi)
kaf24@3089 408 testb $TBF_EXCEPTION_CR2,%cl
kaf24@3089 409 jz 2f
kaf24@3089 410 subl $4,%esi # push %cr2 onto guest frame
kaf24@3089 411 movl TRAPBOUNCE_cr2(%edx),%eax
kaf24@4138 412 FLT18: movl %eax,%gs:(%esi)
kaf24@3089 413 1: testb $TBF_FAILSAFE,%cl
kaf24@3089 414 jz 2f
kaf24@3089 415 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
kaf24@4683 416 testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
kaf24@3169 417 jz nvm86_2
kaf24@3169 418 xorl %eax,%eax # VM86: we write zero selector values
kaf24@4138 419 FLT19: movl %eax,%gs:(%esi)
kaf24@4138 420 FLT20: movl %eax,%gs:4(%esi)
kaf24@4138 421 FLT21: movl %eax,%gs:8(%esi)
kaf24@4138 422 FLT22: movl %eax,%gs:12(%esi)
kaf24@3169 423 jmp 2f
kaf24@4683 424 nvm86_2:movl UREGS_ds+4(%esp),%eax # non-VM86: write real selector values
kaf24@4138 425 FLT23: movl %eax,%gs:(%esi)
kaf24@4683 426 movl UREGS_es+4(%esp),%eax
kaf24@4138 427 FLT24: movl %eax,%gs:4(%esi)
kaf24@4683 428 movl UREGS_fs+4(%esp),%eax
kaf24@4138 429 FLT25: movl %eax,%gs:8(%esi)
kaf24@4683 430 movl UREGS_gs+4(%esp),%eax
kaf24@4138 431 FLT26: movl %eax,%gs:12(%esi)
kaf24@4683 432 2: testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
kaf24@3169 433 jz nvm86_3
kaf24@3169 434 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
kaf24@4683 435 movl %eax,UREGS_ds+4(%esp)
kaf24@4683 436 movl %eax,UREGS_es+4(%esp)
kaf24@4683 437 movl %eax,UREGS_fs+4(%esp)
kaf24@4683 438 movl %eax,UREGS_gs+4(%esp)
kaf24@3169 439 nvm86_3:/* Rewrite our stack frame and return to ring 1. */
kaf24@1672 440 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
kaf24@4683 441 andl $0xfffcbeff,UREGS_eflags+4(%esp)
kaf24@5431 442 mov %gs,UREGS_ss+4(%esp)
kaf24@4683 443 movl %esi,UREGS_esp+4(%esp)
kaf24@3043 444 movzwl TRAPBOUNCE_cs(%edx),%eax
kaf24@4683 445 movl %eax,UREGS_cs+4(%esp)
kaf24@3043 446 movl TRAPBOUNCE_eip(%edx),%eax
kaf24@5431 447 test %eax,%eax
kaf24@5431 448 jz domain_crash_synchronous
kaf24@4683 449 movl %eax,UREGS_eip+4(%esp)
kaf24@4138 450 movb $0,TRAPBOUNCE_flags(%edx)
kaf24@1672 451 ret
kaf24@1672 452 .section __ex_table,"a"
kaf24@4325 453 .long FLT6,domain_crash_synchronous , FLT7,domain_crash_synchronous
kaf24@4325 454 .long FLT8,domain_crash_synchronous , FLT9,domain_crash_synchronous
kaf24@4325 455 .long FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
kaf24@4325 456 .long FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
kaf24@4325 457 .long FLT14,domain_crash_synchronous , FLT15,domain_crash_synchronous
kaf24@4325 458 .long FLT16,domain_crash_synchronous , FLT17,domain_crash_synchronous
kaf24@4325 459 .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
kaf24@4325 460 .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
kaf24@4325 461 .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
kaf24@4325 462 .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
kaf24@4325 463 .long FLT26,domain_crash_synchronous
kaf24@1672 464 .previous
kaf24@1672 465
kaf24@1672 466 ALIGN
kaf24@3089 467 process_guest_exception_and_events:
kaf24@5289 468 leal VCPU_trap_bounce(%ebx),%edx
kaf24@3089 469 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
kaf24@1672 470 jz test_all_events
kaf24@3089 471 call create_bounce_frame
kaf24@1672 472 jmp test_all_events
kaf24@1672 473
kaf24@1672 474 ALIGN
kaf24@1672 475 ENTRY(ret_from_intr)
kaf24@3169 476 GET_CURRENT(%ebx)
kaf24@4683 477 movl UREGS_eflags(%esp),%eax
kaf24@4683 478 movb UREGS_cs(%esp),%al
kaf24@3169 479 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3169 480 jnz test_all_events
kaf24@3169 481 jmp restore_all_xen
kaf24@1672 482
kaf24@1672 483 ENTRY(divide_error)
kaf24@3089 484 pushl $TRAP_divide_error<<16
kaf24@1672 485 ALIGN
kaf24@1672 486 error_code:
kaf24@3089 487 SAVE_ALL_NOSEGREGS(a)
kaf24@3089 488 SET_XEN_SEGMENTS(a)
kaf24@4683 489 testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
kaf24@3089 490 jz exception_with_ints_disabled
kaf24@4103 491 sti # re-enable interrupts
kaf24@3089 492 xorl %eax,%eax
kaf24@4683 493 movw UREGS_entry_vector(%esp),%ax
kaf24@3089 494 movl %esp,%edx
kaf24@4683 495 pushl %edx # push the cpu_user_regs pointer
kaf24@1672 496 GET_CURRENT(%ebx)
kaf24@3958 497 PERFC_INCR(PERFC_exceptions, %eax)
kaf24@4700 498 call *exception_table(,%eax,4)
kaf24@3089 499 addl $4,%esp
kaf24@4683 500 movl UREGS_eflags(%esp),%eax
kaf24@4683 501 movb UREGS_cs(%esp),%al
kaf24@3169 502 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3089 503 jz restore_all_xen
kaf24@1672 504 jmp process_guest_exception_and_events
kaf24@1672 505
kaf24@3089 506 exception_with_ints_disabled:
kaf24@4683 507 movl UREGS_eflags(%esp),%eax
kaf24@4683 508 movb UREGS_cs(%esp),%al
kaf24@3169 509 testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
kaf24@4103 510 jnz FATAL_exception_with_ints_disabled
cl349@3794 511 pushl %esp
kaf24@3089 512 call search_pre_exception_table
kaf24@3089 513 addl $4,%esp
kaf24@3089 514 testl %eax,%eax # no fixup code for faulting EIP?
kaf24@3089 515 jz FATAL_exception_with_ints_disabled
kaf24@4683 516 movl %eax,UREGS_eip(%esp)
kaf24@3089 517 movl %esp,%esi
kaf24@3089 518 subl $4,%esp
kaf24@3089 519 movl %esp,%edi
kaf24@4683 520 movl $UREGS_kernel_sizeof/4,%ecx
kaf24@3089 521 rep; movsl # make room for error_code/entry_vector
kaf24@4683 522 movl UREGS_error_code(%esp),%eax # error_code/entry_vector
kaf24@4683 523 movl %eax,UREGS_kernel_sizeof(%esp)
kaf24@3089 524 jmp restore_all_xen # return to fixup code
kaf24@3089 525
kaf24@3089 526 FATAL_exception_with_ints_disabled:
kaf24@3089 527 xorl %esi,%esi
kaf24@4683 528 movw UREGS_entry_vector(%esp),%si
kaf24@3089 529 movl %esp,%edx
kaf24@4683 530 pushl %edx # push the cpu_user_regs pointer
kaf24@3089 531 pushl %esi # push the trapnr (entry vector)
kaf24@4700 532 call fatal_trap
kaf24@3089 533 ud2
kaf24@3089 534
kaf24@1672 535 ENTRY(coprocessor_error)
kaf24@3089 536 pushl $TRAP_copro_error<<16
kaf24@1672 537 jmp error_code
kaf24@1672 538
kaf24@1672 539 ENTRY(simd_coprocessor_error)
kaf24@3089 540 pushl $TRAP_simd_error<<16
kaf24@1672 541 jmp error_code
kaf24@1672 542
kaf24@1672 543 ENTRY(device_not_available)
kaf24@3089 544 pushl $TRAP_no_device<<16
kaf24@1672 545 jmp error_code
kaf24@1672 546
kaf24@1672 547 ENTRY(debug)
kaf24@3089 548 pushl $TRAP_debug<<16
kaf24@1672 549 jmp error_code
kaf24@1672 550
kaf24@1672 551 ENTRY(int3)
kaf24@3089 552 pushl $TRAP_int3<<16
kaf24@1672 553 jmp error_code
kaf24@1672 554
kaf24@1672 555 ENTRY(overflow)
kaf24@3089 556 pushl $TRAP_overflow<<16
kaf24@1672 557 jmp error_code
kaf24@1672 558
kaf24@1672 559 ENTRY(bounds)
kaf24@3089 560 pushl $TRAP_bounds<<16
kaf24@1672 561 jmp error_code
kaf24@1672 562
kaf24@1672 563 ENTRY(invalid_op)
kaf24@3089 564 pushl $TRAP_invalid_op<<16
kaf24@1672 565 jmp error_code
kaf24@1672 566
kaf24@1672 567 ENTRY(coprocessor_segment_overrun)
kaf24@3089 568 pushl $TRAP_copro_seg<<16
kaf24@1672 569 jmp error_code
kaf24@1672 570
kaf24@1672 571 ENTRY(invalid_TSS)
kaf24@3089 572 movw $TRAP_invalid_tss,2(%esp)
kaf24@1672 573 jmp error_code
kaf24@1672 574
kaf24@1672 575 ENTRY(segment_not_present)
kaf24@3089 576 movw $TRAP_no_segment,2(%esp)
kaf24@1672 577 jmp error_code
kaf24@1672 578
kaf24@1672 579 ENTRY(stack_segment)
kaf24@3089 580 movw $TRAP_stack_error,2(%esp)
kaf24@1672 581 jmp error_code
kaf24@1672 582
kaf24@1672 583 ENTRY(general_protection)
kaf24@3089 584 movw $TRAP_gp_fault,2(%esp)
kaf24@1672 585 jmp error_code
kaf24@1672 586
kaf24@1672 587 ENTRY(alignment_check)
kaf24@3089 588 movw $TRAP_alignment_check,2(%esp)
kaf24@1672 589 jmp error_code
kaf24@1672 590
kaf24@1672 591 ENTRY(page_fault)
kaf24@3089 592 movw $TRAP_page_fault,2(%esp)
kaf24@1672 593 jmp error_code
kaf24@1672 594
kaf24@1672 595 ENTRY(machine_check)
kaf24@3089 596 pushl $TRAP_machine_check<<16
kaf24@1672 597 jmp error_code
kaf24@1672 598
kaf24@1672 599 ENTRY(spurious_interrupt_bug)
kaf24@3089 600 pushl $TRAP_spurious_int<<16
kaf24@1672 601 jmp error_code
kaf24@1672 602
kaf24@1672 603 ENTRY(nmi)
kaf24@1672 604 # Save state but do not trash the segment registers!
kaf24@1672 605 # We may otherwise be unable to reload them or copy them to ring 1.
kaf24@1672 606 pushl %eax
kaf24@2917 607 SAVE_ALL_NOSEGREGS(a)
kaf24@1672 608
kaf24@2047 609 # Check for hardware problems.
kaf24@1672 610 inb $0x61,%al
kaf24@1672 611 testb $0x80,%al
kaf24@2042 612 jne nmi_parity_err
kaf24@1672 613 testb $0x40,%al
kaf24@1672 614 jne nmi_io_err
kaf24@1672 615 movl %eax,%ebx
kaf24@1672 616
kaf24@1672 617 # Okay, it's almost a normal NMI tick. We can only process it if:
kaf24@1672 618 # A. We are the outermost Xen activation (in which case we have
kaf24@1672 619 # the selectors safely saved on our stack)
kaf24@3695 620 # B. DS and ES contain sane Xen values.
kaf24@1672 621 # In all other cases we bail without touching DS-GS, as we have
kaf24@1672 622 # interrupted an enclosing Xen activation in tricky prologue or
kaf24@1672 623 # epilogue code.
kaf24@4683 624 movl UREGS_eflags(%esp),%eax
kaf24@4683 625 movb UREGS_cs(%esp),%al
kaf24@3169 626 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3089 627 jnz do_watchdog_tick
kaf24@3335 628 movl %ds,%eax
kaf24@1672 629 cmpw $(__HYPERVISOR_DS),%ax
kaf24@3595 630 jne defer_nmi
kaf24@3335 631 movl %es,%eax
kaf24@1672 632 cmpw $(__HYPERVISOR_DS),%ax
kaf24@3595 633 jne defer_nmi
kaf24@1672 634
kaf24@1672 635 do_watchdog_tick:
kaf24@1672 636 movl $(__HYPERVISOR_DS),%edx
kaf24@1672 637 movl %edx,%ds
kaf24@1672 638 movl %edx,%es
kaf24@1672 639 movl %esp,%edx
kaf24@4505 640 pushl %ebx # reason
kaf24@4505 641 pushl %edx # regs
kaf24@4700 642 call do_nmi
kaf24@4505 643 addl $8,%esp
kaf24@4505 644 jmp ret_from_intr
kaf24@1672 645
kaf24@3595 646 defer_nmi:
kaf24@3595 647 movl $FIXMAP_apic_base,%eax
kaf24@3595 648 # apic_wait_icr_idle()
kaf24@3636 649 1: movl %ss:APIC_ICR(%eax),%ebx
kaf24@3595 650 testl $APIC_ICR_BUSY,%ebx
kaf24@3595 651 jnz 1b
kaf24@3595 652 # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
kaf24@3595 653 movl $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
kaf24@3636 654 TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
kaf24@3595 655 jmp restore_all_xen
kaf24@3595 656
kaf24@2047 657 nmi_parity_err:
kaf24@2047 658 # Clear and disable the parity-error line
kaf24@2047 659 andb $0xf,%al
kaf24@2047 660 orb $0x4,%al
kaf24@2047 661 outb %al,$0x61
kaf24@4700 662 cmpb $'i',%ss:opt_nmi # nmi=ignore
kaf24@3695 663 je nmi_out
kaf24@4700 664 bts $0,%ss:nmi_softirq_reason
kaf24@4700 665 bts $NMI_SOFTIRQ,%ss:irq_stat
kaf24@4700 666 cmpb $'d',%ss:opt_nmi # nmi=dom0
kaf24@3695 667 je nmi_out
kaf24@2047 668 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
kaf24@1672 669 movl %edx,%ds
kaf24@1672 670 movl %edx,%es
kaf24@2041 671 movl %esp,%edx
kaf24@2041 672 push %edx
kaf24@4700 673 call mem_parity_error
kaf24@2047 674 addl $4,%esp
kaf24@4683 675 nmi_out:movl %ss:UREGS_eflags(%esp),%eax
kaf24@4683 676 movb %ss:UREGS_cs(%esp),%al
kaf24@3695 677 testl $(3|X86_EFLAGS_VM),%eax
kaf24@4505 678 jz restore_all_xen
kaf24@3695 679 movl $(__HYPERVISOR_DS),%edx
kaf24@3695 680 movl %edx,%ds
kaf24@3695 681 movl %edx,%es
kaf24@3695 682 GET_CURRENT(%ebx)
kaf24@3695 683 jmp test_all_events
kaf24@2047 684
kaf24@1672 685 nmi_io_err:
kaf24@2047 686 # Clear and disable the I/O-error line
kaf24@2047 687 andb $0xf,%al
kaf24@2047 688 orb $0x8,%al
kaf24@2047 689 outb %al,$0x61
kaf24@4700 690 cmpb $'i',%ss:opt_nmi # nmi=ignore
kaf24@3695 691 je nmi_out
kaf24@4700 692 bts $1,%ss:nmi_softirq_reason
kaf24@4700 693 bts $NMI_SOFTIRQ,%ss:irq_stat
kaf24@4700 694 cmpb $'d',%ss:opt_nmi # nmi=dom0
kaf24@3695 695 je nmi_out
kaf24@2047 696 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
kaf24@1672 697 movl %edx,%ds
kaf24@1672 698 movl %edx,%es
kaf24@2041 699 movl %esp,%edx
kaf24@2041 700 push %edx
kaf24@4700 701 call io_check_error
kaf24@2047 702 addl $4,%esp
kaf24@3695 703 jmp nmi_out
kaf24@3169 704
kaf24@3169 705
kaf24@3169 706 ENTRY(setup_vm86_frame)
kaf24@3169 707 # Copies the entire stack frame forwards by 16 bytes.
kaf24@3169 708 .macro copy_vm86_words count=18
kaf24@3169 709 .if \count
kaf24@3169 710 pushl ((\count-1)*4)(%esp)
kaf24@3169 711 popl ((\count-1)*4)+16(%esp)
kaf24@3169 712 copy_vm86_words "(\count-1)"
kaf24@3169 713 .endif
kaf24@3169 714 .endm
kaf24@3169 715 copy_vm86_words
kaf24@3169 716 addl $16,%esp
kaf24@3169 717 ret
kaf24@3169 718
kaf24@4696 719 do_arch_sched_op:
kaf24@4696 720 # Ensure we return success even if we return via schedule_tail()
kaf24@4696 721 xorl %eax,%eax
kaf24@6452 722 GET_GUEST_REGS(%ecx)
kaf24@6452 723 movl %eax,UREGS_eax(%ecx)
kaf24@4700 724 jmp do_sched_op
kaf24@4696 725
kaf24@3169 726 do_switch_vm86:
kaf24@6452 727 # Reset the stack pointer
kaf24@6452 728 GET_GUEST_REGS(%ecx)
kaf24@6452 729 movl %ecx,%esp
kaf24@3169 730
kaf24@3169 731 # GS:ESI == Ring-1 stack activation
kaf24@4683 732 movl UREGS_esp(%esp),%esi
kaf24@5431 733 VFLT1: mov UREGS_ss(%esp),%gs
kaf24@3169 734
kaf24@3169 735 # ES:EDI == Ring-0 stack activation
kaf24@4683 736 leal UREGS_eip(%esp),%edi
kaf24@3169 737
kaf24@3169 738 # Restore the hypercall-number-clobbered EAX on our stack frame
kaf24@3169 739 VFLT2: movl %gs:(%esi),%eax
kaf24@4683 740 movl %eax,UREGS_eax(%esp)
kaf24@3169 741 addl $4,%esi
kaf24@3169 742
kaf24@3169 743 # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
kaf24@4683 744 movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
kaf24@3169 745 VFLT3: movl %gs:(%esi),%eax
kaf24@3169 746 stosl
kaf24@3169 747 addl $4,%esi
kaf24@3169 748 loop VFLT3
kaf24@3169 749
kaf24@4103 750 # Fix up EFLAGS: IOPL=0, IF=1, VM=1
kaf24@4683 751 andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
kaf24@4683 752 orl $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)
kaf24@3169 753
kaf24@3169 754 jmp test_all_events
kaf24@3169 755
kaf24@3169 756 .section __ex_table,"a"
cl349@4339 757 .long VFLT1,domain_crash_synchronous
cl349@4339 758 .long VFLT2,domain_crash_synchronous
cl349@4339 759 .long VFLT3,domain_crash_synchronous
kaf24@3169 760 .previous
kaf24@3169 761
kaf24@1672 762 .data
kaf24@3089 763
kaf24@3089 764 ENTRY(exception_table)
kaf24@4700 765 .long do_divide_error
kaf24@4700 766 .long do_debug
kaf24@3089 767 .long 0 # nmi
kaf24@4700 768 .long do_int3
kaf24@4700 769 .long do_overflow
kaf24@4700 770 .long do_bounds
kaf24@4700 771 .long do_invalid_op
kaf24@4700 772 .long math_state_restore
kaf24@3089 773 .long 0 # double fault
kaf24@4700 774 .long do_coprocessor_segment_overrun
kaf24@4700 775 .long do_invalid_TSS
kaf24@4700 776 .long do_segment_not_present
kaf24@4700 777 .long do_stack_segment
kaf24@4700 778 .long do_general_protection
kaf24@4700 779 .long do_page_fault
kaf24@4700 780 .long do_spurious_interrupt_bug
kaf24@4700 781 .long do_coprocessor_error
kaf24@4700 782 .long do_alignment_check
kaf24@4700 783 .long do_machine_check
kaf24@4700 784 .long do_simd_coprocessor_error
kaf24@3089 785
kaf24@1672 786 ENTRY(hypercall_table)
kaf24@4700 787 .long do_set_trap_table /* 0 */
kaf24@4700 788 .long do_mmu_update
kaf24@4700 789 .long do_set_gdt
kaf24@4700 790 .long do_stack_switch
kaf24@4700 791 .long do_set_callbacks
kaf24@4700 792 .long do_fpu_taskswitch /* 5 */
kaf24@4700 793 .long do_arch_sched_op
kaf24@4700 794 .long do_dom0_op
kaf24@4700 795 .long do_set_debugreg
kaf24@4700 796 .long do_get_debugreg
kaf24@4700 797 .long do_update_descriptor /* 10 */
kaf24@4930 798 .long do_ni_hypercall
kaf24@6468 799 .long do_memory_op
kaf24@4700 800 .long do_multicall
kaf24@4700 801 .long do_update_va_mapping
kaf24@4700 802 .long do_set_timer_op /* 15 */
kaf24@4700 803 .long do_event_channel_op
kaf24@4700 804 .long do_xen_version
kaf24@4700 805 .long do_console_io
kaf24@4700 806 .long do_physdev_op
kaf24@4700 807 .long do_grant_table_op /* 20 */
kaf24@4700 808 .long do_vm_assist
kaf24@4700 809 .long do_update_va_mapping_otherdomain
kaf24@4700 810 .long do_switch_vm86
kaf24@4700 811 .long do_boot_vcpu
kaf24@4700 812 .long do_ni_hypercall /* 25 */
kaf24@4700 813 .long do_mmuext_op
smh22@5930 814 .long do_acm_op /* 27 */
kaf24@1672 815 .rept NR_hypercalls-((.-hypercall_table)/4)
kaf24@4700 816 .long do_ni_hypercall
kaf24@1672 817 .endr
kaf24@6452 818
kaf24@6452 819 ENTRY(hypercall_args_table)
kaf24@6452 820 .byte 1 /* do_set_trap_table */ /* 0 */
kaf24@6452 821 .byte 4 /* do_mmu_update */
kaf24@6452 822 .byte 2 /* do_set_gdt */
kaf24@6452 823 .byte 2 /* do_stack_switch */
kaf24@6452 824 .byte 4 /* do_set_callbacks */
kaf24@6452 825 .byte 1 /* do_fpu_taskswitch */ /* 5 */
kaf24@6452 826 .byte 2 /* do_arch_sched_op */
kaf24@6452 827 .byte 1 /* do_dom0_op */
kaf24@6452 828 .byte 2 /* do_set_debugreg */
kaf24@6452 829 .byte 1 /* do_get_debugreg */
kaf24@6452 830 .byte 4 /* do_update_descriptor */ /* 10 */
kaf24@6452 831 .byte 0 /* do_ni_hypercall */
kaf24@6468 832 .byte 2 /* do_memory_op */
kaf24@6452 833 .byte 2 /* do_multicall */
kaf24@6452 834 .byte 4 /* do_update_va_mapping */
kaf24@6452 835 .byte 2 /* do_set_timer_op */ /* 15 */
kaf24@6452 836 .byte 1 /* do_event_channel_op */
kaf24@6734 837 .byte 2 /* do_xen_version */
kaf24@6452 838 .byte 3 /* do_console_io */
kaf24@6452 839 .byte 1 /* do_physdev_op */
kaf24@6452 840 .byte 3 /* do_grant_table_op */ /* 20 */
kaf24@6452 841 .byte 2 /* do_vm_assist */
kaf24@6452 842 .byte 5 /* do_update_va_mapping_otherdomain */
kaf24@6452 843 .byte 0 /* do_switch_vm86 */
kaf24@6452 844 .byte 2 /* do_boot_vcpu */
kaf24@6452 845 .byte 0 /* do_ni_hypercall */ /* 25 */
kaf24@6452 846 .byte 4 /* do_mmuext_op */
kaf24@6452 847 .byte 1 /* do_acm_op */
kaf24@6452 848 .rept NR_hypercalls-(.-hypercall_args_table)
kaf24@6452 849 .byte 0 /* do_ni_hypercall */
kaf24@6452 850 .endr