xen/arch/x86/x86_64/entry.S @ 10949:b33c08de3d98 (debuggers.hg)

[HVM] Add a concept of HVM parameters to the hypervisor.

Each HVM domain has a space of HVM parameters associated with it,
and these can be manipulated via a new hvm_op hypercall. This means
that the hypervisor no longer needs to parse the hvm_info table, so
remove that code.

Signed-off-by: Steven Smith <ssmith@xensource.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
Author: kfraser@localhost.localdomain
Date:   Thu Aug 03 13:53:33 2006 +0100

/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
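
/*
 * Each per-CPU stack is STACK_SIZE bytes and STACK_SIZE-aligned, so
 * masking %rsp with ~(STACK_SIZE-1) yields the stack base; the guest
 * register frame lives in the struct cpu_info at the very top of the
 * stack, hence the OR with (STACK_SIZE-CPUINFO_sizeof).
 */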
#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;
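
/*
 * The current vcpu pointer is cached in the topmost 8 bytes of the
 * per-CPU stack: OR-ing %rsp with STACK_SIZE-8 (then re-aligning with
 * ~7) addresses that slot, and the final load fetches the struct vcpu
 * pointer itself.
 */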
#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
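
/*
 * Entered from syscall_enter when a guest user-mode SYSCALL must be
 * bounced to the guest kernel: vector the guest to its registered
 * syscall handler, masking event delivery if the guest requested
 * VGCF_syscall_disables_events.
 */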
        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $0,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
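
/*
 * Return to guest context. If this was a SYSCALL-style entry the frame
 * is unwound with SYSRET rather than a full IRET: %rcx/%r11 carry the
 * guest RIP/RFLAGS, and a guest running on the 32-bit (compat) code
 * segment takes the SYSRETL variant.
 */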
/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest
        addq  $8,%rsp
        popq  %rcx                     # RIP
        popq  %r11                     # CS
        cmpw  $__GUEST_CS32,%r11w
        popq  %r11                     # RFLAGS
        popq  %rsp                     # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
FLT1:   iretq
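
/*
 * If the IRETQ above faults (e.g. on bad guest selectors), the
 * __pre_ex_table entry FLT1 -> FIX1 below re-enters Xen with a clean
 * exception frame and re-raises the fault through handle_exception; if
 * the fault cannot be fixed up, control diverts to failsafe_callback,
 * which bounces the guest to its registered failsafe handler.
 */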
.section .fixup,"ax"
FIX1:   popq  -15*8-8(%rsp)            # error_code/entry_vector
        SAVE_ALL                       # 15*8 bytes pushed
        movq  -8(%rsp),%rsi            # error_code/entry_vector
        sti                            # after stack abuse (-1024(%rsp))
        pushq $__HYPERVISOR_DS         # SS
        leaq  8(%rsp),%rax
        pushq %rax                     # RSP
        pushf                          # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  DBLFLT1(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
DBLFLT1:GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad FLT1,FIX1
.previous
.section __ex_table,"a"
        .quad DBLFLT1,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %r11, %rcx                       = SYSCALL-saved %rflags and %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $__GUEST_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel
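
/*
 * Hypercall dispatch: %rax holds the hypercall index. In debug
 * (!NDEBUG) builds, the blocks below poison the argument registers the
 * selected hypercall does not use before the call, and clobber the
 * ones it does use afterwards, so guests cannot grow to rely on any
 * argument register surviving a hypercall.
 */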
/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#endif
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10                     # Shadow RIP
        cmpq  %r10,UREGS_rip(%rsp)
        popq  %rcx                     # Shadow hypercall index
        jne   skip_clobber             /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)     # save the return value
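
/*
 * test_all_events is the common exit path back to guest context: with
 * interrupts disabled it checks, in order, for pending softirqs, a
 * pending NMI, and pending event-channel upcalls, delivering each via
 * a bounce frame before finally reaching restore_all_guest.
 */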
/* %rbx: struct vcpu */
test_all_events:
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        btr   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
        jc    process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        movq  VCPU_nmi_addr(%rbx),%rax
        test  %rax,%rax
        jz    test_all_events
        bts   $_VCPUF_nmi_masked,VCPU_flags(%rbx)
        jc    1f
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
1:      bts   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
        jmp   test_guest_events

bad_hypercall:
        movq  $-ENOSYS,UREGS_rax(%rsp)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx is guaranteed non-clobbered.                      */
create_bounce_frame:
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)   # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi               # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                       # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
FLT2:   movq  %rax,32(%rsi)            # SS
        movq  UREGS_rsp+8(%rsp),%rax
FLT3:   movq  %rax,24(%rsi)            # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                      # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                 # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax     # Bits  0-15: CS
FLT4:   movq  %rax,8(%rsi)             # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al                # Bits 0-7: saved_upcall_mask
        setz  %ch                      # %ch == !saved_upcall_mask
        movq  UREGS_eflags+8(%rsp),%rax
        andq  $~X86_EFLAGS_IF,%rax
        shlb  $1,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                  # Fold EFLAGS.IF into %eax
FLT5:   movq  %rax,16(%rsi)            # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
FLT6:   movq  %rax,(%rsi)              # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
FLT7:   movq  %rax,(%rsi)              # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
FLT8:   movq  %rax,24(%rsi)            # GS
        movl  %fs,%eax
FLT9:   movq  %rax,16(%rsi)            # FS
        movl  %es,%eax
FLT10:  movq  %rax,8(%rsi)             # ES
        movl  %ds,%eax
FLT11:  movq  %rax,(%rsi)              # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
FLT12:  movq  %rax,8(%rsi)             # R11
        movq  UREGS_rcx+8(%rsp),%rax
FLT13:  movq  %rax,(%rsi)              # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $0xfffcbeff,UREGS_eflags+8(%rsp)
        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        ret
.section __ex_table,"a"
        .quad FLT2,domain_crash_synchronous , FLT3,domain_crash_synchronous
        .quad FLT4,domain_crash_synchronous , FLT5,domain_crash_synchronous
        .quad FLT6,domain_crash_synchronous , FLT7,domain_crash_synchronous
        .quad FLT8,domain_crash_synchronous , FLT9,domain_crash_synchronous
        .quad FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .quad FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
.previous
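
/*
 * Every FLTn store above writes to the guest stack and so may fault on
 * a bad guest %rsp; the __ex_table entries divert any such fault
 * straight to domain_crash_synchronous, since an exception cannot be
 * delivered to a guest whose stack is unusable.
 */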
domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

domain_crash_synchronous:
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        orb   $3,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printf
        jmp   __domain_crash_synchronous

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jnz   test_all_events
        jmp   restore_all_xen

        ALIGN
/* No special register assumptions. */
handle_exception:
        SAVE_ALL
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
        movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events
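
/*
 * An exception taken with interrupts disabled can only have
 * interrupted Xen itself, and is survivable only if the faulting %rip
 * has an entry in the pre-exception fixup table. The code below
 * rewrites the saved frame so that restore_all_xen "returns" to the
 * fixup handler, with the error code and entry vector appended to the
 * interrupted context.
 */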
/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)        # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)       # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)      # return %rsp is now aligned?
        jz    1f                       # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                    # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen          # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2
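
/*
 * Exception entry stubs. Vectors for which the CPU does not push a
 * hardware error code get a dummy "pushq $0" so every handler sees a
 * uniform frame; the trap number is then stored in the high 32 bits of
 * that slot (offset 4), where UREGS_entry_vector finds it.
 */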
ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2
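
/*
 * NMI entry. An NMI can interrupt Xen at any point, including places
 * (such as the SYSCALL trampoline) where %rsp is not yet at the
 * canonical frame location, so a frame saved in guest context is
 * copied down to the fixed guest-register area before normal
 * processing continues.
 */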
ENTRY(nmi)
        pushq $0
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    nmi_in_hypervisor_mode
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rbx)
        movl  $UREGS_kernel_sizeof/8,%ecx
1:      popq  %rax
        movq  %rax,(%rbx)
        addq  $8,%rbx
        loop  1b
        subq  $UREGS_kernel_sizeof,%rbx
        movq  %rbx,%rsp
nmi_in_hypervisor_mode:
        movq  %rsp,%rdi
        call  do_nmi
        jmp   ret_from_intr

do_arch_sched_op_compat:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%r10)
        movq  %rax,UREGS_rax(%r10)
        jmp   do_sched_op_compat

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%r10)
        movq  %rax,UREGS_rax(%r10)
        jmp   do_sched_op
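
/*
 * The wrappers above store a zero return value directly in the guest's
 * saved register frame before calling the scheduler: if the vcpu is
 * descheduled and later resumed via schedule_tail(), the hypercall
 * path's "movq %rax,UREGS_rax(%rsp)" is never reached, so the saved
 * %rax must already hold the success code.
 */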
.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad 0 # nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad math_state_restore
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_arch_sched_op_compat
        .quad do_dom0_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_acm_op
        .quad do_nmi_op
        .quad do_arch_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
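
/*
 * Argument counts for each hypercall, indexed like hypercall_table;
 * consumed only by the debug-build register-clobbering code in the
 * hypercall path above. Both tables are padded out to exactly
 * NR_hypercalls entries.
 */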
ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 3 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_arch_sched_op_compat */
        .byte 1 /* do_dom0_op           */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 2 /* do_set_segment_base  */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_acm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_arch_sched_op     */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr