debuggers.hg

view xen/arch/x86/x86_32/entry.S @ 3198:a46548db5e52

bitkeeper revision 1.1159.187.20 (41a77433rez_BGuifdBbTI3Y34kiag)

Deal with exceptions from guest context with real interrupts disabled.
This can happen because IOPL != 0, and even a well-behaved guest may
execute random BIOS code if it is a privileged hardware-controlling
domain (e.g., the DOM0 X server).
author kaf24@scramble.cl.cam.ac.uk
date Fri Nov 26 18:21:39 2004 +0000 (2004-11-26)
parents b013a6b30d9e
children 20290eb62e95 861d3cdc1dc5
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *   { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *     return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, the original values of DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have
 * them saved/restored in the guest OS. Furthermore, if we load them we may
 * cause a fault if they are invalid, which is a hassle to deal with. We avoid
 * that problem if we don't load them :-) This property allows us to use
 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
 * on return to ring != 0, we can simply package it up as a return via
 * the failsafe callback, and let the guest OS sort it out (perhaps by
 * killing an application process). Note that we also do this for any
 * faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - a fault while creating a callback stack frame (due to a bad ring-1
 *    stack).
 *  - a faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/x86_32/asm_defns.h>
#include <public/xen.h>

#define GET_CURRENT(reg)   \
        movl $8192-4, reg; \
        orl  %esp, reg;    \
        andl $~3,reg;      \
        movl (reg),reg;
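/*
 * GET_CURRENT relies on each per-CPU stack being 8KB in size and 8KB-aligned,
 * with the pointer to the currently executing task_struct stored in the top
 * word of that stack (this layout is assumed here, not enforced).  OR-ing
 * %esp with 8192-4 and clearing the low two bits yields the address of that
 * top word, which is then dereferenced to fetch the task pointer.
 */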

ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp   test_all_events

        ALIGN
restore_all_guest:
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz   failsafe_callback
FLT1:   movl  XREGS_ds(%esp),%ds
FLT2:   movl  XREGS_es(%esp),%es
FLT3:   movl  XREGS_fs(%esp),%fs
FLT4:   movl  XREGS_gs(%esp),%gs
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                 # error_code/entry_vector
        movl  %eax,XREGS_eax+4(%esp)
        movl  %ebp,XREGS_ebp+4(%esp)
        movl  %edi,XREGS_edi+4(%esp)
        movl  %esi,XREGS_esi+4(%esp)
        movl  %edx,XREGS_edx+4(%esp)
        movl  %ecx,XREGS_ecx+4(%esp)
        movl  %ebx,XREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
DBLFIX1:GET_CURRENT(%ebx)
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz   domain_crash             # cannot reenter failsafe code
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jmp   test_all_events          # will return via failsafe code
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,DBLFIX1
.previous
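/*
 * Each entry in __pre_ex_table and __ex_table above is a (faulting EIP,
 * fixup EIP) pair.  __pre_ex_table is searched by
 * search_pre_exception_table() when an exception is raised with interrupts
 * disabled (see exception_with_ints_disabled below); __ex_table carries the
 * ordinary exception-fixup mappings applied once a full frame has been saved.
 */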

/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        andb  $~TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        leal  DOMAIN_trap_bounce(%ebx),%edx
        movl  DOMAIN_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  DOMAIN_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT6:   iret
.section .fixup,"ax"
FIX6:   pushl %ebx
        GET_CURRENT(%ebx)
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        pop   %ebx
        jmp   FIX5
.section __pre_ex_table,"a"
        .long FLT6,FIX6
.previous

        ALIGN
restore_all_xen:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl  $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl  $(NR_hypercalls-1),%eax
        call  *SYMBOL_NAME(hypercall_table)(,%eax,4)
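        /*
         * Note: the hypercall number in %eax is masked, not range-checked,
         * so any out-of-range number indexes a valid table slot; unused
         * slots are filled with do_ni_hypercall (see hypercall_table below).
         */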

ret_from_hypercall:
        movl  %eax,XREGS_eax(%esp)     # save the return value

test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  DOMAIN_processor(%ebx),%eax
        shl   $6,%eax                  # sizeof(irq_cpustat) == 64
        test  %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz   process_softirqs
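        /*
         * %ecx is ~0 here, so the TEST above checks every bit of the first
         * word of this CPU's irq_stat entry (its pending-softirq mask);
         * irq_stat is indexed by cpu * sizeof(irq_cpustat), hence the
         * shift by 6.
         */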
/*test_guest_events:*/
        movl  DOMAIN_shared_info(%ebx),%eax
        testb $0xFF,SHINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,SHINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        leal  DOMAIN_trap_bounce(%ebx),%edx
        movl  DOMAIN_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  DOMAIN_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        movl  DOMAIN_shared_info(%ebx),%eax
        movb  $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
        jmp   restore_all_guest

        ALIGN
process_softirqs:
        sti
        call  SYMBOL_NAME(do_softirq)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == task_struct                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp.     */
create_bounce_frame:
        movb  XREGS_cs+4(%esp),%cl
        testb $2,%cl
        jz    1f /* jump if returning to an existing ring-1 activation */
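        /*
         * Bit 1 of the saved CS RPL is clear for rings 0 and 1 and set for
         * rings 2 and 3.  Ring-0 callbacks never reach this point (they are
         * deferred; see the comment at the top of this file), so a clear bit
         * here means an existing ring-1 activation.
         */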
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl  DOMAIN_processor(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
        movl  %eax, %ecx
        shll  $7, %ecx
        shll  $13, %eax
        addl  %ecx,%eax
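        /* (8320 == 8192 + 128 == (1<<13) + (1<<7), hence the two shifts.) */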
        addl  $init_tss + 12,%eax
        movl  (%eax),%esi              /* tss->esp1 */
FLT7:   movl  4(%eax),%gs              /* tss->ss1  */
        /* base of stack frame must contain ss/esp (inter-priv iret) */
        subl  $8,%esi
        movl  XREGS_esp+4(%esp),%eax
FLT8:   movl  %eax,%gs:(%esi)
        movl  XREGS_ss+4(%esp),%eax
FLT9:   movl  %eax,%gs:4(%esi)
        jmp   2f
1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  XREGS_esp+4(%esp),%esi
FLT10:  movl  XREGS_ss+4(%esp),%gs
2:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl  $12,%esi
        movl  XREGS_eip+4(%esp),%eax
FLT11:  movl  %eax,%gs:(%esi)
        movl  XREGS_cs+4(%esp),%eax
FLT12:  movl  %eax,%gs:4(%esi)
        movl  XREGS_eflags+4(%esp),%eax
FLT13:  movl  %eax,%gs:8(%esi)
        movb  TRAPBOUNCE_flags(%edx),%cl
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                  # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT14:  movl  %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subl  $4,%esi                  # push %cr2 onto guest frame
        movl  TRAPBOUNCE_cr2(%edx),%eax
FLT15:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                 # add DS/ES/FS/GS to failsafe stack frame
        movl  XREGS_ds+4(%esp),%eax
FLT16:  movl  %eax,%gs:(%esi)
        movl  XREGS_es+4(%esp),%eax
FLT17:  movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT18:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT19:  movl  %eax,%gs:12(%esi)
2:      movb  $0,TRAPBOUNCE_flags(%edx)
        /* Rewrite our stack frame and return to ring 1.                  */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,XREGS_eflags+4(%esp)
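        /* (~0xfffcbeff == 0x00034100, i.e. bit 8 (TF), bit 14 (NT),
         *  bit 16 (RF) and bit 17 (VM) are the bits cleared above.) */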
        movl  %gs,XREGS_ss+4(%esp)
        movl  %esi,XREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,XREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,XREGS_eip+4(%esp)
        ret
.section .fixup,"ax"
FIX7:   sti
        popl  %esi
        addl  $4,%esp                  # Discard create_b_frame return address
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT2,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
        .long FLT7,FIX7
        .long FLT8,FIX7
        .long FLT9,FIX7
        .long FLT10,FIX7
        .long FLT11,FIX7
        .long FLT12,FIX7
        .long FLT13,FIX7
        .long FLT14,FIX7
        .long FLT15,FIX7
        .long FLT16,FIX7
        .long FLT17,FIX7
        .long FLT18,FIX7
        .long FLT19,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
.previous

        ALIGN
process_guest_exception_and_events:
        leal  DOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movb  XREGS_cs(%esp),%al
        testb $3,%al                   # return to non-supervisor?
        jnz   test_all_events
        jmp   restore_all_xen
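
/*
 * Exception entry stubs.  Each stub leaves a single word on the stack whose
 * low 16 bits hold the error code and whose high 16 bits hold the entry
 * vector.  Exceptions without a hardware error code push "TRAP_xxx<<16"
 * (i.e. a zero error code); exceptions for which the CPU pushes an error
 * code instead write the vector into the high half of that word with
 * "movw $TRAP_xxx,2(%esp)".
 */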

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
1:      sti                            # re-enable interrupts
        xorl  %eax,%eax
        movw  XREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                     # push the xen_regs pointer
        GET_CURRENT(%ebx)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
        movb  XREGS_cs(%esp),%al
        testb $3,%al
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

exception_with_ints_disabled:
        movb  XREGS_cs(%esp),%al
        testb $3,%al                   # interrupts disabled outside Xen?
        jnz   1b                       # it really does happen! (e.g., DOM0 X server)
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,XREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $XREGS_kernel_sizeof/4,%ecx
        rep;  movsl                    # make room for error_code/entry_vector
        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,XREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen          # return to fixup code
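        /*
         * The rep/movsl above shifts the saved register frame down one word;
         * the saved error_code/entry_vector is then duplicated into the slot
         * this opens up above the frame.  When restore_all_xen IRETs to the
         * fixup address patched into XREGS_eip, that word is therefore on
         * top of the stack, exactly as the FIX* handlers expect.
         */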

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  XREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                     # push the xen_regs pointer
        pushl %esi                     # push the trapnr (entry vector)
        call  SYMBOL_NAME(fatal_trap)
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx
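        # (Port 0x61 is the ISA system-control/NMI-status port: bit 7 is set
        #  on a memory-parity/SERR error, bit 6 on an I/O channel check.)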

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movb  XREGS_cs(%esp),%al
        testb $3,%al
        jnz   do_watchdog_tick
        movl  XREGS_ds(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_es(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_fs(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_gs(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                     # reason
        pushl %edx                     # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movb  XREGS_cs(%esp),%al
        testb $3,%al
        jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest

nmi_parity_err:
        # Clear and disable the parity-error line
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)         # nmi=ignore
        je    restore_all_xen
        bts   $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)         # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx               # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(mem_parity_error)
        addl  $4,%esp
        jmp   ret_from_intr

nmi_io_err:
        # Clear and disable the I/O-error line
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)         # nmi=ignore
        je    restore_all_xen
        bts   $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)         # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx               # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(io_check_error)
        addl  $4,%esp
        jmp   ret_from_intr

.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)
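/*
 * exception_table is indexed by the entry vector stored by the stubs above,
 * i.e. by hardware exception number.  Vector 2 (NMI) and vector 8 (double
 * fault) have null entries because they are handled by dedicated entry
 * points rather than dispatched through this table.
 */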

ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr
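/*
 * The .rept directive pads hypercall_table out to NR_hypercalls entries
 * with do_ni_hypercall ("not implemented"), so every index reachable after
 * the AND mask in ENTRY(hypercall) dispatches to a valid handler.
 */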