debuggers.hg

view xen/arch/x86/x86_32/entry.S @ 2958:90b094417ff3

bitkeeper revision 1.1159.1.394 (4190a152C0zjusRSriDG4srGifYk1A)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xen.bk-smp
into freefall.cl.cam.ac.uk:/local/scratch/cl349/xen.bk-smp
author cl349@freefall.cl.cam.ac.uk
date Tue Nov 09 10:52:02 2004 +0000 (2004-11-09)
parents 61a55dee09d8 4a610e420c0d
children e809b69fa26a
line source
1 /*
2 * Hypercall and fault low-level handling routines.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 * Copyright (c) 1991, 1992 Linus Torvalds
6 */
8 /*
9 * The idea for callbacks to guest OSes
10 * ====================================
11 *
12 * First, we require that all callbacks (either via a supplied
13 * interrupt-descriptor-table, or via the special event or failsafe callbacks
14 * in the shared-info-structure) are to ring 1. This just makes life easier,
15 * in that it means we don't have to do messy GDT/LDT lookups to find
16 * out which the privilege-level of the return code-selector. That code
17 * would just be a hassle to write, and would need to account for running
18 * off the end of the GDT/LDT, for example. For all callbacks we check
19 * that the provided
20 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as
21 * don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
22 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
23 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
24 * than the correct ring) and bad things are bound to ensue -- IRET is
25 * likely to fault, and we may end up killing the domain (no harm can
26 * come to Xen, though).
27 *
28 * When doing a callback, we check if the return CS is in ring 0. If so,
29 * callback is delayed until next return to ring != 0.
30 * If return CS is in ring 1, then we create a callback frame
31 * starting at return SS/ESP. The base of the frame does an intra-privilege
32 * interrupt-return.
33 * If return CS is in ring > 1, we create a callback frame starting
34 * at SS/ESP taken from appropriate section of the current TSS. The base
35 * of the frame does an inter-privilege interrupt-return.
36 *
37 * Note that the "failsafe callback" uses a special stackframe:
38 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
39 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
40 * That is, original values for DS/ES/FS/GS are placed on stack rather than
41 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
42 * saved/restored in guest OS. Furthermore, if we load them we may cause
43 * a fault if they are invalid, which is a hassle to deal with. We avoid
44 * that problem if we don't load them :-) This property allows us to use
45 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
46 * on return to ring != 0, we can simply package it up as a return via
47 * the failsafe callback, and let the guest OS sort it out (perhaps by
48 * killing an application process). Note that we also do this for any
49 * faulting IRET -- just let the guest OS handle it via the event
50 * callback.
51 *
52 * We terminate a domain in the following cases:
53 * - creating a callback stack frame (due to bad ring-1 stack).
54 * - faulting IRET on entry to failsafe callback handler.
55 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
56 * handler in good order (absolutely no faults allowed!).
57 */
59 #include <xen/config.h>
60 #include <xen/errno.h>
61 #include <xen/softirq.h>
62 #include <asm/x86_32/asm_defns.h>
63 #include <public/xen.h>
/*
 * GET_CURRENT(reg): load the 'current' task pointer into reg.
 * The stack sits inside a 4096-byte region whose last word holds the
 * task pointer: OR %esp with 4096-4 to form the address of that last
 * word (the andl $~3 re-clears the two low bits in case %esp was not
 * word-aligned), then dereference it.
 */
65 #define GET_CURRENT(reg) \
66 movl $4096-4, reg; \
67 orl %esp, reg; \
68 andl $~3,reg; \
69 movl (reg),reg;
/*
 * Resume a non-idle task: pick up 'current' in %ebx and fall into the
 * common event-delivery/return-to-guest path.
 */
71 ENTRY(continue_nonidle_task)
72 GET_CURRENT(%ebx)
73 jmp test_all_events
75 ALIGN
76 /*
77 * HYPERVISOR_multicall(call_list, nr_calls)
78 * Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'.
79 * This is fairly easy except that:
80 * 1. We may fault reading the call list, and must patch that up; and
81 * 2. We cannot recursively call HYPERVISOR_multicall, or a malicious
82 * caller could cause our stack to blow up.
83 */
84 #define MULTICALL_ENTRY_ORDER 5
/*
 * do_multicall(call_list, nr_calls)
 * In:  4(%esp) = call_list, 8(%esp) = nr_calls (offsets after the popl).
 * Out: %eax = 0 on success, -EFAULT on a bad list address.
 * Each list entry is 1<<MULTICALL_ENTRY_ORDER (32) bytes:
 * { op, args[0..4], result }.  Faulting accesses to the guest-supplied
 * list are recovered via the __ex_table entries that follow this code.
 */
85 do_multicall:
86 popl %eax
# Recursion guard (point 2 in the comment above): if our return address
# is the multicall loop's own call site, a nested multicall was
# attempted -- bounce straight to the return path instead of executing.
87 cmpl $SYMBOL_NAME(multicall_return_from_call),%eax
88 je multicall_return_from_call
89 pushl %ebx
90 movl 4(%esp),%ebx /* EBX == call_list */
91 movl 8(%esp),%ecx /* ECX == nr_calls */
92 /* Ensure the entire multicall list is below HYPERVISOR_VIRT_START. */
93 movl %ecx,%eax
94 shll $MULTICALL_ENTRY_ORDER,%eax
95 addl %ebx,%eax /* EAX == end of multicall list */
96 jc bad_multicall_address # length computation wrapped around
97 cmpl $__HYPERVISOR_VIRT_START,%eax
98 jnc bad_multicall_address # list reaches into the Xen area
99 multicall_loop:
100 pushl %ecx # save live loop counter across the call
101 multicall_fault1:
102 pushl 20(%ebx) # args[4]
103 multicall_fault2:
104 pushl 16(%ebx) # args[3]
105 multicall_fault3:
106 pushl 12(%ebx) # args[2]
107 multicall_fault4:
108 pushl 8(%ebx) # args[1]
109 multicall_fault5:
110 pushl 4(%ebx) # args[0]
111 multicall_fault6:
112 movl (%ebx),%eax # op
113 andl $(NR_hypercalls-1),%eax # clamp op to a valid table index
114 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
115 multicall_return_from_call:
116 multicall_fault7:
117 movl %eax,24(%ebx) # args[5] == result
118 addl $20,%esp # discard the five argument words
119 popl %ecx
120 addl $(1<<MULTICALL_ENTRY_ORDER),%ebx # advance to next list entry
121 loop multicall_loop
122 popl %ebx
123 xorl %eax,%eax # success
124 jmp ret_from_hypercall
/* A list that wraps or overlaps Xen's address range fails cleanly. */
126 bad_multicall_address:
127 popl %ebx
128 movl $-EFAULT,%eax
129 jmp ret_from_hypercall
/*
 * Fault-recovery table for the guest-list accesses above: each
 * faulting instruction maps to a fixup that unwinds exactly what had
 * been pushed by then.  fault7 shares fixup6 because at both points
 * all five argument words plus the saved %ecx are still on the stack.
 */
131 .section __ex_table,"a"
132 .align 4
133 .long multicall_fault1, multicall_fixup1
134 .long multicall_fault2, multicall_fixup2
135 .long multicall_fault3, multicall_fixup3
136 .long multicall_fault4, multicall_fixup4
137 .long multicall_fault5, multicall_fixup5
138 .long multicall_fault6, multicall_fixup6
139 .long multicall_fault7, multicall_fixup6
140 .previous
/*
 * Fall-through fixup chain: entering at multicall_fixupN pops the
 * argument words that had been pushed before faultN, plus the saved
 * loop counter, then restores %ebx and returns -EFAULT to the caller.
 */
142 .section .fixup,"ax"
143 multicall_fixup6:
144 addl $4,%esp
145 multicall_fixup5:
146 addl $4,%esp
147 multicall_fixup4:
148 addl $4,%esp
149 multicall_fixup3:
150 addl $4,%esp
151 multicall_fixup2:
152 addl $4,%esp
153 multicall_fixup1:
154 addl $4,%esp # last pop discards the saved %ecx
155 popl %ebx
156 movl $-EFAULT,%eax
157 jmp ret_from_hypercall
158 .previous
160 ALIGN
/*
 * Return to guest context: reload the guest's data segment registers
 * (any of which may fault on a bad guest selector), pop the GPRs,
 * discard the orig_eax slot and iret.  A fault on a segment load
 * (labels 1-4) or on the iret itself (label 5) is converted into a
 * failsafe callback to the guest via the fixups below, as described in
 * the file header.
 */
161 restore_all_guest:
162 1: movl XREGS_ds(%esp),%ds
163 2: movl XREGS_es(%esp),%es
164 3: movl XREGS_fs(%esp),%fs
165 4: movl XREGS_gs(%esp),%gs
166 popl %ebx
167 popl %ecx
168 popl %edx
169 popl %esi
170 popl %edi
171 popl %ebp
172 popl %eax
173 addl $4,%esp # skip the orig_eax slot
174 5: iret
175 .section .fixup,"ax"
/* iret faulted after the frame was unwound: re-create the orig_eax
 * slot and push all GPRs back before entering the failsafe path. */
176 6: subl $4,%esp
177 pushl %eax
178 pushl %ebp
179 pushl %edi
180 pushl %esi
181 pushl %edx
182 pushl %ecx
183 pushl %ebx
/* Segment-load faults land here directly: frame is still intact. */
184 7: SET_XEN_SEGMENTS(a)
185 jmp failsafe_callback
186 .previous
187 .section __ex_table,"a"
188 .align 4
189 .long 1b,7b
190 .long 2b,7b
191 .long 3b,7b
192 .long 4b,7b
193 .long 5b,6b
194 .previous
196 /* No special register assumptions */
/*
 * Deliver the failsafe callback: point this CPU's guest_trap_bounce at
 * the domain's registered failsafe handler, build the basic bounce
 * frame, then append the four saved segment registers (DS/ES/FS/GS)
 * below it -- the special failsafe stackframe described in the file
 * header.  After create_bounce_frame, %gs:%esi addresses the new guest
 * stack.  Any FAULTn here kills the domain (see __ex_table below).
 */
197 failsafe_callback:
198 GET_CURRENT(%ebx)
199 movl DOMAIN_processor(%ebx),%eax
200 shl $4,%eax # sizeof(guest_trap_bounce) == 16
201 lea guest_trap_bounce(%eax),%edx
202 movl DOMAIN_failsafe_addr(%ebx),%eax
203 movl %eax,GTB_eip(%edx)
204 movl DOMAIN_failsafe_sel(%ebx),%eax
205 movw %ax,GTB_cs(%edx)
206 call create_bounce_frame
207 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
208 movl XREGS_ds(%esp),%eax
209 FAULT1: movl %eax,%gs:(%esi)
210 movl XREGS_es(%esp),%eax
211 FAULT2: movl %eax,%gs:4(%esi)
212 movl XREGS_fs(%esp),%eax
213 FAULT3: movl %eax,%gs:8(%esi)
214 movl XREGS_gs(%esp),%eax
215 FAULT4: movl %eax,%gs:12(%esi)
216 movl %esi,XREGS_esp(%esp) # guest resumes on the new stack top
217 popl %ebx
218 popl %ecx
219 popl %edx
220 popl %esi
221 popl %edi
222 popl %ebp
223 popl %eax
224 addl $4,%esp # skip the orig_eax slot
225 FAULT5: iret
227 ALIGN
/*
 * Return from an interrupt/exception that occurred within Xen itself:
 * segment registers were never touched, so just pop the GPRs, discard
 * the orig_eax slot and iret.
 */
228 restore_all_xen:
229 popl %ebx
230 popl %ecx
231 popl %edx
232 popl %esi
233 popl %edi
234 popl %ebp
235 popl %eax
236 addl $4,%esp
237 iret
239 ALIGN
/*
 * Hypercall entry from the guest.  %eax holds the hypercall number; it
 * is masked to a valid hypercall_table index before the indirect call.
 * The handler's return value is written back into the saved %eax slot
 * so the guest sees it after iret.
 */
240 ENTRY(hypercall)
241 pushl %eax # save orig_eax
242 SAVE_ALL(b)
243 sti
244 GET_CURRENT(%ebx)
245 andl $(NR_hypercalls-1),%eax
246 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
248 ret_from_hypercall:
249 movl %eax,XREGS_eax(%esp) # save the return value
/*
 * Common pre-return checks.  %ebx == current task_struct.  First run
 * any pending softirqs, then, if the guest has an unmasked pending
 * upcall, build a bounce frame for its event callback.  Interrupts are
 * disabled across the tests so new events cannot slip in unnoticed.
 */
251 test_all_events:
252 xorl %ecx,%ecx
253 notl %ecx # %ecx = ~0: test all softirq bits at once
254 cli # tests must not race interrupts
255 /*test_softirqs:*/
256 movl DOMAIN_processor(%ebx),%eax
257 shl $6,%eax # sizeof(irq_cpustat) == 64
258 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
259 jnz process_softirqs
260 /*test_guest_events:*/
261 movl DOMAIN_shared_info(%ebx),%eax
262 testb $0xFF,SHINFO_upcall_mask(%eax)
263 jnz restore_all_guest # upcalls masked by guest: deliver nothing
264 testb $0xFF,SHINFO_upcall_pending(%eax)
265 jz restore_all_guest # nothing pending
266 movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
267 /*process_guest_events:*/
268 movl DOMAIN_processor(%ebx),%edx
269 shl $4,%edx # sizeof(guest_trap_bounce) == 16
270 lea guest_trap_bounce(%edx),%edx
271 movl DOMAIN_event_addr(%ebx),%eax
272 movl %eax,GTB_eip(%edx)
273 movl DOMAIN_event_sel(%ebx),%eax
274 movw %ax,GTB_cs(%edx)
275 call create_bounce_frame
276 jmp restore_all_guest
278 ALIGN
/* Run pending softirqs with interrupts enabled, then re-test events. */
279 process_softirqs:
280 sti
281 call SYMBOL_NAME(do_softirq)
282 jmp test_all_events
284 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
285 /* {EIP, CS, EFLAGS, [ESP, SS]} */
286 /* %edx == guest_trap_bounce, %ebx == task_struct */
287 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
/*
 * All XREGS_* offsets below carry a +4 because our own return address
 * sits on top of the saved frame.  On return, the saved frame has been
 * rewritten so the iret in restore_all_guest enters the guest handler
 * at GTB_cs:GTB_eip on the new %gs:%esi stack.
 */
288 create_bounce_frame:
289 mov XREGS_cs+4(%esp),%cl
290 test $2,%cl # bit 1 of the return CS RPL: clear iff ring 1
291 jz 1f /* jump if returning to an existing ring-1 activation */
292 /* obtain ss/esp from TSS -- no current ring-1 activations */
293 movl DOMAIN_processor(%ebx),%eax
294 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
295 movl %eax, %ecx
296 shll $7, %ecx
297 shll $13, %eax
298 addl %ecx,%eax # 8320 == 8192 + 128
299 addl $init_tss + 12,%eax # +12: offset of esp1 within the TSS
300 movl (%eax),%esi /* tss->esp1 */
301 FAULT6: movl 4(%eax),%gs /* tss->ss1 */
302 /* base of stack frame must contain ss/esp (inter-priv iret) */
303 subl $8,%esi
304 movl XREGS_esp+4(%esp),%eax
305 FAULT7: movl %eax,%gs:(%esi)
306 movl XREGS_ss+4(%esp),%eax
307 FAULT8: movl %eax,%gs:4(%esi)
308 jmp 2f
309 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
310 movl XREGS_esp+4(%esp),%esi
311 FAULT9: movl XREGS_ss+4(%esp),%gs
312 2: /* Construct a stack frame: EFLAGS, CS/EIP */
313 subl $12,%esi
314 movl XREGS_eip+4(%esp),%eax
315 FAULT10:movl %eax,%gs:(%esi)
316 movl XREGS_cs+4(%esp),%eax
317 FAULT11:movl %eax,%gs:4(%esi)
318 movl XREGS_eflags+4(%esp),%eax
319 FAULT12:movl %eax,%gs:8(%esi)
320 /* Rewrite our stack frame and return to ring 1. */
321 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
322 andl $0xfffcbeff,%eax
323 movl %eax,XREGS_eflags+4(%esp)
324 movl %gs,XREGS_ss+4(%esp)
325 movl %esi,XREGS_esp+4(%esp)
326 movzwl GTB_cs(%edx),%eax
327 movl %eax,XREGS_cs+4(%esp)
328 movl GTB_eip(%edx),%eax
329 movl %eax,XREGS_eip+4(%esp)
330 ret
/*
 * Unrecoverable-fault table.  The fixup chosen depends on what is on
 * the Xen stack at the fault: FAULT6-12 occur inside
 * create_bounce_frame (its return address must be discarded first,
 * fixup2); FAULT1-4/13/14 occur after it returned (fixup3); FAULT5 is
 * the failsafe iret, after the whole frame was unwound (fixup1).
 */
332 .section __ex_table,"a"
333 .align 4
334 .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack
335 .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack
336 .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack
337 .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack
338 .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret
339 .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector
340 .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack
341 .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack
342 .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector
343 .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack
344 .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack
345 .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack
346 .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack
347 .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack
348 .previous
350 # This handler kills domains which experience unrecoverable faults.
351 .section .fixup,"ax"
/* fixup1: the failsafe iret faulted after the frame was unwound --
 * re-create the orig_eax slot and re-save all registers first. */
352 crash_domain_fixup1:
353 subl $4,%esp
354 SAVE_ALL(a)
355 sti
356 jmp domain_crash
/* fixup2: fault inside create_bounce_frame -- drop its return addr. */
357 crash_domain_fixup2:
358 addl $4,%esp
/* fixup3: stack already clean -- crash the domain directly. */
359 crash_domain_fixup3:
360 jmp domain_crash
361 .previous
363 ALIGN
/*
 * If this CPU's guest_trap_bounce has pending flags, deliver the trap
 * to the guest: build the basic bounce frame, then (unless
 * GTBF_TRAP_NOCODE) push the error code and, if GTBF_TRAP_CR2 is set,
 * the faulting %cr2 value below it.  Flags are cleared once delivered.
 * %ebx == current task_struct.
 */
364 process_guest_exception_and_events:
365 movl DOMAIN_processor(%ebx),%eax
366 shl $4,%eax # sizeof(guest_trap_bounce) == 16
367 lea guest_trap_bounce(%eax),%edx
368 testb $~0,GTB_flags(%edx)
369 jz test_all_events # nothing to bounce
370 call create_bounce_frame # just the basic frame
371 mov GTB_flags(%edx),%cl
372 test $GTBF_TRAP_NOCODE,%cl
373 jnz 2f
374 subl $4,%esi # push error_code onto guest frame
375 movl GTB_error_code(%edx),%eax
376 FAULT13:movl %eax,%gs:(%esi)
377 test $GTBF_TRAP_CR2,%cl
378 jz 1f
379 subl $4,%esi # push %cr2 onto guest frame
380 movl GTB_cr2(%edx),%eax
381 FAULT14:movl %eax,%gs:(%esi)
382 1: movl %esi,XREGS_esp(%esp) # account for the extra words
383 2: movb $0,GTB_flags(%edx)
384 jmp test_all_events
386 ALIGN
/*
 * Interrupt return: if we interrupted a guest (return CS RPL != 0),
 * run the event checks; otherwise return straight to Xen context.
 */
387 ENTRY(ret_from_intr)
388 GET_CURRENT(%ebx)
389 movb XREGS_cs(%esp),%al
390 testb $3,%al # return to non-supervisor?
391 jne test_all_events
392 jmp restore_all_xen
/* #DE stub: no hardware error code; falls straight into error_code. */
394 ENTRY(divide_error)
395 pushl $0 # no error code
396 pushl $ SYMBOL_NAME(do_divide_error)
397 ALIGN
/*
 * Common exception tail.  On entry the stack holds the handler address
 * (pushed by the stub, occupying what becomes the XREGS_eax slot), the
 * error code (the XREGS_orig_eax slot) and the hardware iret frame.
 * Segment registers are saved only if we came from a guest (CS RPL
 * != 0); then Xen's segments are loaded, the handler is called with
 * (xen_regs *, error_code), and we return via restore_all_xen or the
 * guest-exception delivery path.
 */
398 error_code:
399 cld
400 pushl %ebp
401 pushl %edi
402 pushl %esi
403 pushl %edx
404 pushl %ecx
405 pushl %ebx
406 movb XREGS_cs(%esp),%bl
407 testb $3,%bl
408 je 1f # fault in Xen itself: skip segment saves
409 movl %ds,XREGS_ds(%esp)
410 movl %es,XREGS_es(%esp)
411 movl %fs,XREGS_fs(%esp)
412 movl %gs,XREGS_gs(%esp)
413 1: SET_XEN_SEGMENTS(b)
414 movl XREGS_orig_eax(%esp),%esi # get the error code
415 movl XREGS_eax(%esp),%edi # get the function address
416 movl %eax,XREGS_eax(%esp) # %eax was untouched until now: save it
417 movl %esp,%edx
418 pushl %esi # push the error code
419 pushl %edx # push the xen_regs pointer
420 GET_CURRENT(%ebx)
421 call *%edi
422 addl $8,%esp
423 movb XREGS_cs(%esp),%al
424 testb $3,%al
425 je restore_all_xen
426 jmp process_guest_exception_and_events
/*
 * Exception entry stubs.  Each pushes a dummy error code of 0 when the
 * CPU does not supply one, then the C handler's address, and joins the
 * common error_code path above.  Exceptions whose hardware pushes a
 * real error code (invalid_TSS through alignment_check, page_fault)
 * push only the handler address.
 */
428 ENTRY(coprocessor_error)
429 pushl $0
430 pushl $ SYMBOL_NAME(do_coprocessor_error)
431 jmp error_code
433 ENTRY(simd_coprocessor_error)
434 pushl $0
435 pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
436 jmp error_code
438 ENTRY(device_not_available)
439 pushl $0
440 pushl $SYMBOL_NAME(math_state_restore)
441 jmp error_code
443 ENTRY(debug)
444 pushl $0
445 pushl $ SYMBOL_NAME(do_debug)
446 jmp error_code
448 ENTRY(int3)
449 pushl $0
450 pushl $ SYMBOL_NAME(do_int3)
451 jmp error_code
453 ENTRY(overflow)
454 pushl $0
455 pushl $ SYMBOL_NAME(do_overflow)
456 jmp error_code
458 ENTRY(bounds)
459 pushl $0
460 pushl $ SYMBOL_NAME(do_bounds)
461 jmp error_code
463 ENTRY(invalid_op)
464 pushl $0
465 pushl $ SYMBOL_NAME(do_invalid_op)
466 jmp error_code
468 ENTRY(coprocessor_segment_overrun)
469 pushl $0
470 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
471 jmp error_code
473 ENTRY(invalid_TSS)
474 pushl $ SYMBOL_NAME(do_invalid_TSS)
475 jmp error_code
477 ENTRY(segment_not_present)
478 pushl $ SYMBOL_NAME(do_segment_not_present)
479 jmp error_code
481 ENTRY(stack_segment)
482 pushl $ SYMBOL_NAME(do_stack_segment)
483 jmp error_code
485 ENTRY(general_protection)
486 pushl $ SYMBOL_NAME(do_general_protection)
487 jmp error_code
489 ENTRY(alignment_check)
490 pushl $ SYMBOL_NAME(do_alignment_check)
491 jmp error_code
493 ENTRY(page_fault)
494 pushl $ SYMBOL_NAME(do_page_fault)
495 jmp error_code
497 ENTRY(machine_check)
498 pushl $0
499 pushl $ SYMBOL_NAME(do_machine_check)
500 jmp error_code
502 ENTRY(spurious_interrupt_bug)
503 pushl $0
504 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
505 jmp error_code
/*
 * Non-maskable interrupt.  Port 0x61 distinguishes memory-parity
 * (bit 7) and I/O-check (bit 6) errors from the watchdog tick.
 * Segment registers are deliberately NOT saved/reloaded here; the
 * checks below only proceed to C code when that is provably safe.
 */
507 ENTRY(nmi)
508 # Save state but do not trash the segment registers!
509 # We may otherwise be unable to reload them or copy them to ring 1.
510 pushl %eax
511 SAVE_ALL_NOSEGREGS(a)
513 # Check for hardware problems.
514 inb $0x61,%al
515 testb $0x80,%al
516 jne nmi_parity_err
517 testb $0x40,%al
518 jne nmi_io_err
519 movl %eax,%ebx # stash NMI reason byte for do_nmi below
521 # Okay, its almost a normal NMI tick. We can only process it if:
522 # A. We are the outermost Xen activation (in which case we have
523 # the selectors safely saved on our stack)
524 # B. DS-GS all contain sane Xen values.
525 # In all other cases we bail without touching DS-GS, as we have
526 # interrupted an enclosing Xen activation in tricky prologue or
527 # epilogue code.
528 movb XREGS_cs(%esp),%al
529 testb $3,%al
530 jne do_watchdog_tick # case A: interrupted a guest
531 movl XREGS_ds(%esp),%eax
532 cmpw $(__HYPERVISOR_DS),%ax
533 jne restore_all_xen
534 movl XREGS_es(%esp),%eax
535 cmpw $(__HYPERVISOR_DS),%ax
536 jne restore_all_xen
537 movl XREGS_fs(%esp),%eax
538 cmpw $(__HYPERVISOR_DS),%ax
539 jne restore_all_xen
540 movl XREGS_gs(%esp),%eax
541 cmpw $(__HYPERVISOR_DS),%ax
542 jne restore_all_xen
/* Safe to call C: load Xen data segments and hand off to do_nmi. */
544 do_watchdog_tick:
545 movl $(__HYPERVISOR_DS),%edx
546 movl %edx,%ds
547 movl %edx,%es
548 movl %esp,%edx
549 pushl %ebx # reason
550 pushl %edx # regs
551 call SYMBOL_NAME(do_nmi)
552 addl $8,%esp
553 movb XREGS_cs(%esp),%al
554 testb $3,%al
555 je restore_all_xen
556 GET_CURRENT(%ebx)
557 jmp restore_all_guest
/*
 * Memory-parity NMI.  Clear/disable the parity line on port 0x61, then
 * honour the 'nmi=' option: ignore, dom0 (raise NMI_SOFTIRQ and
 * return), or fatal (call mem_parity_error).  %ss: overrides are used
 * because %ds/%es were not reloaded on this path.
 */
559 nmi_parity_err:
560 # Clear and disable the parity-error line
561 andb $0xf,%al
562 orb $0x4,%al
563 outb %al,$0x61
564 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
565 je restore_all_xen
566 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason) # reason bit 0: parity
567 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
568 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
569 je restore_all_xen
570 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
571 movl %edx,%ds
572 movl %edx,%es
573 movl %esp,%edx
574 push %edx
575 call SYMBOL_NAME(mem_parity_error)
576 addl $4,%esp
577 jmp ret_from_intr
/*
 * I/O-check NMI.  Mirrors nmi_parity_err: clear/disable the I/O-error
 * line on port 0x61, then honour 'nmi=' -- ignore, dom0 (raise
 * NMI_SOFTIRQ and return), or fatal (call io_check_error).
 */
579 nmi_io_err:
580 # Clear and disable the I/O-error line
581 andb $0xf,%al
582 orb $0x8,%al
583 outb %al,$0x61
584 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
585 je restore_all_xen
586 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason) # reason bit 1: I/O check
587 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
588 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
589 je restore_all_xen
590 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
591 movl %edx,%ds
592 movl %edx,%es
593 movl %esp,%edx
594 push %edx
595 call SYMBOL_NAME(io_check_error)
596 addl $4,%esp
597 jmp ret_from_intr
599 .data
/*
 * Hypercall dispatch table, indexed by hypercall number (call sites
 * mask with NR_hypercalls-1, so every index lands in the table).
 * Unused slots are padded with do_ni_hypercall by the .rept below.
 */
600 ENTRY(hypercall_table)
601 .long SYMBOL_NAME(do_set_trap_table) /* 0 */
602 .long SYMBOL_NAME(do_mmu_update)
603 .long SYMBOL_NAME(do_set_gdt)
604 .long SYMBOL_NAME(do_stack_switch)
605 .long SYMBOL_NAME(do_set_callbacks)
606 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
607 .long SYMBOL_NAME(do_sched_op)
608 .long SYMBOL_NAME(do_dom0_op)
609 .long SYMBOL_NAME(do_set_debugreg)
610 .long SYMBOL_NAME(do_get_debugreg)
611 .long SYMBOL_NAME(do_update_descriptor) /* 10 */
612 .long SYMBOL_NAME(do_set_fast_trap)
613 .long SYMBOL_NAME(do_dom_mem_op)
614 .long SYMBOL_NAME(do_multicall)
615 .long SYMBOL_NAME(do_update_va_mapping)
616 .long SYMBOL_NAME(do_set_timer_op) /* 15 */
617 .long SYMBOL_NAME(do_event_channel_op)
618 .long SYMBOL_NAME(do_xen_version)
619 .long SYMBOL_NAME(do_console_io)
620 .long SYMBOL_NAME(do_physdev_op)
621 .long SYMBOL_NAME(do_grant_table_op) /* 20 */
622 .long SYMBOL_NAME(do_vm_assist)
623 .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
624 .rept NR_hypercalls-((.-hypercall_table)/4)
625 .long SYMBOL_NAME(do_ni_hypercall)
626 .endr