xen/arch/x86/x86_32/entry.S @ 3136:f1c44a4d4998

BitKeeper revision 1.1159.1.446 (41a48ee42Omqs3zoJHTZPhLlPx5LUw)
Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno/BK/xen-unstable.bk
into arcadians.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xen.bk-smp

author   cl349@arcadians.cl.cam.ac.uk
date     Wed Nov 24 13:38:44 2004 +0000 (2004-11-24)
parents  42bdac6c8985 2754a2ed61c3
children 75f82adfcc90
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find out
 * the privilege level of the return code-selector. That code would just be
 * a hassle to write, and would need to account for running off the end of
 * the GDT/LDT, for example. For all callbacks we check that the provided
 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe, as
 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *    { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *      return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, the original values for DS/ES/FS/GS are placed on the stack
 * rather than in DS/ES/FS/GS themselves. Why? It saves us loading them,
 * only to have them saved/restored in the guest OS. Furthermore, if we load
 * them we may cause a fault if they are invalid, which is a hassle to deal
 * with. We avoid that problem if we don't load them :-) This property allows
 * us to use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/x86_32/asm_defns.h>
#include <public/xen.h>

#define GET_CURRENT(reg)   \
        movl $8192-4, reg; \
        orl  %esp, reg;    \
        andl $~3,reg;      \
        movl (reg),reg;
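/*
 * GET_CURRENT(reg) loads the pointer to the currently-running domain. That
 * pointer lives in the topmost word of the 8KB per-CPU stack, so rounding
 * %esp up to (stack base + 8192 - 4), clearing the low two bits, and
 * dereferencing it yields "current". Roughly, as an illustrative C sketch
 * (not part of this file):
 *
 *     current = *(void **)((esp | (8192 - 4)) & ~3UL);
 */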

ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp   test_all_events

        ALIGN
/*
 * HYPERVISOR_multicall(call_list, nr_calls)
 *   Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'.
 *   This is fairly easy except that:
 *   1. We may fault reading the call list, and must patch that up; and
 *   2. We cannot recursively call HYPERVISOR_multicall, or a malicious
 *      caller could cause our stack to blow up.
 */
#define MULTICALL_ENTRY_ORDER 5
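/*
 * Each multicall entry occupies 1<<MULTICALL_ENTRY_ORDER == 32 bytes. The
 * byte offsets assumed by the loop below are (a summary, not a definition):
 *   0: op, 4: args[0], 8: args[1], 12: args[2], 16: args[3], 20: args[4],
 *   24: args[5] -- overwritten with the hypercall's return value.
 */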
do_multicall:
        popl  %eax
        cmpl  $SYMBOL_NAME(multicall_return_from_call),%eax
        je    multicall_return_from_call
        pushl %ebx
        movl  4(%esp),%ebx             /* EBX == call_list */
        movl  8(%esp),%ecx             /* ECX == nr_calls  */
        /* Ensure the entire multicall list is below HYPERVISOR_VIRT_START. */
        movl  %ecx,%eax
        shll  $MULTICALL_ENTRY_ORDER,%eax
        addl  %ebx,%eax                /* EAX == end of multicall list */
        jc    bad_multicall_address
        cmpl  $__HYPERVISOR_VIRT_START,%eax
        jnc   bad_multicall_address
multicall_loop:
        pushl %ecx
        movl  4(%esp),%ecx             # %ecx = struct domain
        movl  DOMAIN_processor(%ecx),%eax
        shl   $6,%eax                  # sizeof(irq_cpustat) == 64
        testl $~0,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz   multicall_preempt
multicall_fault1:
        pushl 20(%ebx)                 # args[4]
multicall_fault2:
        pushl 16(%ebx)                 # args[3]
multicall_fault3:
        pushl 12(%ebx)                 # args[2]
multicall_fault4:
        pushl 8(%ebx)                  # args[1]
multicall_fault5:
        pushl 4(%ebx)                  # args[0]
multicall_fault6:
        movl  (%ebx),%eax              # op
        andl  $(NR_hypercalls-1),%eax
        call  *SYMBOL_NAME(hypercall_table)(,%eax,4)
multicall_return_from_call:
multicall_fault7:
        movl  %eax,24(%ebx)            # args[5] == result
        addl  $20,%esp
        popl  %ecx
        addl  $(1<<MULTICALL_ENTRY_ORDER),%ebx
        loop  multicall_loop
        popl  %ebx
        xorl  %eax,%eax
        jmp   ret_from_hypercall
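/*
 * Preemption path: a softirq became pending mid-list, so package the
 * remaining calls up as a continuation of __HYPERVISOR_multicall (via
 * hypercall_create_continuation) so that the unprocessed tail is
 * re-executed after the pending softirq work has run.
 */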
multicall_preempt:
        # NB. remaining nr_calls is already at top of stack
        pushl %ebx                     # call_list
        pushl $2                       # nr_args == 2
        pushl $__HYPERVISOR_multicall  # op == __HYPERVISOR_multicall
        call  hypercall_create_continuation
        addl  $16,%esp
        popl  %ebx
        movl  $__HYPERVISOR_multicall,%eax
        jmp   ret_from_hypercall

bad_multicall_address:
        popl  %ebx
        movl  $-EFAULT,%eax
        jmp   ret_from_hypercall

.section __ex_table,"a"
        .align 4
        .long multicall_fault1, multicall_fixup1
        .long multicall_fault2, multicall_fixup2
        .long multicall_fault3, multicall_fixup3
        .long multicall_fault4, multicall_fixup4
        .long multicall_fault5, multicall_fixup5
        .long multicall_fault6, multicall_fixup6
        .long multicall_fault7, multicall_fixup6
.previous
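/*
 * The fixup labels below form a fall-through chain: whichever argument push
 * faulted, the corresponding entry discards exactly the words already pushed
 * (plus the saved %ecx), restores %ebx, and fails the multicall with -EFAULT.
 */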
.section .fixup,"ax"
multicall_fixup6:
        addl  $4,%esp
multicall_fixup5:
        addl  $4,%esp
multicall_fixup4:
        addl  $4,%esp
multicall_fixup3:
        addl  $4,%esp
multicall_fixup2:
        addl  $4,%esp
multicall_fixup1:
        addl  $4,%esp
        popl  %ebx
        movl  $-EFAULT,%eax
        jmp   ret_from_hypercall
.previous
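/*
 * restore_all_guest: reload the guest's segment registers and IRET back to
 * it. Each instruction that may fault here is tagged FLTn and paired, via
 * the __pre_ex_table below, with a FIXn handler that rebuilds a Xen register
 * frame and re-enters the exception path, so the failure is reported to the
 * guest (via the failsafe or event callback) rather than crashing Xen.
 */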
        ALIGN
restore_all_guest:
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz   failsafe_callback
FLT1:   movl  XREGS_ds(%esp),%ds
FLT2:   movl  XREGS_es(%esp),%es
FLT3:   movl  XREGS_fs(%esp),%fs
FLT4:   movl  XREGS_gs(%esp),%gs
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                 # error_code/entry_vector
        movl  %eax,XREGS_eax+4(%esp)
        movl  %ebp,XREGS_ebp+4(%esp)
        movl  %edi,XREGS_edi+4(%esp)
        movl  %esi,XREGS_esi+4(%esp)
        movl  %edx,XREGS_edx+4(%esp)
        movl  %ecx,XREGS_ecx+4(%esp)
        movl  %ebx,XREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
DBLFIX1:GET_CURRENT(%ebx)
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz   domain_crash             # cannot reenter failsafe code
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jmp   test_all_events          # will return via failsafe code
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,DBLFIX1
.previous

        /* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        andb  $~TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        leal  EDOMAIN_trap_bounce(%ebx),%edx
        movl  EDOMAIN_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  EDOMAIN_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT6:   iret
.section .fixup,"ax"
FIX6:   pushl %ebx
        GET_CURRENT(%ebx)
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        pop   %ebx
        jmp   FIX5
.section __pre_ex_table,"a"
        .long FLT6,FIX6
.previous

        ALIGN
restore_all_xen:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl  $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl  $(NR_hypercalls-1),%eax
        call  *SYMBOL_NAME(hypercall_table)(,%eax,4)

ret_from_hypercall:
        movl  %eax,XREGS_eax(%esp)     # save the return value
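/*
 * test_all_events: before returning to the guest, first run any pending
 * softirqs, then check whether a virtual event upcall should be delivered
 * (upcall_pending set and upcall_mask clear). If so, bounce through the
 * guest's registered event callback, masking further upcalls during delivery.
 */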
test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  EDOMAIN_processor(%ebx),%eax
        shl   $6,%eax                  # sizeof(irq_cpustat) == 64
        test  %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movl  EDOMAIN_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        leal  EDOMAIN_trap_bounce(%ebx),%edx
        movl  EDOMAIN_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  EDOMAIN_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        movl  EDOMAIN_vcpu_info(%ebx),%eax
        movb  $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
        jmp   restore_all_guest

        ALIGN
process_softirqs:
        sti
        call  SYMBOL_NAME(do_softirq)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:           */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                       */
/* %edx == trap_bounce, %ebx == task_struct                             */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp.    */
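/*
 * The frame built below, from the base (highest address) downwards, is:
 *   [ SS, ESP ]           -- only for an inter-privilege return
 *   EFLAGS, CS, EIP
 *   [ error_code ]        -- if TBF_EXCEPTION_ERRCODE
 *   [ cr2 ]               -- if TBF_EXCEPTION_CR2
 *   [ GS, FS, ES, DS ]    -- if TBF_FAILSAFE
 * %esi ends up pointing at the lowest word written, which becomes the
 * guest's new stack pointer.
 */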
create_bounce_frame:
        movb  XREGS_cs+4(%esp),%cl
        testb $2,%cl
        jz    1f /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl  EDOMAIN_processor(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
        movl  %eax, %ecx
        shll  $7, %ecx
        shll  $13, %eax
        addl  %ecx,%eax
        addl  $init_tss + 12,%eax
        movl  (%eax),%esi              /* tss->esp1 */
FLT7:   movl  4(%eax),%gs              /* tss->ss1  */
        /* base of stack frame must contain ss/esp (inter-priv iret) */
        subl  $8,%esi
        movl  XREGS_esp+4(%esp),%eax
FLT8:   movl  %eax,%gs:(%esi)
        movl  XREGS_ss+4(%esp),%eax
FLT9:   movl  %eax,%gs:4(%esi)
        jmp   2f
1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  XREGS_esp+4(%esp),%esi
FLT10:  movl  XREGS_ss+4(%esp),%gs
2:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl  $12,%esi
        movl  XREGS_eip+4(%esp),%eax
FLT11:  movl  %eax,%gs:(%esi)
        movl  XREGS_cs+4(%esp),%eax
FLT12:  movl  %eax,%gs:4(%esi)
        movl  XREGS_eflags+4(%esp),%eax
FLT13:  movl  %eax,%gs:8(%esi)
        movb  TRAPBOUNCE_flags(%edx),%cl
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                  # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT14:  movl  %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subl  $4,%esi                  # push %cr2 onto guest frame
        movl  TRAPBOUNCE_cr2(%edx),%eax
FLT15:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                 # add DS/ES/FS/GS to failsafe stack frame
        movl  XREGS_ds+4(%esp),%eax
FLT16:  movl  %eax,%gs:(%esi)
        movl  XREGS_es+4(%esp),%eax
FLT17:  movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT18:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT19:  movl  %eax,%gs:12(%esi)
2:      movb  $0,TRAPBOUNCE_flags(%edx)
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,XREGS_eflags+4(%esp)
        movl  %gs,XREGS_ss+4(%esp)
        movl  %esi,XREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,XREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,XREGS_eip+4(%esp)
        ret
.section .fixup,"ax"
FIX7:   sti
        popl  %esi
        addl  $4,%esp                  # Discard create_b_frame return address
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT2,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
        .long FLT7,FIX7
        .long FLT8,FIX7
        .long FLT9,FIX7
        .long FLT10,FIX7
        .long FLT11,FIX7
        .long FLT12,FIX7
        .long FLT13,FIX7
        .long FLT14,FIX7
        .long FLT15,FIX7
        .long FLT16,FIX7
        .long FLT17,FIX7
        .long FLT18,FIX7
        .long FLT19,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
.previous

        ALIGN
process_guest_exception_and_events:
        leal  EDOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movb  XREGS_cs(%esp),%al
        testb $3,%al                   # return to non-supervisor?
        jnz   test_all_events
        jmp   restore_all_xen
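/*
 * Trap entry stubs. Exceptions that do not supply a hardware error code
 * push a dummy word with the vector in its high 16 bits ($TRAP_xxx<<16);
 * those that do supply one write the vector into the top half of the word
 * already on the stack (movw $TRAP_xxx,2(%esp)). Either way, error_code
 * sees a combined error_code/entry_vector slot.
 */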

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                            # re-enable interrupts
        xorl  %eax,%eax
        movw  XREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                     # push the xen_regs pointer
        GET_CURRENT(%ebx)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
        movb  XREGS_cs(%esp),%al
        testb $3,%al
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

exception_with_ints_disabled:
        movb  XREGS_cs(%esp),%al
        testb $3,%al                   # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,XREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $XREGS_kernel_sizeof/4,%ecx
        rep;  movsl                    # make room for error_code/entry_vector
        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,XREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen          # return to fixup code
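/*
 * The rep/movsl above shifts the saved register frame down by one word and
 * parks the error_code/entry_vector in the slot that this frees at the top.
 * restore_all_xen then "returns" to the pre-exception fixup handler found by
 * search_pre_exception_table, which finds error_code/entry_vector as the top
 * word of its stack.
 */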
FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  XREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                     # push the xen_regs pointer
        pushl %esi                     # push the trapnr (entry vector)
        call  SYMBOL_NAME(fatal_trap)
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movb  XREGS_cs(%esp),%al
        testb $3,%al
        jnz   do_watchdog_tick
        movl  XREGS_ds(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_es(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_fs(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_gs(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                     # reason
        pushl %edx                     # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movb  XREGS_cs(%esp),%al
        testb $3,%al
        jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest

nmi_parity_err:
        # Clear and disable the parity-error line
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)          # nmi=ignore
        je    restore_all_xen
        bts   $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)          # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx                # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(mem_parity_error)
        addl  $4,%esp
        jmp   ret_from_intr

nmi_io_err:
        # Clear and disable the I/O-error line
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)          # nmi=ignore
        je    restore_all_xen
        bts   $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)          # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx                # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(io_check_error)
        addl  $4,%esp
        jmp   ret_from_intr

.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)

ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .long SYMBOL_NAME(do_boot_vcpu)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr