debuggers.hg

view xen/arch/x86/x86_32/entry.S @ 2375:3145fa096b1a

bitkeeper revision 1.1159.45.17 (412b0f07nrZVpzBQ0MnEcFNcQUolbw)

More grant-table code. Various cleanups and speedups.
author kaf24@scramble.cl.cam.ac.uk
date Tue Aug 24 09:48:55 2004 +0000 (2004-08-24)
parents 4678a5d8fc54
children c326283ef029
line source
1 /*
2 * Hypercall and fault low-level handling routines.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 * Copyright (c) 1991, 1992 Linus Torvalds
6 */
8 /*
9 * The idea for callbacks to guest OSes
10 * ====================================
11 *
12 * First, we require that all callbacks (either via a supplied
13 * interrupt-descriptor-table, or via the special event or failsafe callbacks
14 * in the shared-info-structure) are to ring 1. This just makes life easier,
15 * in that it means we don't have to do messy GDT/LDT lookups to find
16 * out the privilege level of the return code-selector. That code
17 * would just be a hassle to write, and would need to account for running
18 * off the end of the GDT/LDT, for example. For all callbacks we check
19 * that the provided
20 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as
21 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
22 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
23 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
24 * than the correct ring) and bad things are bound to ensue -- IRET is
25 * likely to fault, and we may end up killing the domain (no harm can
26 * come to Xen, though).
27 *
28 * When doing a callback, we check if the return CS is in ring 0. If so,
29 * callback is delayed until next return to ring != 0.
30 * If return CS is in ring 1, then we create a callback frame
31 * starting at return SS/ESP. The base of the frame does an intra-privilege
32 * interrupt-return.
33 * If return CS is in ring > 1, we create a callback frame starting
34 * at SS/ESP taken from appropriate section of the current TSS. The base
35 * of the frame does an inter-privilege interrupt-return.
36 *
37 * Note that the "failsafe callback" uses a special stackframe:
38 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
39 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
40 * That is, original values for DS/ES/FS/GS are placed on stack rather than
41 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
42 * saved/restored in guest OS. Furthermore, if we load them we may cause
43 * a fault if they are invalid, which is a hassle to deal with. We avoid
44 * that problem if we don't load them :-) This property allows us to use
45 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
46 * on return to ring != 0, we can simply package it up as a return via
47 * the failsafe callback, and let the guest OS sort it out (perhaps by
48 * killing an application process). Note that we also do this for any
49 * faulting IRET -- just let the guest OS handle it via the event
50 * callback.
51 *
52 * We terminate a domain in the following cases:
53 * - creating a callback stack frame (due to bad ring-1 stack).
54 * - faulting IRET on entry to failsafe callback handler.
55 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
56 * handler in good order (absolutely no faults allowed!).
57 */
59 #include <xen/config.h>
60 #include <xen/errno.h>
61 #include <xen/softirq.h>
62 #include <hypervisor-ifs/hypervisor-if.h>
64 EBX = 0x00
65 ECX = 0x04
66 EDX = 0x08
67 ESI = 0x0C
68 EDI = 0x10
69 EBP = 0x14
70 EAX = 0x18
71 DS = 0x1C
72 ES = 0x20
73 FS = 0x24
74 GS = 0x28
75 ORIG_EAX = 0x2C
76 EIP = 0x30
77 CS = 0x34
78 EFLAGS = 0x38
79 OLDESP = 0x3C
80 OLDSS = 0x40
82 /* Offsets in domain structure */
83 PROCESSOR = 0
84 SHARED_INFO = 4
85 EVENT_SEL = 8
86 EVENT_ADDR = 12
87 FAILSAFE_BUFFER = 16
88 FAILSAFE_SEL = 32
89 FAILSAFE_ADDR = 36
91 /* Offsets in shared_info_t */
92 #define UPCALL_PENDING /* 0 */
93 #define UPCALL_MASK 1
95 /* Offsets in guest_trap_bounce */
96 GTB_ERROR_CODE = 0
97 GTB_CR2 = 4
98 GTB_FLAGS = 8
99 GTB_CS = 10
100 GTB_EIP = 12
101 GTBF_TRAP = 1
102 GTBF_TRAP_NOCODE = 2
103 GTBF_TRAP_CR2 = 4
105 CF_MASK = 0x00000001
106 IF_MASK = 0x00000200
107 NT_MASK = 0x00004000
109 #define SAVE_ALL_NOSEGREGS \
110 cld; \
111 pushl %gs; \
112 pushl %fs; \
113 pushl %es; \
114 pushl %ds; \
115 pushl %eax; \
116 pushl %ebp; \
117 pushl %edi; \
118 pushl %esi; \
119 pushl %edx; \
120 pushl %ecx; \
121 pushl %ebx; \
123 #define SAVE_ALL \
124 SAVE_ALL_NOSEGREGS \
125 movl $(__HYPERVISOR_DS),%edx; \
126 movl %edx,%ds; \
127 movl %edx,%es; \
128 movl %edx,%fs; \
129 movl %edx,%gs; \
130 sti;
132 #define GET_CURRENT(reg) \
133 movl $4096-4, reg; \
134 orl %esp, reg; \
135 andl $~3,reg; \
136 movl (reg),reg;
138 ENTRY(continue_nonidle_task)
139 GET_CURRENT(%ebx)
140 jmp test_all_events
142 ALIGN
143 /*
144 * HYPERVISOR_multicall(call_list, nr_calls)
145 * Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'.
146 * This is fairly easy except that:
147 * 1. We may fault reading the call list, and must patch that up; and
148 * 2. We cannot recursively call HYPERVISOR_multicall, or a malicious
149 * caller could cause our stack to blow up.
150 */
151 do_multicall:
152 popl %eax
153 cmpl $SYMBOL_NAME(multicall_return_from_call),%eax
154 je multicall_return_from_call
155 pushl %ebx
156 movl 4(%esp),%ebx /* EBX == call_list */
157 movl 8(%esp),%ecx /* ECX == nr_calls */
158 multicall_loop:
159 pushl %ecx
160 multicall_fault1:
161 pushl 20(%ebx) # args[4]
162 multicall_fault2:
163 pushl 16(%ebx) # args[3]
164 multicall_fault3:
165 pushl 12(%ebx) # args[2]
166 multicall_fault4:
167 pushl 8(%ebx) # args[1]
168 multicall_fault5:
169 pushl 4(%ebx) # args[0]
170 multicall_fault6:
171 movl (%ebx),%eax # op
172 andl $(NR_hypercalls-1),%eax
173 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
174 multicall_return_from_call:
175 multicall_fault7:
176 movl %eax,24(%ebx) # args[5] == result
177 addl $20,%esp
178 popl %ecx
179 addl $(ARGS_PER_MULTICALL_ENTRY*4),%ebx
180 loop multicall_loop
181 popl %ebx
182 xorl %eax,%eax
183 jmp ret_from_hypercall
185 .section __ex_table,"a"
186 .align 4
187 .long multicall_fault1, multicall_fixup1
188 .long multicall_fault2, multicall_fixup2
189 .long multicall_fault3, multicall_fixup3
190 .long multicall_fault4, multicall_fixup4
191 .long multicall_fault5, multicall_fixup5
192 .long multicall_fault6, multicall_fixup6
193 .previous
195 .section .fixup,"ax"
196 multicall_fixup6:
197 addl $4,%esp
198 multicall_fixup5:
199 addl $4,%esp
200 multicall_fixup4:
201 addl $4,%esp
202 multicall_fixup3:
203 addl $4,%esp
204 multicall_fixup2:
205 addl $4,%esp
206 multicall_fixup1:
207 addl $4,%esp
208 popl %ebx
209 movl $-EFAULT,%eax
210 jmp ret_from_hypercall
211 .previous
213 ALIGN
214 restore_all_guest:
215 # First, may need to restore %ds if clobbered by create_bounce_frame
216 pushl %ss
217 popl %ds
218 # Second, create a failsafe copy of DS,ES,FS,GS in case any are bad
219 leal DS(%esp),%esi
220 leal FAILSAFE_BUFFER(%ebx),%edi
221 movsl
222 movsl
223 movsl
224 movsl
225 # Finally, restore guest registers -- faults will cause failsafe
226 popl %ebx
227 popl %ecx
228 popl %edx
229 popl %esi
230 popl %edi
231 popl %ebp
232 popl %eax
233 1: popl %ds
234 2: popl %es
235 3: popl %fs
236 4: popl %gs
237 addl $4,%esp
238 5: iret
239 .section .fixup,"ax"
240 10: subl $4,%esp
241 pushl %gs
242 9: pushl %fs
243 8: pushl %es
244 7: pushl %ds
245 6: pushl %eax
246 pushl %ebp
247 pushl %edi
248 pushl %esi
249 pushl %edx
250 pushl %ecx
251 pushl %ebx
252 pushl %ss
253 popl %ds
254 pushl %ss
255 popl %es
256 jmp failsafe_callback
257 .previous
258 .section __ex_table,"a"
259 .align 4
260 .long 1b,6b
261 .long 2b,7b
262 .long 3b,8b
263 .long 4b,9b
264 .long 5b,10b
265 .previous
267 /* No special register assumptions */
268 failsafe_callback:
269 GET_CURRENT(%ebx)
270 movl PROCESSOR(%ebx),%eax
271 shl $4,%eax
272 lea guest_trap_bounce(%eax),%edx
273 movl FAILSAFE_ADDR(%ebx),%eax
274 movl %eax,GTB_EIP(%edx)
275 movl FAILSAFE_SEL(%ebx),%eax
276 movw %ax,GTB_CS(%edx)
277 call create_bounce_frame
278 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
279 leal FAILSAFE_BUFFER(%ebx),%ebp
280 movl 0(%ebp),%eax # DS
281 FAULT1: movl %eax,(%esi)
282 movl 4(%ebp),%eax # ES
283 FAULT2: movl %eax,4(%esi)
284 movl 8(%ebp),%eax # FS
285 FAULT3: movl %eax,8(%esi)
286 movl 12(%ebp),%eax # GS
287 FAULT4: movl %eax,12(%esi)
288 movl %esi,OLDESP(%esp)
289 popl %ebx
290 popl %ecx
291 popl %edx
292 popl %esi
293 popl %edi
294 popl %ebp
295 popl %eax
296 addl $20,%esp # skip DS/ES/FS/GS/ORIG_EAX
297 FAULT5: iret
300 ALIGN
301 # Simple restore -- we should never fault as we we will only interrupt ring 0
302 # when sane values have been placed in all registers. The only exception is
303 # NMI, which may interrupt before good values have been placed in DS-GS.
304 # The NMI return code deals with this problem itself.
305 restore_all_xen:
306 popl %ebx
307 popl %ecx
308 popl %edx
309 popl %esi
310 popl %edi
311 popl %ebp
312 popl %eax
313 popl %ds
314 popl %es
315 popl %fs
316 popl %gs
317 addl $4,%esp
318 iret
320 ALIGN
321 ENTRY(hypercall)
322 pushl %eax # save orig_eax
323 SAVE_ALL
324 GET_CURRENT(%ebx)
325 andl $(NR_hypercalls-1),%eax
326 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
328 ret_from_hypercall:
329 movl %eax,EAX(%esp) # save the return value
331 test_all_events:
332 xorl %ecx,%ecx
333 notl %ecx
334 cli # tests must not race interrupts
335 /*test_softirqs:*/
336 movl PROCESSOR(%ebx),%eax
337 shl $6,%eax # sizeof(irq_cpustat) == 64
338 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
339 jnz process_softirqs
340 /*test_guest_events:*/
341 movl SHARED_INFO(%ebx),%eax
342 testb $0xFF,UPCALL_MASK(%eax)
343 jnz restore_all_guest
344 testb $0xFF,UPCALL_PENDING(%eax)
345 jz restore_all_guest
346 movb $1,UPCALL_MASK(%eax) # Upcalls are masked during delivery
347 /*process_guest_events:*/
348 movl PROCESSOR(%ebx),%edx
349 shl $4,%edx # sizeof(guest_trap_bounce) == 16
350 lea guest_trap_bounce(%edx),%edx
351 movl EVENT_ADDR(%ebx),%eax
352 movl %eax,GTB_EIP(%edx)
353 movl EVENT_SEL(%ebx),%eax
354 movw %ax,GTB_CS(%edx)
355 call create_bounce_frame
356 jmp restore_all_guest
358 ALIGN
359 process_softirqs:
360 sti
361 call SYMBOL_NAME(do_softirq)
362 jmp test_all_events
364 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
365 /* {EIP, CS, EFLAGS, [ESP, SS]} */
366 /* %edx == guest_trap_bounce, %ebx == task_struct */
367 /* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP. */
368 create_bounce_frame:
369 mov CS+4(%esp),%cl
370 test $2,%cl
371 jz 1f /* jump if returning to an existing ring-1 activation */
372 /* obtain ss/esp from TSS -- no current ring-1 activations */
373 movl PROCESSOR(%ebx),%eax
374 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
375 movl %eax, %ecx
376 shll $7, %ecx
377 shll $13, %eax
378 addl %ecx,%eax
379 addl $init_tss + 12,%eax
380 movl (%eax),%esi /* tss->esp1 */
381 FAULT6: movl 4(%eax),%ds /* tss->ss1 */
382 /* base of stack frame must contain ss/esp (inter-priv iret) */
383 subl $8,%esi
384 movl OLDESP+4(%esp),%eax
385 FAULT7: movl %eax,(%esi)
386 movl OLDSS+4(%esp),%eax
387 FAULT8: movl %eax,4(%esi)
388 jmp 2f
389 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
390 movl OLDESP+4(%esp),%esi
391 FAULT9: movl OLDSS+4(%esp),%ds
392 2: /* Construct a stack frame: EFLAGS, CS/EIP */
393 subl $12,%esi
394 movl EIP+4(%esp),%eax
395 FAULT10:movl %eax,(%esi)
396 movl CS+4(%esp),%eax
397 FAULT11:movl %eax,4(%esi)
398 movl EFLAGS+4(%esp),%eax
399 FAULT12:movl %eax,8(%esi)
400 /* Rewrite our stack frame and return to ring 1. */
401 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
402 andl $0xfffcbeff,%eax
403 movl %eax,EFLAGS+4(%esp)
404 movl %ds,OLDSS+4(%esp)
405 movl %esi,OLDESP+4(%esp)
406 movzwl %es:GTB_CS(%edx),%eax
407 movl %eax,CS+4(%esp)
408 movl %es:GTB_EIP(%edx),%eax
409 movl %eax,EIP+4(%esp)
410 ret
413 .section __ex_table,"a"
414 .align 4
415 .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack
416 .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack
417 .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack
418 .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack
419 .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret
420 .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector
421 .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack
422 .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack
423 .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector
424 .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack
425 .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack
426 .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack
427 .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack
428 .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack
429 .previous
431 # This handler kills domains which experience unrecoverable faults.
432 .section .fixup,"ax"
433 crash_domain_fixup1:
434 subl $4,%esp
435 SAVE_ALL
436 jmp domain_crash
437 crash_domain_fixup2:
438 addl $4,%esp
439 crash_domain_fixup3:
440 pushl %ss
441 popl %ds
442 jmp domain_crash
443 .previous
445 ALIGN
446 process_guest_exception_and_events:
447 movl PROCESSOR(%ebx),%eax
448 shl $4,%eax
449 lea guest_trap_bounce(%eax),%edx
450 testb $~0,GTB_FLAGS(%edx)
451 jz test_all_events
452 call create_bounce_frame # just the basic frame
453 mov %es:GTB_FLAGS(%edx),%cl
454 test $GTBF_TRAP_NOCODE,%cl
455 jnz 2f
456 subl $4,%esi # push error_code onto guest frame
457 movl %es:GTB_ERROR_CODE(%edx),%eax
458 FAULT13:movl %eax,(%esi)
459 test $GTBF_TRAP_CR2,%cl
460 jz 1f
461 subl $4,%esi # push %cr2 onto guest frame
462 movl %es:GTB_CR2(%edx),%eax
463 FAULT14:movl %eax,(%esi)
464 1: movl %esi,OLDESP(%esp)
465 2: push %es # unclobber %ds
466 pop %ds
467 movb $0,GTB_FLAGS(%edx)
468 jmp test_all_events
470 ALIGN
471 ENTRY(ret_from_intr)
472 GET_CURRENT(%ebx)
473 movb CS(%esp),%al
474 testb $3,%al # return to non-supervisor?
475 jne test_all_events
476 jmp restore_all_xen
478 ENTRY(divide_error)
479 pushl $0 # no error code
480 pushl $ SYMBOL_NAME(do_divide_error)
481 ALIGN
482 error_code:
483 pushl %fs
484 pushl %es
485 pushl %ds
486 pushl %eax
487 xorl %eax,%eax
488 pushl %ebp
489 pushl %edi
490 pushl %esi
491 pushl %edx
492 decl %eax # eax = -1
493 pushl %ecx
494 pushl %ebx
495 cld
496 movl %gs,%ecx
497 movl ORIG_EAX(%esp), %esi # get the error code
498 movl GS(%esp), %edi # get the function address
499 movl %eax, ORIG_EAX(%esp)
500 movl %ecx, GS(%esp)
501 movl $(__HYPERVISOR_DS),%edx
502 movl %edx,%ds
503 movl %edx,%es
504 movl %edx,%fs
505 movl %edx,%gs
506 movl %esp,%edx
507 pushl %esi # push the error code
508 pushl %edx # push the pt_regs pointer
509 GET_CURRENT(%ebx)
510 call *%edi
511 addl $8,%esp
512 movb CS(%esp),%al
513 testb $3,%al
514 je restore_all_xen
515 jmp process_guest_exception_and_events
517 ENTRY(coprocessor_error)
518 pushl $0
519 pushl $ SYMBOL_NAME(do_coprocessor_error)
520 jmp error_code
522 ENTRY(simd_coprocessor_error)
523 pushl $0
524 pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
525 jmp error_code
527 ENTRY(device_not_available)
528 pushl $0
529 pushl $SYMBOL_NAME(math_state_restore)
530 jmp error_code
532 ENTRY(debug)
533 pushl $0
534 pushl $ SYMBOL_NAME(do_debug)
535 jmp error_code
537 ENTRY(int3)
538 pushl $0
539 pushl $ SYMBOL_NAME(do_int3)
540 jmp error_code
542 ENTRY(overflow)
543 pushl $0
544 pushl $ SYMBOL_NAME(do_overflow)
545 jmp error_code
547 ENTRY(bounds)
548 pushl $0
549 pushl $ SYMBOL_NAME(do_bounds)
550 jmp error_code
552 ENTRY(invalid_op)
553 pushl $0
554 pushl $ SYMBOL_NAME(do_invalid_op)
555 jmp error_code
557 ENTRY(coprocessor_segment_overrun)
558 pushl $0
559 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
560 jmp error_code
562 ENTRY(invalid_TSS)
563 pushl $ SYMBOL_NAME(do_invalid_TSS)
564 jmp error_code
566 ENTRY(segment_not_present)
567 pushl $ SYMBOL_NAME(do_segment_not_present)
568 jmp error_code
570 ENTRY(stack_segment)
571 pushl $ SYMBOL_NAME(do_stack_segment)
572 jmp error_code
574 ENTRY(general_protection)
575 pushl $ SYMBOL_NAME(do_general_protection)
576 jmp error_code
578 ENTRY(alignment_check)
579 pushl $ SYMBOL_NAME(do_alignment_check)
580 jmp error_code
582 ENTRY(page_fault)
583 pushl $ SYMBOL_NAME(do_page_fault)
584 jmp error_code
586 ENTRY(machine_check)
587 pushl $0
588 pushl $ SYMBOL_NAME(do_machine_check)
589 jmp error_code
591 ENTRY(spurious_interrupt_bug)
592 pushl $0
593 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
594 jmp error_code
596 ENTRY(nmi)
597 # Save state but do not trash the segment registers!
598 # We may otherwise be unable to reload them or copy them to ring 1.
599 pushl %eax
600 SAVE_ALL_NOSEGREGS
602 # Check for hardware problems.
603 inb $0x61,%al
604 testb $0x80,%al
605 jne nmi_parity_err
606 testb $0x40,%al
607 jne nmi_io_err
608 movl %eax,%ebx
610 # Okay, its almost a normal NMI tick. We can only process it if:
611 # A. We are the outermost Xen activation (in which case we have
612 # the selectors safely saved on our stack)
613 # B. DS-GS all contain sane Xen values.
614 # In all other cases we bail without touching DS-GS, as we have
615 # interrupted an enclosing Xen activation in tricky prologue or
616 # epilogue code.
617 movb CS(%esp),%al
618 testb $3,%al
619 jne do_watchdog_tick
620 movl DS(%esp),%eax
621 cmpw $(__HYPERVISOR_DS),%ax
622 jne nmi_badseg
623 movl ES(%esp),%eax
624 cmpw $(__HYPERVISOR_DS),%ax
625 jne nmi_badseg
626 movl FS(%esp),%eax
627 cmpw $(__HYPERVISOR_DS),%ax
628 jne nmi_badseg
629 movl GS(%esp),%eax
630 cmpw $(__HYPERVISOR_DS),%ax
631 jne nmi_badseg
633 do_watchdog_tick:
634 movl $(__HYPERVISOR_DS),%edx
635 movl %edx,%ds
636 movl %edx,%es
637 movl %esp,%edx
638 pushl %ebx # reason
639 pushl %edx # regs
640 call SYMBOL_NAME(do_nmi)
641 addl $8,%esp
642 movb CS(%esp),%al
643 testb $3,%al
644 je restore_all_xen
645 GET_CURRENT(%ebx)
646 jmp restore_all_guest
648 nmi_badseg:
649 popl %ebx
650 popl %ecx
651 popl %edx
652 popl %esi
653 popl %edi
654 popl %ebp
655 popl %eax
656 addl $20,%esp
657 iret
659 nmi_parity_err:
660 # Clear and disable the parity-error line
661 andb $0xf,%al
662 orb $0x4,%al
663 outb %al,$0x61
664 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
665 je nmi_badseg
666 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
667 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
668 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
669 je nmi_badseg
670 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
671 movl %edx,%ds
672 movl %edx,%es
673 movl %esp,%edx
674 push %edx
675 call SYMBOL_NAME(mem_parity_error)
676 addl $4,%esp
677 jmp ret_from_intr
679 nmi_io_err:
680 # Clear and disable the I/O-error line
681 andb $0xf,%al
682 orb $0x8,%al
683 outb %al,$0x61
684 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
685 je nmi_badseg
686 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
687 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
688 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
689 je nmi_badseg
690 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
691 movl %edx,%ds
692 movl %edx,%es
693 movl %esp,%edx
694 push %edx
695 call SYMBOL_NAME(io_check_error)
696 addl $4,%esp
697 jmp ret_from_intr
699 .data
700 ENTRY(hypercall_table)
701 .long SYMBOL_NAME(do_set_trap_table) /* 0 */
702 .long SYMBOL_NAME(do_mmu_update)
703 .long SYMBOL_NAME(do_set_gdt)
704 .long SYMBOL_NAME(do_stack_switch)
705 .long SYMBOL_NAME(do_set_callbacks)
706 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
707 .long SYMBOL_NAME(do_sched_op)
708 .long SYMBOL_NAME(do_dom0_op)
709 .long SYMBOL_NAME(do_set_debugreg)
710 .long SYMBOL_NAME(do_get_debugreg)
711 .long SYMBOL_NAME(do_update_descriptor) /* 10 */
712 .long SYMBOL_NAME(do_set_fast_trap)
713 .long SYMBOL_NAME(do_dom_mem_op)
714 .long SYMBOL_NAME(do_multicall)
715 .long SYMBOL_NAME(do_update_va_mapping)
716 .long SYMBOL_NAME(do_set_timer_op) /* 15 */
717 .long SYMBOL_NAME(do_event_channel_op)
718 .long SYMBOL_NAME(do_xen_version)
719 .long SYMBOL_NAME(do_console_io)
720 .long SYMBOL_NAME(do_physdev_op)
721 .long SYMBOL_NAME(do_grant_table_op) /* 20 */
722 .long SYMBOL_NAME(do_vm_assist)
723 .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
724 .rept NR_hypercalls-((.-hypercall_table)/4)
725 .long SYMBOL_NAME(do_ni_hypercall)
726 .endr