debuggers.hg

view xen/arch/x86/x86_32/entry.S @ 3674:fb875591fd72

bitkeeper revision 1.1159.223.63 (42028527-fv-d9BM0_LRp8UKGP19gQ)

Fix NMI deferral.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Thu Feb 03 20:10:15 2005 +0000 (2005-02-03)
parents a5f1a6abfc46
children 8710698e57e1 2ba061595230
line source
1 /*
2 * Hypercall and fault low-level handling routines.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 * Copyright (c) 1991, 1992 Linus Torvalds
6 *
7 * Calling back to a guest OS:
8 * ===========================
9 *
10 * First, we require that all callbacks (either via a supplied
11 * interrupt-descriptor-table, or via the special event or failsafe callbacks
12 * in the shared-info-structure) are to ring 1. This just makes life easier,
13 * in that it means we don't have to do messy GDT/LDT lookups to find
14 * out what the privilege level of the return code-selector is. That code
15 * would just be a hassle to write, and would need to account for running
16 * off the end of the GDT/LDT, for example. For all callbacks we check
17 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
18 * we're safe as we don't allow a guest OS to install ring-0 privileges into the
19 * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
20 * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
21 * rather than the correct ring) and bad things are bound to ensue -- IRET is
22 * likely to fault, and we may end up killing the domain (no harm can
23 * come to Xen, though).
24 *
25 * When doing a callback, we check if the return CS is in ring 0. If so,
26 * callback is delayed until next return to ring != 0.
27 * If return CS is in ring 1, then we create a callback frame
28 * starting at return SS/ESP. The base of the frame does an intra-privilege
29 * interrupt-return.
30 * If return CS is in ring > 1, we create a callback frame starting
31 * at SS/ESP taken from appropriate section of the current TSS. The base
32 * of the frame does an inter-privilege interrupt-return.
33 *
34 * Note that the "failsafe callback" uses a special stackframe:
35 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
36 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
37 * That is, original values for DS/ES/FS/GS are placed on stack rather than
38 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
39 * saved/restored in guest OS. Furthermore, if we load them we may cause
40 * a fault if they are invalid, which is a hassle to deal with. We avoid
41 * that problem if we don't load them :-) This property allows us to use
42 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
43 * on return to ring != 0, we can simply package it up as a return via
44 * the failsafe callback, and let the guest OS sort it out (perhaps by
45 * killing an application process). Note that we also do this for any
46 * faulting IRET -- just let the guest OS handle it via the event
47 * callback.
48 *
49 * We terminate a domain in the following cases:
50 * - creating a callback stack frame (due to bad ring-1 stack).
51 * - faulting IRET on entry to failsafe callback handler.
52 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
53 * handler in good order (absolutely no faults allowed!).
54 */
56 #include <xen/config.h>
57 #include <xen/errno.h>
58 #include <xen/softirq.h>
59 #include <asm/asm_defns.h>
60 #include <asm/apicdef.h>
61 #include <public/xen.h>
/*
 * GET_CURRENT(reg): load the 'current' task_struct pointer into reg.
 * The pointer is kept in the top word of the (apparently 8KB-aligned)
 * per-cpu stack: (%esp | (8192-4)) & ~3 addresses the highest aligned
 * word of the stack, which is then dereferenced. Clobbers only reg.
 */
63 #define GET_CURRENT(reg) \
64 movl $8192-4, reg; \
65 orl %esp, reg; \
66 andl $~3,reg; \
67 movl (reg),reg;
/*
 * restore_all_guest: return to guest context from the frame at %esp.
 * Expects %ebx == current task_struct. If a failsafe return is pending
 * (TF_failsafe_return set) we divert to failsafe_callback instead.
 * Each segment-register reload (FLT1-4) and the final iret (FLT5) may
 * fault on guest-supplied selectors; the __pre_ex_table entries below
 * route such faults to the FIX1/FIX5 fixup code rather than crashing Xen.
 */
69 ALIGN
70 restore_all_guest:
71 testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
72 jnz failsafe_callback
73 testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
74 jnz restore_all_vm86        # VM86 frames carry their own seg regs
75 FLT1: movl XREGS_ds(%esp),%ds
76 FLT2: movl XREGS_es(%esp),%es
77 FLT3: movl XREGS_fs(%esp),%fs
78 FLT4: movl XREGS_gs(%esp),%gs
79 restore_all_vm86:
80 popl %ebx
81 popl %ecx
82 popl %edx
83 popl %esi
84 popl %edi
85 popl %ebp
86 popl %eax
87 addl $4,%esp        # skip error_code/entry_vector slot
88 FLT5: iret
89 .section .fixup,"ax"
# FIX5: the iret itself faulted after GPRs were popped. Re-extend the
# stack and re-save the GPRs back into a XREGS frame, then fall into FIX1.
90 FIX5: subl $28,%esp
91 pushl 28(%esp) # error_code/entry_vector
92 movl %eax,XREGS_eax+4(%esp)
93 movl %ebp,XREGS_ebp+4(%esp)
94 movl %edi,XREGS_edi+4(%esp)
95 movl %esi,XREGS_esi+4(%esp)
96 movl %edx,XREGS_edx+4(%esp)
97 movl %ecx,XREGS_ecx+4(%esp)
98 movl %ebx,XREGS_ebx+4(%esp)
# FIX1: a segment-register load faulted. Restore sane Xen segments and
# fabricate an exception frame on our own stack so that the fault is
# handled via error_code as if it had been taken at DBLFLT1.
99 FIX1: SET_XEN_SEGMENTS(a)
100 movl %eax,%fs
101 movl %eax,%gs
102 sti
103 popl %esi
104 pushfl # EFLAGS
105 movl $__HYPERVISOR_CS,%eax
106 pushl %eax # CS
107 movl $DBLFLT1,%eax
108 pushl %eax # EIP
109 pushl %esi # error_code/entry_vector
110 jmp error_code
111 DBLFLT1:GET_CURRENT(%ebx)
112 jmp test_all_events
# DBLFIX1: fault while already inside failsafe handling -- fatal if the
# failsafe flag is already set, otherwise retry the return via failsafe.
113 DBLFIX1:GET_CURRENT(%ebx)
114 testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
115 jnz domain_crash # cannot reenter failsafe code
116 orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
117 jmp test_all_events # will return via failsafe code
118 .previous
119 .section __pre_ex_table,"a"
120 .long FLT1,FIX1
121 .long FLT2,FIX1
122 .long FLT3,FIX1
123 .long FLT4,FIX1
124 .long FLT5,FIX5
125 .previous
126 .section __ex_table,"a"
127 .long DBLFLT1,DBLFIX1
128 .previous
/*
 * failsafe_callback: deliver the guest's registered failsafe callback.
 * Builds a TBF_FAILSAFE trap_bounce (eip/cs from the domain's registered
 * failsafe handler) and creates the bounce frame, then restores GPRs and
 * irets to the guest. A fault on this iret (FLT6) re-arms the failsafe
 * flag via FIX6 and re-enters the common FIX5 recovery path.
 */
130 /* No special register assumptions */
131 failsafe_callback:
132 GET_CURRENT(%ebx)
133 andb $~TF_failsafe_return,DOMAIN_thread_flags(%ebx)        # consume the pending flag
134 leal DOMAIN_trap_bounce(%ebx),%edx
135 movl DOMAIN_failsafe_addr(%ebx),%eax
136 movl %eax,TRAPBOUNCE_eip(%edx)
137 movl DOMAIN_failsafe_sel(%ebx),%eax
138 movw %ax,TRAPBOUNCE_cs(%edx)
139 movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
140 call create_bounce_frame
141 popl %ebx
142 popl %ecx
143 popl %edx
144 popl %esi
145 popl %edi
146 popl %ebp
147 popl %eax
148 addl $4,%esp        # skip error_code/entry_vector slot
149 FLT6: iret
150 .section .fixup,"ax"
151 FIX6: pushl %ebx
152 GET_CURRENT(%ebx)
153 orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)        # retry via failsafe path
154 pop %ebx
155 jmp FIX5
156 .section __pre_ex_table,"a"
157 .long FLT6,FIX6
158 .previous
/*
 * restore_all_xen: return to an interrupted Xen (ring-0) context.
 * No segment reloads are needed -- Xen's own segments are still loaded.
 */
160 ALIGN
161 restore_all_xen:
162 popl %ebx
163 popl %ecx
164 popl %edx
165 popl %esi
166 popl %edi
167 popl %ebp
168 popl %eax
169 addl $4,%esp        # skip error_code/entry_vector slot
170 iret
/*
 * ENTRY(hypercall): guest hypercall entry point.
 * %eax holds the hypercall number on entry; it is masked to the table
 * size and dispatched through hypercall_table. On return, the result in
 * %eax is written back into the saved frame and we fall into
 * test_all_events, the common exit path that delivers pending softirqs
 * and guest event upcalls before returning to the guest.
 */
172 ALIGN
173 ENTRY(hypercall)
174 subl $4,%esp        # make room for the error_code/entry_vector slot
175 SAVE_ALL(b)
176 sti
177 GET_CURRENT(%ebx)
178 andl $(NR_hypercalls-1),%eax        # clamp hypercall number to table range
179 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
181 ret_from_hypercall:
182 movl %eax,XREGS_eax(%esp) # save the return value
# test_all_events: %ebx == current. Check softirqs first, then pending
# guest events (unless the guest has upcalls masked), then return.
184 test_all_events:
185 xorl %ecx,%ecx
186 notl %ecx        # %ecx = 0xffffffff, mask for the softirq test below
187 cli # tests must not race interrupts
188 /*test_softirqs:*/
189 movl DOMAIN_processor(%ebx),%eax
190 shl $6,%eax # sizeof(irq_cpustat) == 64
191 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
192 jnz process_softirqs
193 /*test_guest_events:*/
194 movl DOMAIN_shared_info(%ebx),%eax
195 testb $0xFF,SHINFO_upcall_mask(%eax)
196 jnz restore_all_guest        # guest has upcalls masked: deliver nothing
197 testb $0xFF,SHINFO_upcall_pending(%eax)
198 jz restore_all_guest        # nothing pending
199 /*process_guest_events:*/
200 leal DOMAIN_trap_bounce(%ebx),%edx
201 movl DOMAIN_event_addr(%ebx),%eax
202 movl %eax,TRAPBOUNCE_eip(%edx)
203 movl DOMAIN_event_sel(%ebx),%eax
204 movw %ax,TRAPBOUNCE_cs(%edx)
205 movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
206 call create_bounce_frame
207 movl DOMAIN_shared_info(%ebx),%eax
208 movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
209 jmp restore_all_guest
211 ALIGN
212 process_softirqs:
213 sti
214 call SYMBOL_NAME(do_softirq)
215 jmp test_all_events        # re-check: more work may have become pending
/*
 * create_bounce_frame: build an exception/callback frame on the guest
 * stack so that the next return-to-guest enters the handler recorded in
 * the trap_bounce (%edx). The frame layout depends on ring and flags:
 * inter-privilege returns (from ring >1 or VM86) also push SS/ESP taken
 * from the TSS ring-1 fields; TBF_FAILSAFE adds DS/ES/FS/GS; exception
 * flags may add error_code and %cr2. Finally our own saved frame is
 * rewritten so the iret lands in the guest handler. All guest-stack
 * writes (FLT7-27) may fault and are covered by __pre_ex_table -> FIX7.
 */
217 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
218 /* {EIP, CS, EFLAGS, [ESP, SS]} */
219 /* %edx == trap_bounce, %ebx == task_struct */
220 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
221 create_bounce_frame:
222 movl XREGS_eflags+4(%esp),%ecx
223 movb XREGS_cs+4(%esp),%cl        # low byte of %ecx = saved CS (RPL in bits 0-1)
224 testl $(2|X86_EFLAGS_VM),%ecx
225 jz ring1 /* jump if returning to an existing ring-1 activation */
226 /* obtain ss/esp from TSS -- no current ring-1 activations */
227 movl DOMAIN_processor(%ebx),%eax
228 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
229 movl %eax, %ecx
230 shll $7, %ecx
231 shll $13, %eax
232 addl %ecx,%eax        # %eax = cpu * (128 + 8192) = cpu * 8320
233 addl $init_tss + 12,%eax        # +12 = offset of esp1 within the TSS
234 movl (%eax),%esi /* tss->esp1 */
235 FLT7: movl 4(%eax),%gs /* tss->ss1 */
236 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
237 jz nvm86_1
238 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
239 movl XREGS_es+4(%esp),%eax
240 FLT8: movl %eax,%gs:(%esi)
241 movl XREGS_ds+4(%esp),%eax
242 FLT9: movl %eax,%gs:4(%esi)
243 movl XREGS_fs+4(%esp),%eax
244 FLT10: movl %eax,%gs:8(%esi)
245 movl XREGS_gs+4(%esp),%eax
246 FLT11: movl %eax,%gs:12(%esi)
247 nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */
248 movl XREGS_esp+4(%esp),%eax
249 FLT12: movl %eax,%gs:(%esi)
250 movl XREGS_ss+4(%esp),%eax
251 FLT13: movl %eax,%gs:4(%esi)
252 jmp 1f
253 ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
254 movl XREGS_esp+4(%esp),%esi
255 FLT14: movl XREGS_ss+4(%esp),%gs
256 1: /* Construct a stack frame: EFLAGS, CS/EIP */
257 subl $12,%esi
258 movl XREGS_eip+4(%esp),%eax
259 FLT15: movl %eax,%gs:(%esi)
260 movl XREGS_cs+4(%esp),%eax
261 FLT16: movl %eax,%gs:4(%esi)
262 movl XREGS_eflags+4(%esp),%eax
263 FLT17: movl %eax,%gs:8(%esi)
264 movb TRAPBOUNCE_flags(%edx),%cl
265 test $TBF_EXCEPTION_ERRCODE,%cl
266 jz 1f
267 subl $4,%esi # push error_code onto guest frame
268 movl TRAPBOUNCE_error_code(%edx),%eax
269 FLT18: movl %eax,%gs:(%esi)
270 testb $TBF_EXCEPTION_CR2,%cl
271 jz 2f
272 subl $4,%esi # push %cr2 onto guest frame
273 movl TRAPBOUNCE_cr2(%edx),%eax
274 FLT19: movl %eax,%gs:(%esi)
275 1: testb $TBF_FAILSAFE,%cl
276 jz 2f
277 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
278 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
279 jz nvm86_2
280 xorl %eax,%eax # VM86: we write zero selector values
281 FLT20: movl %eax,%gs:(%esi)
282 FLT21: movl %eax,%gs:4(%esi)
283 FLT22: movl %eax,%gs:8(%esi)
284 FLT23: movl %eax,%gs:12(%esi)
285 jmp 2f
286 nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values
287 FLT24: movl %eax,%gs:(%esi)
288 movl XREGS_es+4(%esp),%eax
289 FLT25: movl %eax,%gs:4(%esi)
290 movl XREGS_fs+4(%esp),%eax
291 FLT26: movl %eax,%gs:8(%esi)
292 movl XREGS_gs+4(%esp),%eax
293 FLT27: movl %eax,%gs:12(%esi)
294 2: movb $0,TRAPBOUNCE_flags(%edx)        # mark the bounce as consumed
295 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
296 jz nvm86_3
297 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
298 movl %eax,XREGS_ds+4(%esp)
299 movl %eax,XREGS_es+4(%esp)
300 movl %eax,XREGS_fs+4(%esp)
301 movl %eax,XREGS_gs+4(%esp)
302 nvm86_3:/* Rewrite our stack frame and return to ring 1. */
303 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
304 andl $0xfffcbeff,XREGS_eflags+4(%esp)
305 movl %gs,XREGS_ss+4(%esp)
306 movl %esi,XREGS_esp+4(%esp)
307 movzwl TRAPBOUNCE_cs(%edx),%eax
308 movl %eax,XREGS_cs+4(%esp)
309 movl TRAPBOUNCE_eip(%edx),%eax
310 movl %eax,XREGS_eip+4(%esp)
311 ret
312 .section .fixup,"ax"
# FIX7: a write to the guest stack faulted (bad ring-1 stack). Fabricate
# an exception frame on the Xen stack and handle it via error_code; the
# __ex_table entry below crashes the domain if DBLFLT2 itself faults.
313 FIX7: sti
314 popl %esi
315 addl $4,%esp # Discard create_b_frame return address
316 pushfl # EFLAGS
317 movl $__HYPERVISOR_CS,%eax
318 pushl %eax # CS
319 movl $DBLFLT2,%eax
320 pushl %eax # EIP
321 pushl %esi # error_code/entry_vector
322 jmp error_code
323 DBLFLT2:jmp process_guest_exception_and_events
324 .previous
325 .section __pre_ex_table,"a"
326 .long FLT7,FIX7 , FLT8,FIX7 , FLT9,FIX7 , FLT10,FIX7
327 .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
328 .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
329 .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
330 .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
331 .previous
332 .section __ex_table,"a"
333 .long DBLFLT2,domain_crash
334 .previous
/*
 * process_guest_exception_and_events: if the current trap_bounce carries
 * a pending exception, build its guest frame, then run the common
 * test_all_events exit path. Interrupts are disabled around
 * create_bounce_frame so pre-exception fixups cannot be raced.
 */
336 ALIGN
337 process_guest_exception_and_events:
338 leal DOMAIN_trap_bounce(%ebx),%edx
339 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
340 jz test_all_events
341 cli # create_bounce_frame needs CLI for pre-exceptions to work
342 call create_bounce_frame
343 jmp test_all_events
/*
 * ENTRY(ret_from_intr): common interrupt-exit path. If the interrupted
 * context was a guest (CPL != 0 or VM86), run the full event-delivery
 * exit; if it was Xen itself, return directly.
 */
345 ALIGN
346 ENTRY(ret_from_intr)
347 GET_CURRENT(%ebx)
348 movl XREGS_eflags(%esp),%eax
349 movb XREGS_cs(%esp),%al        # low byte = saved CS (RPL bits)
350 testl $(3|X86_EFLAGS_VM),%eax
351 jnz test_all_events
352 jmp restore_all_xen
/*
 * error_code: common exception dispatch. Entered from the trap stubs
 * with error_code/entry_vector already on the stack. Saves registers,
 * dispatches through exception_table, then returns either to Xen or via
 * the guest exception/event path. The ints-disabled variants handle
 * exceptions taken while Xen itself had interrupts off: a pre-exception
 * fixup is looked up for the faulting EIP, and if none exists the trap
 * is fatal.
 */
354 ENTRY(divide_error)
355 pushl $TRAP_divide_error<<16        # vector in high word, zero error code
356 ALIGN
357 error_code:
358 SAVE_ALL_NOSEGREGS(a)
359 SET_XEN_SEGMENTS(a)
360 testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
361 jz exception_with_ints_disabled
362 1: sti # re-enable interrupts
363 xorl %eax,%eax
364 movw XREGS_entry_vector(%esp),%ax
365 movl %esp,%edx
366 pushl %edx # push the xen_regs pointer
367 GET_CURRENT(%ebx)
368 call *SYMBOL_NAME(exception_table)(,%eax,4)
369 addl $4,%esp
370 movl XREGS_eflags(%esp),%eax
371 movb XREGS_cs(%esp),%al        # low byte = saved CS (RPL bits)
372 testl $(3|X86_EFLAGS_VM),%eax
373 jz restore_all_xen
374 jmp process_guest_exception_and_events
376 exception_with_ints_disabled:
377 movl XREGS_eflags(%esp),%eax
378 movb XREGS_cs(%esp),%al
379 testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
380 jnz 1b # it really does happen!
381 # (e.g., DOM0 X server)
382 pushl XREGS_eip(%esp)
383 call search_pre_exception_table
384 addl $4,%esp
385 testl %eax,%eax # no fixup code for faulting EIP?
386 jz FATAL_exception_with_ints_disabled
387 movl %eax,XREGS_eip(%esp)        # redirect return to the fixup handler
# Shift the whole saved frame down one word so the fixup code finds the
# error_code/entry_vector word on top of the frame when it runs.
388 movl %esp,%esi
389 subl $4,%esp
390 movl %esp,%edi
391 movl $XREGS_kernel_sizeof/4,%ecx
392 rep; movsl # make room for error_code/entry_vector
393 movl XREGS_error_code(%esp),%eax # error_code/entry_vector
394 movl %eax,XREGS_kernel_sizeof(%esp)
395 jmp restore_all_xen # return to fixup code
397 FATAL_exception_with_ints_disabled:
398 xorl %esi,%esi
399 movw XREGS_entry_vector(%esp),%si
400 movl %esp,%edx
401 pushl %edx # push the xen_regs pointer
402 pushl %esi # push the trapnr (entry vector)
403 call SYMBOL_NAME(fatal_trap)
404 ud2        # fatal_trap does not return
/*
 * Trap entry stubs. Vectors without a CPU-pushed error code push a
 * synthetic word with the vector number in the high 16 bits (zero error
 * code); vectors where the CPU pushes an error code instead write the
 * vector number into the high half of the already-pushed word. All fall
 * into the common error_code dispatcher above.
 */
406 ENTRY(coprocessor_error)
407 pushl $TRAP_copro_error<<16
408 jmp error_code
410 ENTRY(simd_coprocessor_error)
411 pushl $TRAP_simd_error<<16
412 jmp error_code
414 ENTRY(device_not_available)
415 pushl $TRAP_no_device<<16
416 jmp error_code
418 ENTRY(debug)
419 pushl $TRAP_debug<<16
420 jmp error_code
422 ENTRY(int3)
423 pushl $TRAP_int3<<16
424 jmp error_code
426 ENTRY(overflow)
427 pushl $TRAP_overflow<<16
428 jmp error_code
430 ENTRY(bounds)
431 pushl $TRAP_bounds<<16
432 jmp error_code
434 ENTRY(invalid_op)
435 pushl $TRAP_invalid_op<<16
436 jmp error_code
438 ENTRY(coprocessor_segment_overrun)
439 pushl $TRAP_copro_seg<<16
440 jmp error_code
442 ENTRY(invalid_TSS)
443 movw $TRAP_invalid_tss,2(%esp)        # CPU pushed an error code; add vector
444 jmp error_code
446 ENTRY(segment_not_present)
447 movw $TRAP_no_segment,2(%esp)
448 jmp error_code
450 ENTRY(stack_segment)
451 movw $TRAP_stack_error,2(%esp)
452 jmp error_code
454 ENTRY(general_protection)
455 movw $TRAP_gp_fault,2(%esp)
456 jmp error_code
458 ENTRY(alignment_check)
459 movw $TRAP_alignment_check,2(%esp)
460 jmp error_code
462 ENTRY(page_fault)
463 movw $TRAP_page_fault,2(%esp)
464 jmp error_code
466 ENTRY(machine_check)
467 pushl $TRAP_machine_check<<16
468 jmp error_code
470 ENTRY(spurious_interrupt_bug)
471 pushl $TRAP_spurious_int<<16
472 jmp error_code
/*
 * ENTRY(nmi): non-maskable interrupt entry. Port 0x61 distinguishes
 * memory-parity (bit 7) and I/O-check (bit 6) errors from a watchdog
 * tick. Segment registers are deliberately NOT saved/reloaded unless it
 * is provably safe (see comments below); otherwise delivery is deferred
 * by sending ourselves a self-IPI at vector TRAP_deferred_nmi, to be
 * taken once the tricky prologue/epilogue code has finished.
 */
474 ENTRY(nmi)
475 # Save state but do not trash the segment registers!
476 # We may otherwise be unable to reload them or copy them to ring 1.
477 pushl %eax
478 SAVE_ALL_NOSEGREGS(a)
480 # Check for hardware problems.
481 inb $0x61,%al
482 testb $0x80,%al
483 jne nmi_parity_err
484 testb $0x40,%al
485 jne nmi_io_err
486 movl %eax,%ebx        # stash port-0x61 status as the 'reason' argument
488 # Okay, it's almost a normal NMI tick. We can only process it if:
489 # A. We are the outermost Xen activation (in which case we have
490 # the selectors safely saved on our stack)
491 # B. DS-GS all contain sane Xen values.
492 # In all other cases we bail without touching DS-GS, as we have
493 # interrupted an enclosing Xen activation in tricky prologue or
494 # epilogue code.
495 movl XREGS_eflags(%esp),%eax
496 movb XREGS_cs(%esp),%al
497 testl $(3|X86_EFLAGS_VM),%eax
498 jnz do_watchdog_tick        # came from guest: selectors are safely saved
499 movl %ds,%eax
500 cmpw $(__HYPERVISOR_DS),%ax
501 jne defer_nmi
502 movl %es,%eax
503 cmpw $(__HYPERVISOR_DS),%ax
504 jne defer_nmi
506 do_watchdog_tick:
507 movl $(__HYPERVISOR_DS),%edx
508 movl %edx,%ds
509 movl %edx,%es
510 movl %esp,%edx
511 pushl %ebx # reason
512 pushl %edx # regs
513 call SYMBOL_NAME(do_nmi)
514 addl $8,%esp
515 movl XREGS_eflags(%esp),%eax
516 movb XREGS_cs(%esp),%al
517 testl $(3|X86_EFLAGS_VM),%eax
518 jz restore_all_xen
519 GET_CURRENT(%ebx)
520 jmp restore_all_guest
522 defer_nmi:
523 movl $FIXMAP_apic_base,%eax
524 # apic_wait_icr_idle()
525 1: movl %ss:APIC_ICR(%eax),%ebx
526 testl $APIC_ICR_BUSY,%ebx
527 jnz 1b
528 # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
529 movl $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
530 TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
531 jmp restore_all_xen
# Hardware-error paths. %ss: overrides are used because DS/ES may not
# hold Xen selectors at this point; only SS is known-good.
533 nmi_parity_err:
534 # Clear and disable the parity-error line
535 andb $0xf,%al
536 orb $0x4,%al
537 outb %al,$0x61
538 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
539 je restore_all_xen
540 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
541 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
542 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
543 je restore_all_xen
544 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
545 movl %edx,%ds
546 movl %edx,%es
547 movl %esp,%edx
548 push %edx
549 call SYMBOL_NAME(mem_parity_error)
550 addl $4,%esp
551 jmp ret_from_intr
553 nmi_io_err:
554 # Clear and disable the I/O-error line
555 andb $0xf,%al
556 orb $0x8,%al
557 outb %al,$0x61
558 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
559 je restore_all_xen
560 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
561 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
562 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
563 je restore_all_xen
564 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
565 movl %edx,%ds
566 movl %edx,%es
567 movl %esp,%edx
568 push %edx
569 call SYMBOL_NAME(io_check_error)
570 addl $4,%esp
571 jmp ret_from_intr
/*
 * setup_vm86_frame: shift the saved frame forward 16 bytes using a
 * recursive assembler macro (18 word-sized push/pop copies), making room
 * for the extra VM86 segment-register slots.
 *
 * do_switch_vm86: hypercall backend that enters VM86 mode. The new
 * VM86 register state is copied from the guest's ring-1 stack (GS:ESI)
 * over our own saved frame, EFLAGS is sanitized (IOPL filtered, VM
 * forced, IF forced on when the guest asked for IOPL 0), and control
 * returns via the normal test_all_events exit. Faulting guest-stack
 * reads crash the domain (see __ex_table below).
 */
574 ENTRY(setup_vm86_frame)
575 # Copies the entire stack frame forwards by 16 bytes.
576 .macro copy_vm86_words count=18
577 .if \count
578 pushl ((\count-1)*4)(%esp)
579 popl ((\count-1)*4)+16(%esp)
580 copy_vm86_words "(\count-1)"
581 .endif
582 .endm
583 copy_vm86_words
584 addl $16,%esp
585 ret
587 do_switch_vm86:
588 # Discard the return address
589 addl $4,%esp
591 movl XREGS_eflags(%esp),%edx        # guest-requested EFLAGS, fixed up below
593 # GS:ESI == Ring-1 stack activation
594 movl XREGS_esp(%esp),%esi
595 VFLT1: movl XREGS_ss(%esp),%gs
597 # ES:EDI == Ring-0 stack activation
598 leal XREGS_eip(%esp),%edi
600 # Restore the hypercall-number-clobbered EAX on our stack frame
601 VFLT2: movl %gs:(%esi),%eax
602 movl %eax,XREGS_eax(%esp)
603 addl $4,%esi
605 # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
606 movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
607 VFLT3: movl %gs:(%esi),%eax
608 stosl
609 addl $4,%esi
610 loop VFLT3
612 # Fix up EFLAGS
613 andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
614 andl $X86_EFLAGS_IOPL,%edx # Ignore attempts to change EFLAGS.IOPL
615 jnz 1f
616 orl $X86_EFLAGS_IF,%edx # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
617 1: orl $X86_EFLAGS_VM,%edx # Force EFLAGS.VM
618 orl %edx,XREGS_eflags(%esp)
620 jmp test_all_events
622 .section __ex_table,"a"
623 .long VFLT1,domain_crash
624 .long VFLT2,domain_crash
625 .long VFLT3,domain_crash
626 .previous
# Dispatch tables. exception_table is indexed by trap vector from
# error_code (NULL slots -- NMI, double fault -- are never dispatched
# through it). hypercall_table is indexed by the masked hypercall number
# from ENTRY(hypercall); unused slots are padded with do_ni_hypercall so
# any in-range index is safe to call. Entry ORDER in both tables is ABI:
# it must match the public vector/hypercall numbering -- do not reorder.
628 .data
630 ENTRY(exception_table)
631 .long SYMBOL_NAME(do_divide_error)
632 .long SYMBOL_NAME(do_debug)
633 .long 0 # nmi
634 .long SYMBOL_NAME(do_int3)
635 .long SYMBOL_NAME(do_overflow)
636 .long SYMBOL_NAME(do_bounds)
637 .long SYMBOL_NAME(do_invalid_op)
638 .long SYMBOL_NAME(math_state_restore)
639 .long 0 # double fault
640 .long SYMBOL_NAME(do_coprocessor_segment_overrun)
641 .long SYMBOL_NAME(do_invalid_TSS)
642 .long SYMBOL_NAME(do_segment_not_present)
643 .long SYMBOL_NAME(do_stack_segment)
644 .long SYMBOL_NAME(do_general_protection)
645 .long SYMBOL_NAME(do_page_fault)
646 .long SYMBOL_NAME(do_spurious_interrupt_bug)
647 .long SYMBOL_NAME(do_coprocessor_error)
648 .long SYMBOL_NAME(do_alignment_check)
649 .long SYMBOL_NAME(do_machine_check)
650 .long SYMBOL_NAME(do_simd_coprocessor_error)
652 ENTRY(hypercall_table)
653 .long SYMBOL_NAME(do_set_trap_table) /* 0 */
654 .long SYMBOL_NAME(do_mmu_update)
655 .long SYMBOL_NAME(do_set_gdt)
656 .long SYMBOL_NAME(do_stack_switch)
657 .long SYMBOL_NAME(do_set_callbacks)
658 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
659 .long SYMBOL_NAME(do_sched_op)
660 .long SYMBOL_NAME(do_dom0_op)
661 .long SYMBOL_NAME(do_set_debugreg)
662 .long SYMBOL_NAME(do_get_debugreg)
663 .long SYMBOL_NAME(do_update_descriptor) /* 10 */
664 .long SYMBOL_NAME(do_set_fast_trap)
665 .long SYMBOL_NAME(do_dom_mem_op)
666 .long SYMBOL_NAME(do_multicall)
667 .long SYMBOL_NAME(do_update_va_mapping)
668 .long SYMBOL_NAME(do_set_timer_op) /* 15 */
669 .long SYMBOL_NAME(do_event_channel_op)
670 .long SYMBOL_NAME(do_xen_version)
671 .long SYMBOL_NAME(do_console_io)
672 .long SYMBOL_NAME(do_physdev_op)
673 .long SYMBOL_NAME(do_grant_table_op) /* 20 */
674 .long SYMBOL_NAME(do_vm_assist)
675 .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
676 .long SYMBOL_NAME(do_switch_vm86)
677 .rept NR_hypercalls-((.-hypercall_table)/4)
678 .long SYMBOL_NAME(do_ni_hypercall)
679 .endr