debuggers.hg -- view of xen/arch/x86/x86_32/entry.S @ 3207:861d3cdc1dc5

bitkeeper revision 1.1159.187.22 (41a89729VHxMK8Tp4qcLrOPCoZrdog)

First cut for VM86 support, based on Stephan Diestelhorst's patches. It
doesn't actually work yet -- it's possible that, e.g., signal delivery is
broken.

author    kaf24@scramble.cl.cam.ac.uk
date      Sat Nov 27 15:03:05 2004 +0000 (2004-11-27)
parents   a46548db5e52
children  08ca2c180189 4580e96f30e1
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out what the privilege level of the return code-selector is. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *    { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *      return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback. (The two frame shapes are sketched in C below.)
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
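
/*
 * Illustrative C sketch of the two frame shapes described above (a sketch
 * only: the struct and field names are assumptions for exposition, not
 * taken from a Xen header). Fields are in memory order, lowest address
 * (top of the guest stack) first:
 *
 *     struct guest_event_frame {          // normal event callback
 *         unsigned long eip, cs, eflags;
 *         unsigned long esp, ss;          // only for inter-privilege return
 *     };
 *
 *     struct guest_failsafe_frame {       // failsafe callback
 *         unsigned long ds, es, fs, gs;   // saved selectors, never loaded
 *         unsigned long eip, cs, eflags;
 *         unsigned long esp, ss;          // only for inter-privilege return
 *     };
 */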
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/x86_32/asm_defns.h>
#include <public/xen.h>

#define GET_CURRENT(reg)   \
        movl $8192-4, reg; \
        orl  %esp, reg;    \
        andl $~3,reg;      \
        movl (reg),reg;
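
/*
 * What GET_CURRENT computes, as a C sketch (a sketch; it assumes the 8KB
 * per-task stack used here, with a pointer to the current task stored in
 * the stack area's topmost word):
 *
 *     struct task_struct *get_current(unsigned long esp)
 *     {
 *         // Round %esp up to the last word of the 8KB-aligned stack
 *         // area, then clear the low bits so the load is word-aligned.
 *         return *(struct task_struct **)((esp | (8192 - 4)) & ~3UL);
 *     }
 */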
ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp  test_all_events

        ALIGN
restore_all_guest:
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz   failsafe_callback
        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
        jnz   restore_all_vm86
FLT1:   movl  XREGS_ds(%esp),%ds
FLT2:   movl  XREGS_es(%esp),%es
FLT3:   movl  XREGS_fs(%esp),%fs
FLT4:   movl  XREGS_gs(%esp),%gs
restore_all_vm86:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                  # error_code/entry_vector
        movl  %eax,XREGS_eax+4(%esp)
        movl  %ebp,XREGS_ebp+4(%esp)
        movl  %edi,XREGS_edi+4(%esp)
        movl  %esi,XREGS_esi+4(%esp)
        movl  %edx,XREGS_edx+4(%esp)
        movl  %ecx,XREGS_ecx+4(%esp)
        movl  %ebx,XREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
DBLFIX1:GET_CURRENT(%ebx)
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz   domain_crash              # cannot reenter failsafe code
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jmp   test_all_events           # will return via failsafe code
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,DBLFIX1
.previous
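
/*
 * How the fault-fixup tables above are used, as a C sketch (an assumption
 * about the mechanism, mirroring the Linux-style exception tables; the
 * actual lookup code, e.g. search_pre_exception_table, lives elsewhere in
 * Xen). Each entry pairs a potentially-faulting instruction (FLT*) with
 * the fixup code (FIX*) to resume at:
 *
 *     struct exception_table_entry { unsigned long insn, fixup; };
 *
 *     unsigned long search_table(const struct exception_table_entry *tbl,
 *                                unsigned long nr, unsigned long eip)
 *     {
 *         for (unsigned long i = 0; i < nr; i++)
 *             if (tbl[i].insn == eip)
 *                 return tbl[i].fixup; // resume here instead of faulting
 *         return 0;                    // no fixup registered: fatal
 *     }
 */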
/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        andb  $~TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        leal  DOMAIN_trap_bounce(%ebx),%edx
        movl  DOMAIN_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  DOMAIN_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT6:   iret
.section .fixup,"ax"
FIX6:   pushl %ebx
        GET_CURRENT(%ebx)
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        pop   %ebx
        jmp   FIX5
.section __pre_ex_table,"a"
        .long FLT6,FIX6
.previous

        ALIGN
restore_all_xen:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl  $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl  $(NR_hypercalls-1),%eax
        call  *SYMBOL_NAME(hypercall_table)(,%eax,4)

ret_from_hypercall:
        movl  %eax,XREGS_eax(%esp)      # save the return value
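
/*
 * The dispatch above, as a C sketch (a sketch: the argument-passing
 * details are simplified away; only the table and the mask come from
 * this file):
 *
 *     typedef long (*hypercall_fn_t)(unsigned long, unsigned long,
 *                                    unsigned long, unsigned long,
 *                                    unsigned long);
 *     extern hypercall_fn_t hypercall_table[NR_hypercalls];
 *
 *     regs->eax = hypercall_table[regs->eax & (NR_hypercalls - 1)](...);
 *
 * Masking with NR_hypercalls-1 keeps any out-of-range hypercall number
 * inside the table, whose unused slots all point at do_ni_hypercall
 * (see the table at the end of this file).
 */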
test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  DOMAIN_processor(%ebx),%eax
        shl   $6,%eax                   # sizeof(irq_cpustat) == 64
        test  %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movl  DOMAIN_shared_info(%ebx),%eax
        testb $0xFF,SHINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,SHINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        leal  DOMAIN_trap_bounce(%ebx),%edx
        movl  DOMAIN_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  DOMAIN_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        movl  DOMAIN_shared_info(%ebx),%eax
        movb  $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
        jmp   restore_all_guest

        ALIGN
process_softirqs:
        sti
        call  SYMBOL_NAME(do_softirq)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:        */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                    */
/* %edx == trap_bounce, %ebx == task_struct                          */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
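/*
 * A minimal C sketch of the routine below (assumptions: push() is a
 * pseudo-helper, and the VM86 and fault-fixup paths are omitted):
 *
 *     if (!(regs->eflags & X86_EFLAGS_VM) && !(regs->cs & 2)) {
 *         esi = regs->esp;  ss = regs->ss;        // existing ring-1 stack
 *     } else {
 *         esi = tss[cpu].esp1;  ss = tss[cpu].ss1; // fresh ring-1 stack
 *     }
 *     push(ss, &esi, regs->eflags);               // build frame top-down
 *     push(ss, &esi, regs->cs);
 *     push(ss, &esi, regs->eip);
 *     if (tb->flags & TBF_EXCEPTION_ERRCODE)
 *         push(ss, &esi, tb->error_code);
 *     regs->ss  = ss;   regs->esp = esi;          // rewrite our own frame
 *     regs->cs  = tb->cs;                         //  so the eventual IRET
 *     regs->eip = tb->eip;                        //  enters the guest handler
 */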
create_bounce_frame:
        movl  XREGS_eflags+4(%esp),%ecx
        movb  XREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl  DOMAIN_processor(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
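        /* 8320 = 8192 + 128 = (1<<13) + (1<<7), hence two shifts + an add */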
        movl  %eax,%ecx
        shll  $7,%ecx
        shll  $13,%eax
        addl  %ecx,%eax
        addl  $init_tss + 12,%eax
        movl  (%eax),%esi               /* tss->esp1 */
FLT7:   movl  4(%eax),%gs               /* tss->ss1 */
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_1
        subl  $16,%esi                  /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  XREGS_es+4(%esp),%eax
FLT8:   movl  %eax,%gs:(%esi)
        movl  XREGS_ds+4(%esp),%eax
FLT9:   movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT10:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT11:  movl  %eax,%gs:12(%esi)
nvm86_1:subl  $8,%esi                   /* push SS/ESP (inter-priv iret) */
        movl  XREGS_esp+4(%esp),%eax
FLT12:  movl  %eax,%gs:(%esi)
        movl  XREGS_ss+4(%esp),%eax
FLT13:  movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  XREGS_esp+4(%esp),%esi
FLT14:  movl  XREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl  $12,%esi
        movl  XREGS_eip+4(%esp),%eax
FLT15:  movl  %eax,%gs:(%esi)
        movl  XREGS_cs+4(%esp),%eax
FLT16:  movl  %eax,%gs:4(%esi)
        movl  XREGS_eflags+4(%esp),%eax
FLT17:  movl  %eax,%gs:8(%esi)
        movb  TRAPBOUNCE_flags(%edx),%cl
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                   # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT18:  movl  %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subl  $4,%esi                   # push %cr2 onto guest frame
        movl  TRAPBOUNCE_cr2(%edx),%eax
FLT19:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                  # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_2
        xorl  %eax,%eax                 # VM86: we write zero selector values
FLT20:  movl  %eax,%gs:(%esi)
FLT21:  movl  %eax,%gs:4(%esi)
FLT22:  movl  %eax,%gs:8(%esi)
FLT23:  movl  %eax,%gs:12(%esi)
        jmp   2f
nvm86_2:movl  XREGS_ds+4(%esp),%eax    # non-VM86: write real selector values
FLT24:  movl  %eax,%gs:(%esi)
        movl  XREGS_es+4(%esp),%eax
FLT25:  movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT26:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT27:  movl  %eax,%gs:12(%esi)
2:      movb  $0,TRAPBOUNCE_flags(%edx)
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_3
        xorl  %eax,%eax                 /* zero DS-GS, just as a real CPU would */
        movl  %eax,XREGS_ds+4(%esp)
        movl  %eax,XREGS_es+4(%esp)
        movl  %eax,XREGS_fs+4(%esp)
        movl  %eax,XREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,XREGS_eflags+4(%esp)
        movl  %gs,XREGS_ss+4(%esp)
        movl  %esi,XREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,XREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,XREGS_eip+4(%esp)
        ret
.section .fixup,"ax"
FIX7:   sti
        popl  %esi
        addl  $4,%esp                   # Discard create_b_frame return address
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT2,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
        .long FLT7,FIX7  , FLT8,FIX7  , FLT9,FIX7  , FLT10,FIX7
        .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
        .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
        .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
        .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
.previous

        ALIGN
process_guest_exception_and_events:
        leal  DOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen
ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
1:      sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  XREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        GET_CURRENT(%ebx)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

exception_with_ints_disabled:
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   1b                        # it really does happen!
                                        #  (e.g., DOM0 X server)
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,XREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $XREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,XREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  XREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  SYMBOL_NAME(fatal_trap)
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code
ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  XREGS_ds(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_es(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_fs(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  XREGS_gs(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                      # reason
        pushl %edx                      # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest
nmi_parity_err:
        # Clear and disable the parity-error line
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je    restore_all_xen
        bts   $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx       # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(mem_parity_error)
        addl  $4,%esp
        jmp   ret_from_intr

nmi_io_err:
        # Clear and disable the I/O-error line
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je    restore_all_xen
        bts   $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx       # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(io_check_error)
        addl  $4,%esp
        jmp   ret_from_intr
ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret
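
/*
 * The recursive .macro above, as a C sketch (same assumption as the macro
 * itself: an 18-word frame, including the return address, that must move
 * up by 16 bytes):
 *
 *     void setup_vm86_frame(unsigned long *frame)
 *     {
 *         for (int i = 17; i >= 0; i--)  // copy the highest word first,
 *             frame[i + 4] = frame[i];   //  so no source word is clobbered
 *     }
 *
 * Each macro expansion is one pushl/popl pair copying word (count-1)
 * forwards by 16 bytes; the final addl $16,%esp then abandons the old
 * bottom of the frame.
 */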
do_switch_vm86:
        # Discard the return address
        addl  $4,%esp

        # Hold the pre-switch EFLAGS in %edx: %ecx is reused below as the
        # copy-loop counter, so it cannot carry this value to the fixup.
        movl  XREGS_eflags(%esp),%edx

        # GS:ESI == Ring-1 stack activation
        movl  XREGS_esp(%esp),%esi
VFLT1:  movl  XREGS_ss(%esp),%gs

        # ES:EDI == Ring-0 stack activation
        leal  XREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2:  movl  %gs:(%esi),%eax
        movl  %eax,XREGS_eax(%esp)
        addl  $4,%esi

        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
        movl  $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
VFLT3:  movl  %gs:(%esi),%eax
        stosl
        addl  $4,%esi
        loop  VFLT3

        # Fix up EFLAGS
        andl  $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
        andl  $X86_EFLAGS_IOPL,%edx     # Ignore attempts to change EFLAGS.IOPL
        jnz   1f
        orl   $X86_EFLAGS_IF,%edx       # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
1:      orl   $X86_EFLAGS_VM,%edx       # Force EFLAGS.VM
        orl   %edx,XREGS_eflags(%esp)

        jmp   test_all_events
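
/*
 * The EFLAGS fixup above, as a C sketch (a sketch; old_eflags is the
 * frame's EFLAGS as saved at hypercall entry, captured before the copy
 * loop overwrites the frame with the guest-supplied VM86 value):
 *
 *     regs->eflags &= ~X86_EFLAGS_IOPL;      // new frame never sets IOPL
 *     old_eflags   &=  X86_EFLAGS_IOPL;      // keep only the previous IOPL
 *     if (old_eflags == 0)
 *         old_eflags = X86_EFLAGS_IF;        // IOPL=0: guest can't clear IF
 *     regs->eflags |= old_eflags | X86_EFLAGS_VM;  // VM86 mode is forced
 */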
.section __ex_table,"a"
        .long VFLT1,domain_crash
        .long VFLT2,domain_crash
        .long VFLT3,domain_crash
.previous

.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)

ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .long SYMBOL_NAME(do_switch_vm86)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr