xen/arch/x86/x86_32/entry.S @ 3374:b2fa96909734 (debuggers.hg)
bitkeeper revision 1.1159.1.507 (41d2c267giyCo6LZ_--l9fFOwkMRIQ)
author:  kaf24@scramble.cl.cam.ac.uk
date:    Wed Dec 29 14:42:47 2004 +0000 (2004-12-29)
summary: manual merge
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *     { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *       return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <public/xen.h>
#define GET_CURRENT(reg)   \
        movl $8192-4, reg; \
        orl  %esp, reg;    \
        andl $~3,reg;      \
        movl (reg),reg;
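/*
 * Each CPU stack is 8KB and 8KB-aligned, with a pointer to the current
 * task structure stored in its topmost word. ORing %esp with 8192-4 and
 * clearing the bottom two bits yields the address of that word, e.g.
 * %esp = 0x00347e40  ->  (0x00347e40 | 0x1ffc) & ~3 = 0x00347ffc.
 */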
#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, esp, eip,
 * and eflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. The host's eflags is not saved by VMX,
 * so we set it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared to xen_regs, we skip pushing the following:
 *   (1/1) u16 error_code;
 *   (2/1) u16 entry_vector;
 *   (2)   u32 eip;
 *   (3)   u32 cs;
 *   (4)   u32 eflags;
 */
#define VMX_MONITOR_EFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    4     /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS            \
        pushl $VMX_MONITOR_EFLAGS;        \
        popf;                             \
        subl $(NR_SKIPPED_REGS*4), %esp;  \
        pushl %eax;                       \
        pushl %ebp;                       \
        pushl %edi;                       \
        pushl %esi;                       \
        pushl %edx;                       \
        pushl %ecx;                       \
        pushl %ebx;
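/*
 * After VMX_SAVE_ALL_NOSEGREGS the stack has the shape of a xen_regs
 * frame: the seven GPR slots are live, while the four skipped slots
 * (error_code/entry_vector, eip, cs, eflags) are merely reserved --
 * the real guest values live in the VMCS.
 */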
ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call SYMBOL_NAME(vmx_vmexit_handler)
        jmp  vmx_asm_do_resume
ENTRY(vmx_asm_do_launch)
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
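        /* (raw opcode bytes for VMLAUNCH here and VMRESUME below,
         *  presumably because assemblers of this vintage do not yet
         *  know the VMX mnemonics) */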
        pushf
        call SYMBOL_NAME(vm_launch_fail)
        hlt
        ALIGN

ENTRY(vmx_asm_do_resume)
vmx_test_all_events:
        GET_CURRENT(%ebx)
/* test_all_events: */
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz  vmx_process_softirqs

vmx_restore_all_guest:
        call SYMBOL_NAME(load_cr2)
        /*
         * Check if we are going back to a VMX-based VM.
         * By this time, all the setups in the VMCS must be complete.
         */
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushf
        call SYMBOL_NAME(vm_resume_fail)
        /* Should never reach here */
        hlt

        ALIGN
vmx_process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp  vmx_test_all_events
#endif
ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp  test_all_events
        ALIGN
restore_all_guest:
        testb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jnz   failsafe_callback
        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
        jnz   restore_all_vm86
FLT1:   movl  XREGS_ds(%esp),%ds
FLT2:   movl  XREGS_es(%esp),%es
FLT3:   movl  XREGS_fs(%esp),%fs
FLT4:   movl  XREGS_gs(%esp),%gs
restore_all_vm86:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                  # error_code/entry_vector
        movl  %eax,XREGS_eax+4(%esp)
        movl  %ebp,XREGS_ebp+4(%esp)
        movl  %edi,XREGS_edi+4(%esp)
        movl  %esi,XREGS_esi+4(%esp)
        movl  %edx,XREGS_edx+4(%esp)
        movl  %ecx,XREGS_ecx+4(%esp)
        movl  %ebx,XREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
DBLFIX1:GET_CURRENT(%ebx)
        testb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jnz   domain_crash              # cannot reenter failsafe code
        orb   $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jmp   test_all_events           # will return via failsafe code
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,DBLFIX1
.previous
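/*
 * __pre_ex_table entries map a faulting EIP to fixup code via
 * search_pre_exception_table (for faults taken on the way back out to
 * the guest, with interrupts disabled); __ex_table entries are the
 * ordinary fixups used once an exception handler is already running.
 */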
/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        andb  $~TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        leal  EDOMAIN_trap_bounce(%ebx),%edx
        movl  EDOMAIN_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  EDOMAIN_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
FLT6:   iret
.section .fixup,"ax"
FIX6:   pushl %ebx
        GET_CURRENT(%ebx)
        orb   $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        pop   %ebx
        jmp   FIX5
.section __pre_ex_table,"a"
        .long FLT6,FIX6
.previous
        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret
        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl $(NR_hypercalls-1),%eax
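        # (the AND works as a bounds check because NR_hypercalls is a
        #  power of two; the table below is padded to exactly that size)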
        call *SYMBOL_NAME(hypercall_table)(,%eax,4)

ret_from_hypercall:
        movl %eax,XREGS_eax(%esp)       # save the return value

test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz  process_softirqs
/*test_guest_events:*/
        movl  EDOMAIN_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        leal  EDOMAIN_trap_bounce(%ebx),%edx
        movl  EDOMAIN_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  EDOMAIN_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        movl  EDOMAIN_vcpu_info(%ebx),%eax
        movb  $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
        jmp   restore_all_guest

        ALIGN
process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp  test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:           */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                       */
/* %edx == trap_bounce, %ebx == task_struct                             */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp.    */
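/*
 * The frame is built top-down; which fields are present depends on the
 * return mode and on TRAPBOUNCE_flags (shown lowest address first):
 *   [DS ES FS GS]   -- TBF_FAILSAFE only
 *   [CR2]           -- TBF_EXCEPTION_CR2 only
 *   [error_code]    -- TBF_EXCEPTION_ERRCODE only
 *    EIP CS EFLAGS
 *   [ESP SS]        -- inter-privilege return only
 *   [ES DS FS GS]   -- VM86 return only
 */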
create_bounce_frame:
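        /* Fold CS into the low byte of EFLAGS so that a single TEST can
         * check the return RPL (CS & 3) and EFLAGS.VM together. */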
        movl  XREGS_eflags+4(%esp),%ecx
        movb  XREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl  EDOMAIN_processor(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
        movl  %eax, %ecx
        shll  $7, %ecx
        shll  $13, %eax
        addl  %ecx,%eax
        addl  $init_tss + 12,%eax
        movl  (%eax),%esi               /* tss->esp1 */
FLT7:   movl  4(%eax),%gs               /* tss->ss1  */
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_1
        subl  $16,%esi                  /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  XREGS_es+4(%esp),%eax
FLT8:   movl  %eax,%gs:(%esi)
        movl  XREGS_ds+4(%esp),%eax
FLT9:   movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT10:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT11:  movl  %eax,%gs:12(%esi)
nvm86_1:subl  $8,%esi                   /* push SS/ESP (inter-priv iret) */
        movl  XREGS_esp+4(%esp),%eax
FLT12:  movl  %eax,%gs:(%esi)
        movl  XREGS_ss+4(%esp),%eax
FLT13:  movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  XREGS_esp+4(%esp),%esi
FLT14:  movl  XREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl  $12,%esi
        movl  XREGS_eip+4(%esp),%eax
FLT15:  movl  %eax,%gs:(%esi)
        movl  XREGS_cs+4(%esp),%eax
FLT16:  movl  %eax,%gs:4(%esi)
        movl  XREGS_eflags+4(%esp),%eax
FLT17:  movl  %eax,%gs:8(%esi)
        movb  TRAPBOUNCE_flags(%edx),%cl
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                   # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT18:  movl  %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subl  $4,%esi                   # push %cr2 onto guest frame
        movl  TRAPBOUNCE_cr2(%edx),%eax
FLT19:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                  # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_2
        xorl  %eax,%eax                 # VM86: we write zero selector values
FLT20:  movl  %eax,%gs:(%esi)
FLT21:  movl  %eax,%gs:4(%esi)
FLT22:  movl  %eax,%gs:8(%esi)
FLT23:  movl  %eax,%gs:12(%esi)
        jmp   2f
nvm86_2:movl  XREGS_ds+4(%esp),%eax     # non-VM86: write real selector values
FLT24:  movl  %eax,%gs:(%esi)
        movl  XREGS_es+4(%esp),%eax
FLT25:  movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT26:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT27:  movl  %eax,%gs:12(%esi)
2:      movb  $0,TRAPBOUNCE_flags(%edx)
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_3
        xorl  %eax,%eax                 /* zero DS-GS, just as a real CPU would */
        movl  %eax,XREGS_ds+4(%esp)
        movl  %eax,XREGS_es+4(%esp)
        movl  %eax,XREGS_fs+4(%esp)
        movl  %eax,XREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
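        /* 0xfffcbeff == ~(TF|NT|RF|VM) == ~(0x100|0x4000|0x10000|0x20000) */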
        andl  $0xfffcbeff,XREGS_eflags+4(%esp)
        movl  %gs,XREGS_ss+4(%esp)
        movl  %esi,XREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,XREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,XREGS_eip+4(%esp)
        ret
.section .fixup,"ax"
FIX7:   sti
        popl  %esi
        addl  $4,%esp                   # Discard create_b_frame return address
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT2,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
        .long FLT7,FIX7  , FLT8,FIX7  , FLT9,FIX7  , FLT10,FIX7
        .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
        .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
        .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
        .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
.previous
        ALIGN
process_guest_exception_and_events:
        leal  EDOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        cli   # create_bounce_frame needs CLI for pre-exceptions to work
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen
ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
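        # EFLAGS.IF is bit 9, i.e. bit 1 of byte 1 of the saved EFLAGS --
        # hence the shifted-mask test against the +1 byte below.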
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
1:      sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  XREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        GET_CURRENT(%ebx)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events
exception_with_ints_disabled:
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   1b                        # it really does happen!
                                        #  (e.g., DOM0 X server)
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,XREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $XREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
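        # (the copy shifts the frame down 4 bytes; %edi < %esi, so the
        #  ascending movsl never clobbers a dword before it is read)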
        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,XREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  XREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  SYMBOL_NAME(fatal_trap)
        ud2
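/*
 * Exceptions for which the CPU pushes no error code push a whole dword
 * themselves: vector<<16 packs entry_vector above a zero error_code.
 * Exceptions that do get a hardware error code need only the vector
 * written into the upper 16 bits (offset 2) of the pushed dword.
 */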
ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code
ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
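        # (port 0x61 is system control port B: bit 7 = memory parity
        #  error, bit 6 = I/O channel check)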
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   restore_all_xen

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                      # reason
        pushl %edx                      # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest

nmi_parity_err:
        # Clear and disable the parity-error line
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je    restore_all_xen
        bts   $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx   # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(mem_parity_error)
        addl  $4,%esp
        jmp   ret_from_intr

nmi_io_err:
        # Clear and disable the I/O-error line
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je    restore_all_xen
        bts   $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx   # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(io_check_error)
        addl  $4,%esp
        jmp   ret_from_intr
ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
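        # The recursion unrolls into 18 push/pop pairs, copying from the
        # highest dword downwards; each pair moves one dword up by 16
        # bytes and leaves %esp net-unchanged.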
        copy_vm86_words
        addl  $16,%esp
        ret
do_switch_vm86:
        # Discard the return address
        addl  $4,%esp

        movl  XREGS_eflags(%esp),%edx

        # GS:ESI == Ring-1 stack activation
        movl  XREGS_esp(%esp),%esi
VFLT1:  movl  XREGS_ss(%esp),%gs

        # ES:EDI == Ring-0 stack activation
        leal  XREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2:  movl  %gs:(%esi),%eax
        movl  %eax,XREGS_eax(%esp)
        addl  $4,%esi

        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
        movl  $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
VFLT3:  movl  %gs:(%esi),%eax
        stosl
        addl  $4,%esi
        loop  VFLT3

        # Fix up EFLAGS
        andl  $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
        andl  $X86_EFLAGS_IOPL,%edx     # Ignore attempts to change EFLAGS.IOPL
        jnz   1f
        orl   $X86_EFLAGS_IF,%edx       # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
1:      orl   $X86_EFLAGS_VM,%edx       # Force EFLAGS.VM
        orl   %edx,XREGS_eflags(%esp)

        jmp   test_all_events

.section __ex_table,"a"
        .long VFLT1,domain_crash
        .long VFLT2,domain_crash
        .long VFLT3,domain_crash
.previous
.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)
ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .long SYMBOL_NAME(do_switch_vm86)
        .long SYMBOL_NAME(do_boot_vcpu)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr