xen/arch/x86/x86_32/entry.S  (debuggers.hg @ 3675:8710698e57e1)
BitKeeper revision 1.1159.240.2 (42028548cWA5UfVOtIVVGp5n3bPOKQ)

Merge scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk

Author:   kaf24@scramble.cl.cam.ac.uk
Date:     Thu Feb 03 20:10:48 2005 +0000
Parents:  578b6c14e635 fb875591fd72
Children: dbc41aaba297
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *     { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *       return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
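
/*
 * A sketch, not part of the source: the failsafe frame described above
 * as a hypothetical C struct, lowest address first, as the guest sees
 * it at its new %ss:%esp:
 *
 *   struct failsafe_frame {
 *       u32 ds, es, fs, gs;   // saved data-segment selectors
 *       u32 eip, cs, eflags;  // standard exception-return triplet
 *       u32 esp, ss;          // present only for inter-privilege return
 *   };
 */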
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <public/xen.h>
#define GET_CURRENT(reg)  \
        movl $8192-4,reg; \
        orl  %esp,reg;    \
        andl $~3,reg;     \
        movl (reg),reg;
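
/*
 * Rough C equivalent of GET_CURRENT (a sketch, assuming each CPU
 * stack is 8kB with the current exec_domain pointer stored in the
 * stack's topmost word):
 *
 *   current = *(struct exec_domain **)((esp & ~(8192-1)) + 8192-4);
 */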
#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, esp, eip,
 * and eflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. Host EFLAGS is not saved by VMX, so we
 * set it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared to xen_regs, we skip pushing the following:
 *        (10)  u32 gs;
 *        (9)   u32 fs;
 *        (8)   u32 ds;
 *        (7)   u32 es;
 *   <- get_stack_top() (= HOST_ESP)
 *        (6)   u32 ss;
 *        (5)   u32 esp;
 *        (4)   u32 eflags;
 *        (3)   u32 cs;
 *        (2)   u32 eip;
 *        (2/1) u16 entry_vector;
 *        (1/1) u16 error_code;
 * However, get_stack_top() actually returns 20 bytes below the real
 * top of the stack to allow space for: domain pointer, DS, ES, FS, GS.
 * Therefore, we effectively skip 6 registers.
 */
#define VMX_MONITOR_EFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS \
        pushl $VMX_MONITOR_EFLAGS; \
        popf; \
        subl $(NR_SKIPPED_REGS*4), %esp; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx;
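
/*
 * Illustrative note (not part of the build): after
 * VMX_SAVE_ALL_NOSEGREGS, %esp points at a frame shaped like
 * xen_regs, except that the error_code/entry_vector word and the
 * es/ds/fs/gs slots listed above were only reserved by the subl,
 * never written -- VMX saves and restores those guest values itself.
 */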
ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call SYMBOL_NAME(vmx_vmexit_handler)
        jmp vmx_asm_do_resume

ENTRY(vmx_asm_do_launch)
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMLAUNCH, emitted as raw opcode bytes */
        .byte 0x0f,0x01,0xc2
        pushf
        call SYMBOL_NAME(vm_launch_fail)
        hlt

        ALIGN
ENTRY(vmx_asm_do_resume)
vmx_test_all_events:
        GET_CURRENT(%ebx)
/* test_all_events: */
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz  vmx_process_softirqs

vmx_restore_all_guest:
        call SYMBOL_NAME(load_cr2)
        /*
         * Check if we are going back to a VMX-based VM.
         * By this time, all the setup in the VMCS must be complete.
         */
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $(NR_SKIPPED_REGS*4), %esp
        /* VMRESUME, emitted as raw opcode bytes */
        .byte 0x0f,0x01,0xc3
        pushf
        call SYMBOL_NAME(vm_resume_fail)
        /* Should never reach here */
        hlt

        ALIGN
vmx_process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp  vmx_test_all_events
#endif
ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp test_all_events

        ALIGN
restore_all_guest:
        testb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jnz failsafe_callback
        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
        jnz restore_all_vm86
FLT1:   movl XREGS_ds(%esp),%ds
FLT2:   movl XREGS_es(%esp),%es
FLT3:   movl XREGS_fs(%esp),%fs
FLT4:   movl XREGS_gs(%esp),%gs
restore_all_vm86:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                  # error_code/entry_vector
        movl  %eax,XREGS_eax+4(%esp)
        movl  %ebp,XREGS_ebp+4(%esp)
        movl  %edi,XREGS_edi+4(%esp)
        movl  %esi,XREGS_esi+4(%esp)
        movl  %edx,XREGS_edx+4(%esp)
        movl  %ecx,XREGS_ecx+4(%esp)
        movl  %ebx,XREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
DBLFIX1:GET_CURRENT(%ebx)
        testb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jnz   domain_crash              # cannot reenter failsafe code
        orb   $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        jmp   test_all_events           # will return via failsafe code
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,DBLFIX1
.previous
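
/*
 * Note on the tables above: __pre_ex_table pairs each faulting
 * instruction (FLTn) with recovery code (FIXn); it is searched by
 * search_pre_exception_table() when a fault arrives with interrupts
 * still disabled. __ex_table is the ordinary fixup table consulted
 * by the normal fault path (here covering the DBLFLTn re-entries).
 */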
/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        andb $~TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        leal EDOMAIN_trap_bounce(%ebx),%edx
        movl EDOMAIN_failsafe_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl EDOMAIN_failsafe_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT6:   iret
.section .fixup,"ax"
FIX6:   pushl %ebx
        GET_CURRENT(%ebx)
        orb   $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
        pop   %ebx
        jmp   FIX5
.section __pre_ex_table,"a"
        .long FLT6,FIX6
.previous

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret
        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        andl $(NR_hypercalls-1),%eax
        call *SYMBOL_NAME(hypercall_table)(,%eax,4)

ret_from_hypercall:
        movl %eax,XREGS_eax(%esp)       # save the return value
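
/*
 * C sketch of the dispatch above (hypothetical, not built): the
 * hypercall number arrives in %eax and indexes hypercall_table,
 * masked to the table size; unimplemented slots point at
 * do_ni_hypercall.
 *
 *   regs->eax = hypercall_table[regs->eax & (NR_hypercalls-1)](...);
 */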
test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl EDOMAIN_processor(%ebx),%eax
        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz  process_softirqs
/*test_guest_events:*/
        movl EDOMAIN_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz  restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz   restore_all_guest
/*process_guest_events:*/
        leal EDOMAIN_trap_bounce(%ebx),%edx
        movl EDOMAIN_event_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl EDOMAIN_event_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        movl EDOMAIN_vcpu_info(%ebx),%eax
        movb $1,VCPUINFO_upcall_mask(%eax) # upcalls are masked during delivery
        jmp  restore_all_guest
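
/*
 * The event checks above as a rough C sketch (field names mirror
 * the asm offsets; not part of the build):
 *
 *   if ( softirq_pending(ed->processor) )
 *       goto process_softirqs;
 *   if ( !ed->vcpu_info->upcall_mask && ed->vcpu_info->upcall_pending )
 *   {
 *       tb = &ed->trap_bounce;
 *       tb->eip   = ed->event_addr;
 *       tb->cs    = ed->event_sel;
 *       tb->flags = TBF_INTERRUPT;
 *       create_bounce_frame();
 *       ed->vcpu_info->upcall_mask = 1;  // masked during delivery
 *   }
 *   goto restore_all_guest;
 */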
        ALIGN
process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp  test_all_events
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == task_struct                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp.     */
create_bounce_frame:
        movl  XREGS_eflags+4(%esp),%ecx
        movb  XREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl  EDOMAIN_processor(%ebx),%eax
        /* next 4 lines multiply %eax by 8320 == sizeof(tss_struct)
         * (8320 = 8192 + 128, hence the two shifts) */
        movl  %eax,%ecx
        shll  $7,%ecx
        shll  $13,%eax
        addl  %ecx,%eax
        addl  $init_tss + 12,%eax
        movl  (%eax),%esi               /* tss->esp1 */
FLT7:   movl  4(%eax),%gs               /* tss->ss1  */
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_1
        subl  $16,%esi                  /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  XREGS_es+4(%esp),%eax
FLT8:   movl  %eax,%gs:(%esi)
        movl  XREGS_ds+4(%esp),%eax
FLT9:   movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT10:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT11:  movl  %eax,%gs:12(%esi)
nvm86_1:subl  $8,%esi                   /* push SS/ESP (inter-priv iret) */
        movl  XREGS_esp+4(%esp),%eax
FLT12:  movl  %eax,%gs:(%esi)
        movl  XREGS_ss+4(%esp),%eax
FLT13:  movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  XREGS_esp+4(%esp),%esi
FLT14:  movl  XREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl  $12,%esi
        movl  XREGS_eip+4(%esp),%eax
FLT15:  movl  %eax,%gs:(%esi)
        movl  XREGS_cs+4(%esp),%eax
FLT16:  movl  %eax,%gs:4(%esi)
        movl  XREGS_eflags+4(%esp),%eax
FLT17:  movl  %eax,%gs:8(%esi)
        movb  TRAPBOUNCE_flags(%edx),%cl
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                   # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT18:  movl  %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subl  $4,%esi                   # push %cr2 onto guest frame
        movl  TRAPBOUNCE_cr2(%edx),%eax
FLT19:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                  # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_2
        xorl  %eax,%eax                 # VM86: we write zero selector values
FLT20:  movl  %eax,%gs:(%esi)
FLT21:  movl  %eax,%gs:4(%esi)
FLT22:  movl  %eax,%gs:8(%esi)
FLT23:  movl  %eax,%gs:12(%esi)
        jmp   2f
nvm86_2:movl  XREGS_ds+4(%esp),%eax     # non-VM86: write real selector values
FLT24:  movl  %eax,%gs:(%esi)
        movl  XREGS_es+4(%esp),%eax
FLT25:  movl  %eax,%gs:4(%esi)
        movl  XREGS_fs+4(%esp),%eax
FLT26:  movl  %eax,%gs:8(%esi)
        movl  XREGS_gs+4(%esp),%eax
FLT27:  movl  %eax,%gs:12(%esi)
2:      movb  $0,TRAPBOUNCE_flags(%edx)
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz    nvm86_3
        xorl  %eax,%eax                 /* zero DS-GS, just as a real CPU would */
        movl  %eax,XREGS_ds+4(%esp)
        movl  %eax,XREGS_es+4(%esp)
        movl  %eax,XREGS_fs+4(%esp)
        movl  %eax,XREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,XREGS_eflags+4(%esp)
        movl  %gs,XREGS_ss+4(%esp)
        movl  %esi,XREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,XREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,XREGS_eip+4(%esp)
        ret
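
/*
 * Resulting guest-stack frame, lowest address first (a sketch;
 * bracketed entries depend on TRAPBOUNCE_flags and the entry mode):
 *
 *   [ds, es, fs, gs]   // TBF_FAILSAFE only
 *   [cr2]              // TBF_EXCEPTION_CR2 only
 *   [error_code]       // TBF_EXCEPTION_ERRCODE only
 *   eip, cs, eflags
 *   [esp, ss]          // only when no ring-1 activation existed
 *   [es, ds, fs, gs]   // VM86 entry only, above ss
 */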
.section .fixup,"ax"
FIX7:   sti
        popl  %esi
        addl  $4,%esp                   # discard create_bounce_frame return address
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $DBLFLT2,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   error_code
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
        .long FLT7,FIX7  , FLT8,FIX7  , FLT9,FIX7  , FLT10,FIX7
        .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
        .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
        .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
        .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
.previous
        ALIGN
process_guest_exception_and_events:
        leal  EDOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        cli                             # create_bounce_frame needs CLI for pre-exceptions to work
        call  create_bounce_frame
        jmp   test_all_events
        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen
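
/*
 * The test above distinguishes guest context from nested Xen context:
 * the low two bits of CS give the interrupted privilege level, and
 * EFLAGS.VM marks a VM86 guest; either being non-zero means we
 * interrupted a guest.
 */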
ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
1:      sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  XREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        GET_CURRENT(%ebx)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events
exception_with_ints_disabled:
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   1b                        # it really does happen!
                                        # (e.g., DOM0 X server)
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,XREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $XREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,XREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code
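
/*
 * Sketch of the frame surgery above (hypothetical C, not built):
 *
 *   memmove((char *)regs - 4, regs, XREGS_kernel_sizeof); // slide frame down
 *   esp -= 4;
 *   // re-materialise error_code/entry_vector just above the frame,
 *   // where the pre-exception fixup code expects to find it
 *   *(u32 *)((char *)esp + XREGS_kernel_sizeof) = regs->error_code;
 */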
FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  XREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the xen_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  SYMBOL_NAME(fatal_trap)
        ud2
ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   error_code

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   error_code

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   error_code

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   error_code

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   error_code

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   error_code

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   error_code

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   error_code

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   error_code

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   error_code

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   error_code

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   error_code
ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        pushl %ebx                      # reason
        pushl %edx                      # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest

defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp   restore_all_xen
nmi_parity_err:
        # Clear and disable the parity-error line.
        andb  $0xf,%al
        orb   $0x4,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)   # nmi=ignore
        je    restore_all_xen
        bts   $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)   # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx         # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(mem_parity_error)
        addl  $4,%esp
        jmp   ret_from_intr

nmi_io_err:
        # Clear and disable the I/O-error line.
        andb  $0xf,%al
        orb   $0x8,%al
        outb  %al,$0x61
        cmpb  $'i',%ss:SYMBOL_NAME(opt_nmi)   # nmi=ignore
        je    restore_all_xen
        bts   $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts   $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb  $'d',%ss:SYMBOL_NAME(opt_nmi)   # nmi=dom0
        je    restore_all_xen
        movl  $(__HYPERVISOR_DS),%edx         # nmi=fatal
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
        push  %edx
        call  SYMBOL_NAME(io_check_error)
        addl  $4,%esp
        jmp   ret_from_intr
ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret
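
/*
 * Conceptually (a sketch; the macro copies word-by-word from the top
 * down, so the overlapping move is safe):
 *
 *   memmove((char *)esp + 16, esp, 18*4); // shift frame up 16 bytes
 *   esp += 16;                            // room for VM86 ES/DS/FS/GS slots
 */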
do_switch_vm86:
        # Discard the return address.
        addl  $4,%esp

        movl  XREGS_eflags(%esp),%edx

        # GS:ESI == ring-1 stack activation
        movl  XREGS_esp(%esp),%esi
VFLT1:  movl  XREGS_ss(%esp),%gs

        # ES:EDI == ring-0 stack activation
        leal  XREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame.
VFLT2:  movl  %gs:(%esi),%eax
        movl  %eax,XREGS_eax(%esp)
        addl  $4,%esi

        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack.
        movl  $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
VFLT3:  movl  %gs:(%esi),%eax
        stosl
        addl  $4,%esi
        loop  VFLT3

        # Fix up EFLAGS.
        andl  $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
        andl  $X86_EFLAGS_IOPL,%edx     # ignore attempts to change EFLAGS.IOPL
        jnz   1f
        orl   $X86_EFLAGS_IF,%edx       # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
1:      orl   $X86_EFLAGS_VM,%edx       # force EFLAGS.VM
        orl   %edx,XREGS_eflags(%esp)

        jmp   test_all_events

.section __ex_table,"a"
        .long VFLT1,domain_crash
        .long VFLT2,domain_crash
        .long VFLT3,domain_crash
.previous
.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)

ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .long SYMBOL_NAME(do_switch_vm86)
        .long SYMBOL_NAME(do_boot_vcpu)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr