xen/arch/x86/x86_32/entry.S @ 21972:6f07d9ac1e7c

x86: Fix NMI injection to PV guests

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Thu Aug 05 14:41:14 2010 +0100
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find out
 * the privilege level of the return code-selector. That code would just be
 * a hassle to write, and would need to account for running off the end of
 * the GDT/LDT, for example. For all callbacks we check that the provided
 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe, as
 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *     { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *       return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - faulting while creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
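
/*
 * Illustrative layout of the frame that create_bounce_frame() below leaves
 * on the guest stack, from the new guest %esp upwards (lowest address
 * first); bracketed entries are conditional:
 *
 *   [ DS, ES, FS, GS ]   failsafe callback only (TBF_FAILSAFE)
 *   [ error_code ]       only if TBF_EXCEPTION_ERRCODE is set
 *     EIP
 *     CS                 saved_upcall_mask folded into bits 16-23
 *     EFLAGS             with the guest's virtual IF state
 *   [ ESP, SS ]          inter-privilege return only
 *   [ ES, DS, FS, GS ]   VM86 activations only
 */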

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

        ALIGN
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        leal 4(%esp),%esp
        jnz .Lrestore_iret_guest
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testb $2,UREGS_cs-UREGS_eip(%esp)
        jnz .Lrestore_sregs_guest
        call restore_ring0_guest
        jmp .Lrestore_iret_guest
#endif
.Lrestore_sregs_guest:
.Lft1:  mov UREGS_ds-UREGS_eip(%esp),%ds
.Lft2:  mov UREGS_es-UREGS_eip(%esp),%es
.Lft3:  mov UREGS_fs-UREGS_eip(%esp),%fs
.Lft4:  mov UREGS_gs-UREGS_eip(%esp),%gs
.Lrestore_iret_guest:
.Lft5:  iret
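
/*
 * Fault recovery for the guest exit path above: if reloading a guest
 * segment register (.Lft1-.Lft4) or the final iret (.Lft5) faults, the
 * __pre_ex_table entries below redirect the fault to .Lfx1.  That code
 * re-saves the GPRs (rebuilding the guest frame) and re-delivers the
 * exception in Xen context with .Ldf1 as the return address; the __ex_table
 * entry for .Ldf1 lets a fault the handler cannot otherwise fix be diverted
 * into failsafe_callback, which bounces to the guest's registered failsafe
 * handler with zeroed data segment selectors in the saved frame.
 */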
        .section .fixup,"ax"
.Lfx1:  sti
        SAVE_ALL_GPRS
        mov UREGS_error_code(%esp),%esi
        pushfl                          # EFLAGS
        movl $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl $.Ldf1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp handle_exception
.Ldf1:  GET_CURRENT(%ebx)
        jmp test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal VCPU_trap_bounce(%ebx),%edx
        movl VCPU_failsafe_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl VCPU_failsafe_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movb $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        bt $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
        jnc 1f
        orb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
1:      call create_bounce_frame
        xorl %eax,%eax
        movl %eax,UREGS_ds(%esp)
        movl %eax,UREGS_es(%esp)
        movl %eax,UREGS_fs(%esp)
        movl %eax,UREGS_gs(%esp)
        jmp test_all_events
        .previous
        .section __pre_ex_table,"a"
        .long .Lft1,.Lfx1
        .long .Lft2,.Lfx1
        .long .Lft3,.Lfx1
        .long .Lft4,.Lfx1
        .long .Lft5,.Lfx1
        .previous
        .section __ex_table,"a"
        .long .Ldf1,failsafe_callback
        .previous

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,1f)
1:      sti
        GET_CURRENT(%ebx)
        cmpl $NR_hypercalls,%eax
        jae bad_hypercall
        PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
#ifndef NDEBUG
        /* Create shadow parameters and corrupt those not used by this call. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp) # EBP
        pushl 28(%esp) # EDI
        pushl 28(%esp) # ESI
        pushl 28(%esp) # EDX
        pushl 28(%esp) # ECX
        pushl 28(%esp) # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal (%esp,%ecx,4),%edi
        subl $6,%ecx
        negl %ecx
        movl %eax,%esi
        movl $0xDEADBEEF,%eax
        rep stosl
        movl %esi,%eax
#define SHADOW_BYTES 32 /* 6 shadow parameters + EIP + hypercall # */
#else
        /*
         * We need shadow parameters even on non-debug builds. We depend on the
         * original versions not being clobbered (needed to create a hypercall
         * continuation). But that isn't guaranteed by the function-call ABI.
         */
        pushl 20(%esp) # EBP
        pushl 20(%esp) # EDI
        pushl 20(%esp) # ESI
        pushl 20(%esp) # EDX
        pushl 20(%esp) # ECX
        pushl 20(%esp) # EBX
#define SHADOW_BYTES 24 /* 6 shadow parameters */
#endif
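
/*
 * At this point the top of the Xen stack holds the six shadow parameter
 * slots (copies of the saved EBX..EBP) and, on debug builds, the saved
 * guest EIP and the hypercall number.  SHADOW_BYTES is the size of that
 * area, i.e. the offset that the UREGS_* access below must add to reach
 * the cpu_user_regs frame.
 */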
        cmpb $0,tb_init_done
        je 1f
        call trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movl UREGS_eax+SHADOW_BYTES(%esp),%eax /* Hypercall # */
#undef SHADOW_BYTES
1:      call *hypercall_table(,%eax,4)
        addl $24,%esp                   # Discard the shadow parameters
#ifndef NDEBUG
        /* Deliberately corrupt real parameter regs used by this hypercall. */
        popl %ecx                       # Shadow EIP
        cmpl %ecx,UREGS_eip+4(%esp)
        popl %ecx                       # Shadow hypercall index
        jne skip_clobber                # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl %esp,%edi
        movl %eax,%esi
        movl $0xDEADBEEF,%eax
        rep stosl
        movl %esi,%eax
skip_clobber:
#endif
        movl %eax,UREGS_eax(%esp)       # save the return value
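
/*
 * Exit-to-guest work loop.  %ebx must point at the current struct vcpu.
 * With interrupts disabled we check, in order: pending softirqs, a pending
 * virtual machine check, a pending virtual NMI, and finally pending (and
 * unmasked) event-channel upcalls.  Each of these paths re-enables
 * interrupts, does its work and loops back here; only when nothing is
 * deliverable do we leave via restore_all_guest.
 */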

test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl VCPU_processor(%ebx),%eax
        shl $IRQSTAT_shift,%eax
        test %ecx,irq_stat(%eax,1)
        jnz process_softirqs
        testb $1,VCPU_mce_pending(%ebx)
        jnz process_mce
        testb $1,VCPU_nmi_pending(%ebx)
        jnz process_nmi
test_guest_events:
        movl VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz restore_all_guest
/*process_guest_events:*/
        sti
        leal VCPU_trap_bounce(%ebx),%edx
        movl VCPU_event_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl VCPU_event_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        jmp test_all_events

        ALIGN
process_softirqs:
        sti
        call do_softirq
        jmp test_all_events
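
/*
 * Virtual MCE/NMI delivery is serialised by VCPU_async_exception_mask: if
 * the corresponding bit is already set, an earlier instance is still being
 * handled and the new one is left pending.  Otherwise the current mask is
 * saved in the *_old_mask field (restored later by the guest's iret
 * hypercall) and the bit is set before bouncing to the guest handler.
 */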

        ALIGN
/* %ebx: struct vcpu */
process_mce:
        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
        jnz test_guest_events
        sti
        movb $0,VCPU_mce_pending(%ebx)
        call set_guest_machinecheck_trapbounce
        test %eax,%eax
        jz test_all_events
        movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
        movb %dl,VCPU_mce_old_mask(%ebx)            # iret hypercall
        orl $1 << VCPU_TRAP_MCE,%edx
        movb %dl,VCPU_async_exception_mask(%ebx)
        jmp process_trap

        ALIGN
/* %ebx: struct vcpu */
process_nmi:
        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%ebx)
        jnz test_guest_events
        sti
        movb $0,VCPU_nmi_pending(%ebx)
        call set_guest_nmi_trapbounce
        test %eax,%eax
        jz test_all_events
        movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
        movb %dl,VCPU_nmi_old_mask(%ebx)            # iret hypercall
        orl $1 << VCPU_TRAP_NMI,%edx
        movb %dl,VCPU_async_exception_mask(%ebx)
        /* FALLTHROUGH */
process_trap:
        leal VCPU_trap_bounce(%ebx),%edx
        call create_bounce_frame
        jmp test_all_events

bad_hypercall:
        movl $-ENOSYS,UREGS_eax(%esp)
        jmp test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == struct vcpu                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.     */
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        movl UREGS_eflags+4(%esp),%ecx
        movb UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz ring1 /* jump if returning to an existing ring-1 activation */
        movl VCPU_kernel_sp(%ebx),%esi
.Lft6:  mov VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz .Lnvm86_1
        subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
        movl UREGS_es+4(%esp),%eax
.Lft7:  movl %eax,%gs:(%esi)
        movl UREGS_ds+4(%esp),%eax
.Lft8:  movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
.Lft9:  movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
.Lft10: movl %eax,%gs:12(%esi)
.Lnvm86_1:
        subl $8,%esi        /* push SS/ESP (inter-priv iret) */
        movl UREGS_esp+4(%esp),%eax
.Lft11: movl %eax,%gs:(%esi)
        movl UREGS_ss+4(%esp),%eax
.Lft12: movl %eax,%gs:4(%esi)
        jmp 1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl UREGS_esp+4(%esp),%esi
.Lft13: mov UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb TRAPBOUNCE_flags(%edx),%cl
        subl $12,%esi
        movl UREGS_eip+4(%esp),%eax
.Lft14: movl %eax,%gs:(%esi)
        movl VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb %ch,VCPUINFO_upcall_mask(%eax)
        popl %eax
        shll $16,%eax                   # Bits 16-23: saved_upcall_mask
        movw UREGS_cs+4(%esp),%ax       # Bits  0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz .Lft15
        and $~3,%ax                     # RPL 1 -> RPL 0
#endif
.Lft15: movl %eax,%gs:4(%esi)
        test $0x00FF0000,%eax           # Bits 16-23: saved_upcall_mask
        setz %ch                        # %ch == !saved_upcall_mask
        movl UREGS_eflags+4(%esp),%eax
        andl $~X86_EFLAGS_IF,%eax
        shlb $1,%ch                     # Bit 9 (EFLAGS.IF)
        orb %ch,%ah                     # Fold EFLAGS.IF into %eax
.Lft16: movl %eax,%gs:8(%esi)
        test $TBF_EXCEPTION_ERRCODE,%cl
        jz 1f
        subl $4,%esi                    # push error_code onto guest frame
        movl TRAPBOUNCE_error_code(%edx),%eax
.Lft17: movl %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz 2f
        subl $16,%esi                   # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz .Lnvm86_2
        xorl %eax,%eax                  # VM86: we write zero selector values
.Lft18: movl %eax,%gs:(%esi)
.Lft19: movl %eax,%gs:4(%esi)
.Lft20: movl %eax,%gs:8(%esi)
.Lft21: movl %eax,%gs:12(%esi)
        jmp 2f
.Lnvm86_2:
        movl UREGS_ds+4(%esp),%eax      # non-VM86: write real selector values
.Lft22: movl %eax,%gs:(%esi)
        movl UREGS_es+4(%esp),%eax
.Lft23: movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
.Lft24: movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
.Lft25: movl %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz .Lnvm86_3
        xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
        movl %eax,UREGS_ds+4(%esp)
        movl %eax,UREGS_es+4(%esp)
        movl %eax,UREGS_fs+4(%esp)
        movl %eax,UREGS_gs+4(%esp)
.Lnvm86_3:
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
        mov %gs,UREGS_ss+4(%esp)
        movl %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz domain_crash_synchronous
        movl %eax,UREGS_cs+4(%esp)
        movl TRAPBOUNCE_eip(%edx),%eax
        movl %eax,UREGS_eip+4(%esp)
        ret
        .section __ex_table,"a"
        .long .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .long .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .long .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .long .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .long .Lft14,domain_crash_synchronous , .Lft15,domain_crash_synchronous
        .long .Lft16,domain_crash_synchronous , .Lft17,domain_crash_synchronous
        .long .Lft18,domain_crash_synchronous , .Lft19,domain_crash_synchronous
        .long .Lft20,domain_crash_synchronous , .Lft21,domain_crash_synchronous
        .long .Lft22,domain_crash_synchronous , .Lft23,domain_crash_synchronous
        .long .Lft24,domain_crash_synchronous , .Lft25,domain_crash_synchronous
        .previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call printk
        jmp __domain_crash_synchronous

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl UREGS_eflags(%esp),%eax
        movb UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz test_all_events
        jmp restore_all_xen
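
/*
 * Exception entry stubs.  The 32-bit slot that normally holds a hardware
 * error code is shared between error_code (bits 0-15) and entry_vector
 * (bits 16-31).  Exceptions without a CPU-supplied error code push
 * $TRAP_xxx<<16 (vector in the high half, zero error code); those with one
 * write the vector into the upper half of the slot the CPU already pushed
 * (movw $TRAP_xxx,2(%esp)).
 */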

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
handle_exception:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,2f)
        .text 1
        /* Exception within Xen: make sure we have valid %ds,%es. */
1:      mov %ecx,%ds
        mov %ecx,%es
        jmp 2f
        .previous
2:      testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz exception_with_ints_disabled
        sti                             # re-enable interrupts
1:      xorl %eax,%eax
        movw UREGS_entry_vector(%esp),%ax
        movl %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax, %ebx)
        call *exception_table(,%eax,4)
        addl $4,%esp
        movl UREGS_eflags(%esp),%eax
        movb UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz restore_all_xen
        leal VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz test_all_events
        call create_bounce_frame
        movb $0,TRAPBOUNCE_flags(%edx)
        jmp test_all_events
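
/*
 * Exceptions taken while the interrupted context had interrupts disabled:
 * if the fault hit one of the critical regions listed in __pre_ex_table
 * (the guest-state restore code above), redirect the saved EIP to the
 * registered fixup address and return to it; otherwise fall back to the
 * normal dispatch above.  An interrupts-disabled fault that did not come
 * from Xen itself indicates serious state corruption and is fatal.
 */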

exception_with_ints_disabled:
        movl UREGS_eflags(%esp),%eax
        movb UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz FATAL_exception_with_ints_disabled
        pushl %esp
        call search_pre_exception_table
        addl $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz 1b
        movl %eax,UREGS_eip(%esp)
        movl %esp,%esi
        subl $4,%esp
        movl %esp,%edi
        movl $UREGS_kernel_sizeof/4,%ecx
        rep; movsl                      # make room for error_code/entry_vector
        movl UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl %eax,UREGS_kernel_sizeof(%esp)
        jmp restore_all_xen             # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl %esi,%esi
        movw UREGS_entry_vector(%esp),%si
        movl %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp handle_exception

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp handle_exception

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp handle_exception

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp handle_exception

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp handle_exception

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp handle_exception

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp handle_exception

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp handle_exception

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp handle_exception

ENTRY(invalid_TSS)
        movw $TRAP_invalid_tss,2(%esp)
        jmp handle_exception

ENTRY(segment_not_present)
        movw $TRAP_no_segment,2(%esp)
        jmp handle_exception

ENTRY(stack_segment)
        movw $TRAP_stack_error,2(%esp)
        jmp handle_exception

ENTRY(general_protection)
        movw $TRAP_gp_fault,2(%esp)
        jmp handle_exception

ENTRY(alignment_check)
        movw $TRAP_alignment_check,2(%esp)
        jmp handle_exception

ENTRY(page_fault)
        movw $TRAP_page_fault,2(%esp)
        jmp handle_exception

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp handle_exception

ENTRY(early_page_fault)
        SAVE_ALL(1f,1f)
1:      movl %esp,%eax
        pushl %eax
        call do_early_page_fault
        addl $4,%esp
        jmp restore_all_xen
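
/*
 * NMI and MCE share an entry path that must cope with whatever state the
 * interrupted context left %ds/%es in, so the SAVE_ALL below is careful not
 * to trash the segment registers.  If Xen was interrupted while restoring
 * guest segment state, .Lnmi_mce_xen repairs the outer guest frame and,
 * when the interrupted EIP lies in the restore window, rolls it back to
 * .Lrestore_sregs_guest so the segment loads are retried on the way out.
 */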

handle_nmi_mce:
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
        addl $4,%esp
        iret
#else
        # Save state but do not trash the segment registers!
        SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
.Lnmi_mce_common:
        xorl %eax,%eax
        movw UREGS_entry_vector(%esp),%ax
        movl %esp,%edx
        pushl %edx
        call *exception_table(,%eax,4)
        addl $4,%esp
        /*
         * NB. We may return to Xen context with polluted %ds/%es. But in such
         * cases we have put guest DS/ES on the guest stack frame, which will
         * be detected by SAVE_ALL(), or we have rolled back restore_guest.
         */
        jmp ret_from_intr
.Lnmi_mce_xen:
        /* Check the outer (guest) context for %ds/%es state validity. */
        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%ebx)
        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
        mov %ds,%eax
        mov %es,%edx
        jnz .Lnmi_mce_vm86
        /* We may have interrupted Xen while messing with %ds/%es... */
        cmpw %ax,%cx
        mov %ecx,%ds                    /* Ensure %ds is valid */
        cmove UREGS_ds(%ebx),%eax       /* Grab guest DS if it wasn't in %ds */
        cmpw %dx,%cx
        movl %eax,UREGS_ds(%ebx)        /* Ensure guest frame contains guest DS */
        cmove UREGS_es(%ebx),%edx       /* Grab guest ES if it wasn't in %es */
        mov %ecx,%es                    /* Ensure %es is valid */
        movl $.Lrestore_sregs_guest,%ecx
        movl %edx,UREGS_es(%ebx)        /* Ensure guest frame contains guest ES */
        cmpl %ecx,UREGS_eip(%esp)
        jbe .Lnmi_mce_common
        cmpl $.Lrestore_iret_guest,UREGS_eip(%esp)
        ja .Lnmi_mce_common
        /* Roll outer context restore_guest back to restoring %ds/%es. */
        movl %ecx,UREGS_eip(%esp)
        jmp .Lnmi_mce_common
.Lnmi_mce_vm86:
        /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
        mov %ecx,%ds
        mov %ecx,%es
        jmp .Lnmi_mce_common
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */

ENTRY(nmi)
        pushl $TRAP_nmi<<16
        jmp handle_nmi_mce

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp handle_nmi_mce
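
/*
 * setup_vm86_frame is entered with %ecx holding a data segment selector
 * (loaded into %ds/%es, as in the other paths above).  The copy_vm86_words
 * macro expands recursively into 18 push/pop pairs that move each dword of
 * the current frame 16 bytes up the stack; note that a pop with an
 * %esp-relative memory destination computes its address after %esp has been
 * incremented, so each copy really lands exactly 16 bytes above its source.
 * The final addl $16,%esp then makes the relocated frame, return address
 * included, the new top of stack.
 */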
ENTRY(setup_vm86_frame)
        mov %ecx,%ds
        mov %ecx,%es
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl $16,%esp
        ret

        .section .rodata, "a", @progbits

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long do_nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long do_device_not_available
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_sched_op_compat
        .long do_platform_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op_compat
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op_compat
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_xsm_op
        .long do_nmi_op
        .long do_sched_op
        .long do_callback_op        /* 30 */
        .long do_xenoprof_op
        .long do_event_channel_op
        .long do_physdev_op
        .long do_hvm_op
        .long do_sysctl             /* 35 */
        .long do_domctl
        .long do_kexec_op
        .long do_tmem_op
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
        .long do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
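
/*
 * hypercall_args_table parallels hypercall_table: one byte per hypercall
 * giving its argument count.  The debug build uses it above to decide how
 * many of the six shadow parameter slots are live, so the unused ones (and,
 * after the call, the consumed ones) can be filled with 0xDEADBEEF to catch
 * handlers touching arguments they did not declare.  Both tables are padded
 * to __HYPERVISOR_arch_0 and then to NR_hypercalls with do_ni_hypercall /
 * zero-argument entries by the .rept blocks.
 */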
ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 4 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_sched_op_compat */
        .byte 1 /* do_platform_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 4 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 0 /* do_ni_hypercall */      /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_xsm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_sched_op */
        .byte 2 /* do_callback_op */       /* 30 */
        .byte 2 /* do_xenoprof_op */
        .byte 2 /* do_event_channel_op */
        .byte 2 /* do_physdev_op */
        .byte 2 /* do_hvm_op */
        .byte 1 /* do_sysctl */            /* 35 */
        .byte 1 /* do_domctl */
        .byte 2 /* do_kexec_op */
        .byte 1 /* do_tmem_op */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
        .byte 1 /* do_mca */               /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr