debuggers.hg

view xen/arch/x86/x86_32/entry.S @ 22906:700ac6445812

Now add KDB to the non-kdb tree
author Mukesh Rathor
date Thu Feb 03 15:42:41 2011 -0800 (2011-02-03)
parents b9017fdaad4d
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find out
 * the privilege level of the return code-selector. That code would just be
 * a hassle to write, and would need to account for running off the end of
 * the GDT/LDT, for example. For all callbacks we check that the provided
 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe, as
 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, the original values for DS/ES/FS/GS are placed on the stack
 * rather than in DS/ES/FS/GS themselves. Why? It saves us loading them,
 * only to have them saved/restored in the guest OS. Furthermore, if we
 * load them we may cause a fault if they are invalid, which is a hassle to
 * deal with. We avoid that problem if we don't load them :-) This property
 * allows us to use the failsafe callback as a fallback: if we ever fault on
 * loading DS/ES/FS/GS on return to ring != 0, we can simply package it up
 * as a return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
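
For readers less used to the frame layout prose above, the failsafe frame can
be pictured as the following C struct (an illustrative sketch only: the type
and field names are not taken from the Xen headers, and the trailing ESP/SS
pair exists only when the callback bounces to a ring other than ring 1):

    #include <stdint.h>

    /* Hypothetical view of the failsafe-callback frame described above,
     * lowest stack address first. */
    struct failsafe_frame {
        uint32_t ds, es, fs, gs;   /* original data segment selectors      */
        uint32_t eip;              /* return EIP                           */
        uint32_t cs;               /* return CS                            */
        uint32_t eflags;           /* return EFLAGS                        */
        uint32_t esp, ss;          /* present only for inter-privilege use */
    };
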
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

        ALIGN
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        leal  4(%esp),%esp
        jnz   .Lrestore_iret_guest
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testb $2,UREGS_cs-UREGS_eip(%esp)
        jnz   .Lrestore_sregs_guest
        call  restore_ring0_guest
        jmp   .Lrestore_iret_guest
#endif
.Lrestore_sregs_guest:
.Lft1:  mov   UREGS_ds-UREGS_eip(%esp),%ds
.Lft2:  mov   UREGS_es-UREGS_eip(%esp),%es
.Lft3:  mov   UREGS_fs-UREGS_eip(%esp),%fs
.Lft4:  mov   UREGS_gs-UREGS_eip(%esp),%gs
.Lrestore_iret_guest:
.Lft5:  iret
.section .fixup,"ax"
.Lfx1:  sti
        SAVE_ALL_GPRS
        mov   UREGS_error_code(%esp),%esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $.Ldf1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf1:  GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
1:      call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
        _ASM_PRE_EXTABLE(.Lft1, .Lfx1)
        _ASM_PRE_EXTABLE(.Lft2, .Lfx1)
        _ASM_PRE_EXTABLE(.Lft3, .Lfx1)
        _ASM_PRE_EXTABLE(.Lft4, .Lfx1)
        _ASM_PRE_EXTABLE(.Lft5, .Lfx1)
        _ASM_EXTABLE(.Ldf1, failsafe_callback)
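
The _ASM_PRE_EXTABLE annotations above pair each instruction that can fault
while restoring guest state (.Lft1-.Lft5) with the .Lfx1 fixup, which is how
the "failsafe callback as a fallback" policy from the header comment is wired
up. Conceptually the table is searched by faulting EIP, roughly as in this C
sketch (hedged: the struct and function names are illustrative, not Xen's
real exception-table implementation):

    #include <stddef.h>

    /* One (fault site, fixup) pair, as registered by _ASM_PRE_EXTABLE. */
    struct ex_entry {
        unsigned long addr;   /* EIP of the potentially faulting insn */
        unsigned long cont;   /* EIP to resume at instead             */
    };

    /* Return the fixup address for a faulting EIP, or 0 if none is known. */
    static unsigned long find_fixup(const struct ex_entry *tbl, size_t n,
                                    unsigned long faulting_eip)
    {
        for (size_t i = 0; i < n; i++)
            if (tbl[i].addr == faulting_eip)
                return tbl[i].cont;
        return 0;
    }
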
        ALIGN
restore_all_xen:
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        addl  $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl  $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(,1f)
1:      sti
        GET_CURRENT(%ebx)
        cmpl  $NR_hypercalls,%eax
        jae   bad_hypercall
        PERFC_INCR(hypercalls, %eax, %ebx)
#ifndef NDEBUG
        /* Create shadow parameters and corrupt those not used by this call. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp) # EBP
        pushl 28(%esp) # EDI
        pushl 28(%esp) # ESI
        pushl 28(%esp) # EDX
        pushl 28(%esp) # ECX
        pushl 28(%esp) # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#define SHADOW_BYTES 32 /* 6 shadow parameters + EIP + hypercall # */
#else
        /*
         * We need shadow parameters even on non-debug builds. We depend on the
         * original versions not being clobbered (needed to create a hypercall
         * continuation). But that isn't guaranteed by the function-call ABI.
         */
        pushl 20(%esp) # EBP
        pushl 20(%esp) # EDI
        pushl 20(%esp) # ESI
        pushl 20(%esp) # EDX
        pushl 20(%esp) # ECX
        pushl 20(%esp) # EBX
#define SHADOW_BYTES 24 /* 6 shadow parameters */
#endif
        cmpb  $0,tb_init_done
        UNLIKELY_START(ne, trace)
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movl  UREGS_eax+SHADOW_BYTES(%esp),%eax /* Hypercall # */
        UNLIKELY_END(trace)
        call  *hypercall_table(,%eax,4)
        movl  %eax,UREGS_eax+SHADOW_BYTES(%esp) # save the return value
#undef SHADOW_BYTES
        addl  $24,%esp                 # Discard the shadow parameters
#ifndef NDEBUG
        /* Deliberately corrupt real parameter regs used by this hypercall. */
        popl  %ecx                     # Shadow EIP
        cmpl  %ecx,UREGS_eip+4(%esp)
        popl  %ecx                     # Shadow hypercall index
        jne   skip_clobber             # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl  %esp,%edi
        movl  $0xDEADBEEF,%eax
        rep   stosl
skip_clobber:
#endif
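
The two #ifndef NDEBUG blocks above work together with the shadow-parameter
copies: the handler is handed disposable copies of EBX..EBP because the
function-call ABI lets a callee clobber its stack arguments, while the saved
cpu_user_regs must stay intact in case a hypercall continuation has to be set
up; in debug builds the unused slots are additionally poisoned with
0xDEADBEEF. A rough C rendering of the poisoning step (hypothetical helper,
not Xen code):

    #include <stdint.h>

    /* Keep only the first 'nargs' shadow argument slots; poison the rest so
     * a handler that reads more arguments than hypercall_args_table admits
     * sees obvious garbage. */
    static void poison_unused_args(uint32_t shadow[6], unsigned int nargs)
    {
        for (unsigned int i = nargs; i < 6; i++)
            shadow[i] = 0xDEADBEEF;
    }
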
test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%ebx),%eax
        shl   $IRQSTAT_shift,%eax
        test  %ecx,irq_stat(%eax,1)
        jnz   process_softirqs
        testb $1,VCPU_mce_pending(%ebx)
        jnz   process_mce
        testb $1,VCPU_nmi_pending(%ebx)
        jnz   process_nmi
test_guest_events:
        movl  VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %ebx: struct vcpu */
process_mce:
        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
        jnz   test_guest_events
        sti
        movb  $0,VCPU_mce_pending(%ebx)
        call  set_guest_machinecheck_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
        movb  %dl,VCPU_mce_old_mask(%ebx)           # iret hypercall
        orl   $1 << VCPU_TRAP_MCE,%edx
        movb  %dl,VCPU_async_exception_mask(%ebx)
        jmp   process_trap

        ALIGN
/* %ebx: struct vcpu */
process_nmi:
        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%ebx)
        jnz   test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%ebx)
        call  set_guest_nmi_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
        movb  %dl,VCPU_nmi_old_mask(%ebx)           # iret hypercall
        orl   $1 << VCPU_TRAP_NMI,%edx
        movb  %dl,VCPU_async_exception_mask(%ebx)
        /* FALLTHROUGH */
process_trap:
        leal  VCPU_trap_bounce(%ebx),%edx
        call  create_bounce_frame
        jmp   test_all_events

bad_hypercall:
        movl  $-ENOSYS,UREGS_eax(%esp)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == struct vcpu                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.     */
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        movl  UREGS_eflags+4(%esp),%ecx
        movb  UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        movl  VCPU_kernel_sp(%ebx),%esi
.Lft6:  mov   VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,%ecx
        UNLIKELY_START(nz, bounce_vm86_1)
        subl  $16,%esi                 /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  UREGS_es+4(%esp),%eax
.Lft7:  movl  %eax,%gs:(%esi)
        movl  UREGS_ds+4(%esp),%eax
.Lft8:  movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
.Lft9:  movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
.Lft10: movl  %eax,%gs:12(%esi)
        UNLIKELY_END(bounce_vm86_1)
        subl  $8,%esi                  /* push SS/ESP (inter-priv iret) */
        movl  UREGS_esp+4(%esp),%eax
.Lft11: movl  %eax,%gs:(%esi)
        movl  UREGS_ss+4(%esp),%eax
.Lft12: movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  UREGS_esp+4(%esp),%esi
.Lft13: mov   UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb  TRAPBOUNCE_flags(%edx),%cl
        subl  $12,%esi
        movl  UREGS_eip+4(%esp),%eax
.Lft14: movl  %eax,%gs:(%esi)
        movl  VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                      # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%eax)
        popl  %eax
        shll  $16,%eax                 # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+4(%esp),%ax     # Bits  0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz   .Lft15
        and   $~3,%ax                  # RPL 1 -> RPL 0
#endif
.Lft15: movl  %eax,%gs:4(%esi)
        test  $0x00FF0000,%eax         # Bits 16-23: saved_upcall_mask
        setz  %ch                      # %ch == !saved_upcall_mask
        movl  UREGS_eflags+4(%esp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        shlb  $1,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                  # Fold EFLAGS.IF into %eax
.Lft16: movl  %eax,%gs:8(%esi)
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                  # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
.Lft17: movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        UNLIKELY_START(nz, bounce_failsafe)
        subl  $16,%esi                 # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jnz   .Lvm86_2
        movl  UREGS_ds+4(%esp),%eax    # non-VM86: write real selector values
.Lft22: movl  %eax,%gs:(%esi)
        movl  UREGS_es+4(%esp),%eax
.Lft23: movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
.Lft24: movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
.Lft25: movl  %eax,%gs:12(%esi)
        jmp   .Lnvm86_3
.Lvm86_2:
        xorl  %eax,%eax                # VM86: we write zero selector values
.Lft18: movl  %eax,%gs:(%esi)
.Lft19: movl  %eax,%gs:4(%esi)
.Lft20: movl  %eax,%gs:8(%esi)
.Lft21: movl  %eax,%gs:12(%esi)
        UNLIKELY_END(bounce_failsafe)
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        UNLIKELY_START(nz, bounce_vm86_3)
        xorl  %eax,%eax                /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
        UNLIKELY_END(bounce_vm86_3)
.Lnvm86_3:
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,UREGS_eip+4(%esp)
        ret
        _ASM_EXTABLE(.Lft6, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft7, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft8, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft9, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft10, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft11, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft12, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft13, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft14, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft15, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft16, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft17, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft18, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft19, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft20, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft21, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft22, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft23, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft24, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft25, domain_crash_synchronous)
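
Taken together, the stores above build the following frame on the guest
stack (a hedged C picture, lowest address first, i.e. what the guest's new
ESP points at; which pieces are present depends on the TRAPBOUNCE flags and
on whether the bounce targets an existing ring-1 activation, and in VM86 mode
the hardware-style ES/DS/FS/GS words sit above SS and are not shown -- the
struct name and layout are illustrative, not a Xen declaration):

    #include <stdint.h>

    struct bounce_frame {
        uint32_t ds, es, fs, gs;  /* TBF_FAILSAFE only                      */
        uint32_t error_code;      /* TBF_EXCEPTION_ERRCODE only             */
        uint32_t eip;
        uint32_t cs;              /* bits 16-23 carry saved_upcall_mask     */
        uint32_t eflags;          /* IF here reflects !saved_upcall_mask    */
        uint32_t esp, ss;         /* only when not returning to ring 1      */
    };
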
domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
handle_exception:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,2f)
        .text 1
        /* Exception within Xen: make sure we have valid %ds,%es. */
1:      mov   %ecx,%ds
        mov   %ecx,%es
        jmp   2f
        .previous
2:      testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                            # re-enable interrupts
1:      xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                     # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(exceptions, %eax, %ebx)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%edx)
        jmp   test_all_events

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax  # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                # no fixup code for faulting EIP?
        jz    1b
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                    # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen          # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                     # push the cpu_user_regs pointer
        pushl %esi                     # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   handle_exception

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   handle_exception

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   handle_exception

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   handle_exception

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   handle_exception

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   handle_exception

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   handle_exception

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   handle_exception

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   handle_exception

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   handle_exception

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   handle_exception

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   handle_exception

        .pushsection .init.text, "ax", @progbits
ENTRY(early_page_fault)
        SAVE_ALL(1f,1f)
1:      movl  %esp,%eax
        pushl %eax
        call  do_early_page_fault
        addl  $4,%esp
        jmp   restore_all_xen
        .popsection

handle_nmi_mce:
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
        addl  $4,%esp
        iret
#else
        # Save state but do not trash the segment registers!
        SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
.Lnmi_mce_common:
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        /*
         * NB. We may return to Xen context with polluted %ds/%es. But in such
         * cases we have put guest DS/ES on the guest stack frame, which will
         * be detected by SAVE_ALL(), or we have rolled back restore_guest.
         */
        jmp   ret_from_intr
.Lnmi_mce_xen:
        /* Check the outer (guest) context for %ds/%es state validity. */
        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%ebx)
        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
        mov   %ds,%eax
        mov   %es,%edx
        jnz   .Lnmi_mce_vm86
        /* We may have interrupted Xen while messing with %ds/%es... */
        cmpw  %ax,%cx
        mov   %ecx,%ds                 /* Ensure %ds is valid */
        cmove UREGS_ds(%ebx),%eax      /* Grab guest DS if it wasn't in %ds */
        cmpw  %dx,%cx
        movl  %eax,UREGS_ds(%ebx)      /* Ensure guest frame contains guest DS */
        cmove UREGS_es(%ebx),%edx      /* Grab guest ES if it wasn't in %es */
        mov   %ecx,%es                 /* Ensure %es is valid */
        movl  $.Lrestore_sregs_guest,%ecx
        movl  %edx,UREGS_es(%ebx)      /* Ensure guest frame contains guest ES */
        cmpl  %ecx,UREGS_eip(%esp)
        jbe   .Lnmi_mce_common
        cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
        ja    .Lnmi_mce_common
        /* Roll outer context restore_guest back to restoring %ds/%es. */
        movl  %ecx,UREGS_eip(%esp)
        jmp   .Lnmi_mce_common
.Lnmi_mce_vm86:
        /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
        mov   %ecx,%ds
        mov   %ecx,%es
        jmp   .Lnmi_mce_common
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */

ENTRY(nmi)
        pushl $TRAP_nmi<<16
        jmp   handle_nmi_mce

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   handle_nmi_mce

ENTRY(setup_vm86_frame)
        mov   %ecx,%ds
        mov   %ecx,%es
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret
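
The copy_vm86_words macro expands into 18 push/pop pairs that shift the saved
frame up by 16 bytes, making room below it for the vm86 segment registers. A
hedged C equivalent of that copy (hypothetical helper name, not Xen code):

    #include <stdint.h>
    #include <string.h>

    /* Move an 18-word stack frame forwards (to higher addresses) by 16
     * bytes; memmove copes with the overlapping source and destination. */
    static void copy_vm86_frame(uint32_t *frame)
    {
        memmove(frame + 4, frame, 18 * sizeof(uint32_t));
    }
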
.section .rodata, "a", @progbits

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long do_nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long do_device_not_available
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error
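
handle_exception and handle_nmi_mce both dispatch through this table with
"call *exception_table(,%eax,4)": the entry vector indexes an array of
handler pointers, each taking the saved register frame. In C terms, roughly
(the typedef and function below are illustrative stand-ins, not Xen
declarations; note that entry 8, double fault, is deliberately left as 0
above, hence the guard):

    struct cpu_user_regs;
    typedef void (*exception_handler_t)(struct cpu_user_regs *regs);

    extern exception_handler_t exception_table[];

    static void dispatch_exception(unsigned int vector,
                                   struct cpu_user_regs *regs)
    {
        if (exception_table[vector])   /* entry 8 (double fault) is NULL */
            exception_table[vector](regs);
    }
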
ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_sched_op_compat
        .long do_platform_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op_compat
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op_compat
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_xsm_op
        .long do_nmi_op
        .long do_sched_op
        .long do_callback_op        /* 30 */
        .long do_xenoprof_op
        .long do_event_channel_op
        .long do_physdev_op
        .long do_hvm_op
        .long do_sysctl             /* 35 */
        .long do_domctl
        .long do_kexec_op
        .long do_tmem_op
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
        .long do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 4 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_sched_op_compat */
        .byte 1 /* do_platform_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 4 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 0 /* do_ni_hypercall */      /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_xsm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_sched_op */
        .byte 2 /* do_callback_op */       /* 30 */
        .byte 2 /* do_xenoprof_op */
        .byte 2 /* do_event_channel_op */
        .byte 2 /* do_physdev_op */
        .byte 2 /* do_hvm_op */
        .byte 1 /* do_sysctl */            /* 35 */
        .byte 1 /* do_domctl */
        .byte 2 /* do_kexec_op */
        .byte 1 /* do_tmem_op */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
        .byte 1 /* do_mca */               /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
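
For orientation, the hypercall entry point earlier in this file uses these
two tables as follows: EAX selects the handler from hypercall_table
(out-of-range values are sent to bad_hypercall, which stores -ENOSYS), and
hypercall_args_table gives the number of register arguments that handler
actually consumes. A hedged C sketch of that dispatch (names and signatures
are illustrative, not the real Xen declarations):

    #include <stdint.h>

    typedef long (*hypercall_fn_t)(uint32_t, uint32_t, uint32_t,
                                   uint32_t, uint32_t, uint32_t);

    extern hypercall_fn_t hypercall_table[];
    extern const uint8_t  hypercall_args_table[];  /* argument counts */

    static long dispatch_hypercall(uint32_t eax, const uint32_t arg[6],
                                   unsigned int nr_hypercalls)
    {
        if (eax >= nr_hypercalls)
            return -38;   /* -ENOSYS, mirroring bad_hypercall above */
        /* Slots beyond hypercall_args_table[eax] are simply ignored. */
        return hypercall_table[eax](arg[0], arg[1], arg[2],
                                    arg[3], arg[4], arg[5]);
    }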