
view xen/arch/x86/x86_64/entry.S @ 21972:6f07d9ac1e7c

author      Keir Fraser <keir.fraser@citrix.com>
date        Thu Aug 05 14:41:14 2010 +0100 (2010-08-05)
parents     a3a55a6e4761
children    20c65aa19075

x86: Fix NMI injection to PV guests

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        /* TB_eip = (32-bit syscall && syscall32_addr) ?
         *          syscall32_addr : syscall_addr */
        xor   %eax,%eax
        cmpw  $FLAT_USER_CS32,UREGS_cs(%rsp)
        cmoveq VCPU_syscall32_addr(%rbx),%rax
        testq %rax,%rax
        cmovzq VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        /* TB_flags = VGCF_syscall_disables_events ? TBF_INTERRUPT : 0 */
        btl   $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        setc  %cl
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        movb  %cl,TRAPBOUNCE_flags(%rdx)
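        /* setc left %cl as 0 or 1, so the leal above computes
         * %ecx = %cl * TBF_INTERRUPT: a branchless rendering of the
         * pseudocode in the comment. */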
        call  create_bounce_frame
        andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
        jmp   test_all_events

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest
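        /* The cmpw below runs before %r11 is reused for RFLAGS, and pop
         * instructions do not alter the flags, so its result is still
         * valid at the je: a 32-bit guest CS selects sysretl, anything
         * else sysretq. */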
        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_USER_CS32,%r11
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft0:  iretq

.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
.previous
.section __ex_table,"a"
        .quad .Ldf0,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq
/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %rcx                             = SYSCALL-saved %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
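/*
 * Frame left by the trampoline, as implied by the code below (offsets from
 * %rsp at entry):
 *   24(%rsp)  space for SS (filled in with FLAT_KERNEL_SS)
 *   16(%rsp)  saved guest RSP
 *    8(%rsp)  saved guest RFLAGS
 *    0(%rsp)  saved guest CS
 * After the two pushes below (RIP from %rcx, then the error-code/entry-
 * vector slot), 24(%rsp) aliases the saved RFLAGS, which is why the user
 * RFLAGS is re-loaded into %r11 from there.
 */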
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before SAVE_ALL */
        SAVE_ALL
        GET_CURRENT(%rbx)
        movq  VCPU_domain(%rbx),%rcx
        testb $1,DOMAIN_is_32bit_pv(%rcx)
        jnz   compat_syscall
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq %r8 ; popq %rcx; popq %rdx; popq %rsi; popq %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
#define SHADOW_BYTES 0  /* No on-stack shadow state */
#endif
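        /* With the two shadow pushes in place (debug build), the UREGS_*
         * accesses in the trace-restore block below must be biased by
         * SHADOW_BYTES to reach the saved register frame. */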
        cmpb  $0,tb_init_done(%rip)
        je    1f
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movq  UREGS_rax+SHADOW_BYTES(%rsp),%rax   /* Hypercall #  */
        movq  UREGS_rdi+SHADOW_BYTES(%rsp),%rdi   /* Arg 1        */
        movq  UREGS_rsi+SHADOW_BYTES(%rsp),%rsi   /* Arg 2        */
        movq  UREGS_rdx+SHADOW_BYTES(%rsp),%rdx   /* Arg 3        */
        movq  UREGS_r10+SHADOW_BYTES(%rsp),%rcx   /* Arg 4        */
        movq  UREGS_r8 +SHADOW_BYTES(%rsp),%r8    /* Arg 5        */
        movq  UREGS_r9 +SHADOW_BYTES(%rsp),%r9    /* Arg 6        */
#undef SHADOW_BYTES
1:      leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)       # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                              # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        testb $1,VCPU_mce_pending(%rbx)
        jnz   process_mce
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
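        /* Equivalent C: deliver an event callback only if
         *   !v->vcpu_info->evtchn_upcall_mask &&
         *    v->vcpu_info->evtchn_upcall_pending */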
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_mce:
        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
        jnz   test_guest_events
        sti
        movb  $0,VCPU_mce_pending(%rbx)
        call  set_guest_machinecheck_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
        movb  %dl,VCPU_mce_old_mask(%rbx)           # iret hypercall
        orl   $1 << VCPU_TRAP_MCE,%edx
        movb  %dl,VCPU_async_exception_mask(%rbx)
        jmp   process_trap

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
        jnz   test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%rbx)
        call  set_guest_nmi_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
        movb  %dl,VCPU_nmi_old_mask(%rbx)           # iret hypercall
        orl   $1 << VCPU_TRAP_NMI,%edx
        movb  %dl,VCPU_async_exception_mask(%rbx)
        /* FALLTHROUGH */
process_trap:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        call  create_bounce_frame
        jmp   test_all_events
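        /* Both paths above set the vcpu's async_exception_mask bit to block
         * nested MCE/NMI delivery; the guest's iret hypercall restores the
         * saved *_old_mask once its handler completes. */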
bad_hypercall:
        movq  $-ENOSYS,UREGS_rax(%rsp)
        jmp   test_all_events

ENTRY(sysenter_entry)
        sti
        pushq $FLAT_USER_SS
        pushq $0
        pushfq
        .globl sysenter_eflags_saved
sysenter_eflags_saved:
        pushq $0
        pushq $0
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        cmpb  $0,VCPU_sysenter_disables_events(%rbx)
        movq  $0,UREGS_rip(%rsp)   /* null rip */
        movl  $3,UREGS_cs(%rsp)    /* ring 3 null cs */
        movq  VCPU_sysenter_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testq %rax,%rax
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        jz    2f
1:      movq  VCPU_domain(%rbx),%rdi
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        testb $1,DOMAIN_is_32bit_pv(%rdi)
        jnz   compat_sysenter
        call  create_bounce_frame
        jmp   test_all_events
2:      movl  %eax,TRAPBOUNCE_error_code(%rdx)
        movq  VCPU_gp_fault_addr(%rbx),%rax
        movb  $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        jmp   1b

ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL

        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmpb  $0,TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_int80_direct_trap

        call  create_bounce_frame
        jmp   test_all_events

int80_slow_path:
        /*
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
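        /* Error code (0x80 << 3) | 2 = 0x402: selector index 0x80 with the
         * IDT bit set, exactly what the CPU would push for such a #GP. */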
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer. */
        subq  $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
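/* Example layout with an error code and TBF_FAILSAFE present, offsets     */
/* from the final %rsi computed below:                                    */
/*   0:RCX  8:R11  16:DS  24:ES  32:FS  40:GS  48:ERRCODE  56:RIP  64:CS  */
/*   72:RFLAGS  80:RSP  88:SS                                             */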
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)             # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
.Lft4:  movq  %rax,8(%rsi)              # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al                 # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)             # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)               # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        ret
        .section __ex_table,"a"
        .quad  .Lft2,domain_crash_synchronous ,  .Lft3,domain_crash_synchronous
        .quad  .Lft4,domain_crash_synchronous ,  .Lft5,domain_crash_synchronous
        .quad  .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .quad  .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .quad .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .quad .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .previous
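        /* Each pair above registers a fixup: if one of the .LftN stores to
         * the guest stack faults, execution resumes at
         * domain_crash_synchronous instead of taking a nested fault. */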
domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        GET_CURRENT(%rax)
        movq  VCPU_domain(%rax),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        setz  %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jz    test_all_events
        jmp   compat_test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(handle_exception)
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax, %rbx)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_post_handle_exception
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   test_all_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                 # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
        jz    1f                        # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                     # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen           # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen

handle_ist_exception:
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    1f
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%rdi)
        movq  %rsp,%rsi
        movl  $UREGS_kernel_sizeof/8,%ecx
        movq  %rdi,%rsp
        rep   movsq
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        callq *(%rdx,%rax,8)
        jmp   ret_from_intr

ENTRY(nmi)
        pushq $0
        movl  $TRAP_nmi,4(%rsp)
        jmp   handle_ist_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_ist_exception

.section .rodata, "a", @progbits

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad do_nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad do_device_not_available
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_xsm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .quad do_tmem_op
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
        .quad do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
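/*
 * Per-hypercall argument counts. In debug builds the entry path above uses
 * these to poison, with 0xDEADBEEF, the argument registers a hypercall does
 * not consume (before the call) and those it does (after it returns).
 */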
ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 3 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 2 /* do_set_segment_base  */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_xsm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec             */
        .byte 1 /* do_tmem_op           */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr
        .byte 1 /* do_mca               */  /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr