debuggers.hg

view xen/arch/x86/x86_64/compat/entry.S @ 21972:6f07d9ac1e7c

x86: Fix NMI injection to PV guests

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Aug 05 14:41:14 2010 +0100 (2010-08-05)
parents a3a55a6e4761
children d8279118b4bb
line source
1 /*
2 * Compatibility hypercall routines.
3 */
5 #include <xen/config.h>
6 #include <xen/errno.h>
7 #include <xen/softirq.h>
8 #include <asm/asm_defns.h>
9 #include <asm/apicdef.h>
10 #include <asm/page.h>
11 #include <asm/desc.h>
12 #include <public/xen.h>
/*
 * Hypercall entry point for 32-bit (compat) PV guests.
 * On entry %eax holds the hypercall number; the guest's six arguments
 * arrive in ebx/ecx/edx/esi/edi/ebp (saved into the frame by SAVE_ALL
 * and reloaded below into the 64-bit calling-convention registers).
 */
14 ALIGN
15 ENTRY(compat_hypercall)
/* Push a zero error code; store TRAP_syscall in the slot's upper half. */
16 pushq $0
17 movl $TRAP_syscall,4(%rsp)
18 SAVE_ALL
19 GET_CURRENT(%rbx)
/* Reject out-of-range hypercall numbers up front. */
21 cmpl $NR_hypercalls,%eax
22 jae compat_bad_hypercall
23 #ifndef NDEBUG
24 /* Deliberately corrupt parameter regs not used by this hypercall. */
/* Copy the six args onto the stack (arg1 at the highest address)... */
25 pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
26 pushq UREGS_rbp+5*8(%rsp)
/* ...then overwrite the (6 - nr_args) highest-numbered, unused slots. */
27 leaq compat_hypercall_args_table(%rip),%r10
28 movq $6,%rcx
29 subb (%r10,%rax,1),%cl
30 movq %rsp,%rdi
31 movl $0xDEADBEEF,%eax
32 rep stosq
/* Pop the (possibly clobbered) args into the 64-bit argument registers. */
33 popq %r8 ; popq %r9 ; xchgl %r8d,%r9d /* Args 5&6: zero extend */
34 popq %rdx; popq %rcx; xchgl %edx,%ecx /* Args 3&4: zero extend */
35 popq %rdi; popq %rsi; xchgl %edi,%esi /* Args 1&2: zero extend */
36 movl UREGS_rax(%rsp),%eax
/* Shadow the hypercall # and guest EIP so the post-call code can tell
 * whether RIP changed (and, if so, skip the clobbering below). */
37 pushq %rax
38 pushq UREGS_rip+8(%rsp)
39 #define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
40 #else
41 /* Relocate argument registers and zero-extend to 64 bits. */
42 movl %eax,%eax /* Hypercall # */
43 xchgl %ecx,%esi /* Arg 2, Arg 4 */
44 movl %edx,%edx /* Arg 3 */
45 movl %edi,%r8d /* Arg 5 */
46 movl %ebp,%r9d /* Arg 6 */
47 movl UREGS_rbx(%rsp),%edi /* Arg 1 */
48 #define SHADOW_BYTES 0 /* No on-stack shadow state */
49 #endif
/* If tracing is initialised, emit a trace record for this hypercall. */
50 cmpb $0,tb_init_done(%rip)
51 je 1f
52 call trace_hypercall
53 /* Now restore all the registers that trace_hypercall clobbered */
54 movl UREGS_rax+SHADOW_BYTES(%rsp),%eax /* Hypercall # */
55 movl UREGS_rbx+SHADOW_BYTES(%rsp),%edi /* Arg 1 */
56 movl UREGS_rcx+SHADOW_BYTES(%rsp),%esi /* Arg 2 */
57 movl UREGS_rdx+SHADOW_BYTES(%rsp),%edx /* Arg 3 */
58 movl UREGS_rsi+SHADOW_BYTES(%rsp),%ecx /* Arg 4 */
59 movl UREGS_rdi+SHADOW_BYTES(%rsp),%r8d /* Arg 5 */
60 movl UREGS_rbp+SHADOW_BYTES(%rsp),%r9d /* Arg 6 */
61 #undef SHADOW_BYTES
/* Dispatch through the compat hypercall table, indexed by number. */
62 1: leaq compat_hypercall_table(%rip),%r10
63 PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
64 callq *(%r10,%rax,8)
65 #ifndef NDEBUG
66 /* Deliberately corrupt parameter regs used by this hypercall. */
67 popq %r10 # Shadow RIP
68 cmpq %r10,UREGS_rip+8(%rsp)
69 popq %rcx # Shadow hypercall index
70 jne compat_skip_clobber /* If RIP has changed then don't clobber. */
/* %cl = argument count for this hypercall; clobber exactly that many
 * saved guest argument registers. */
71 leaq compat_hypercall_args_table(%rip),%r10
72 movb (%r10,%rcx,1),%cl
73 movl $0xDEADBEEF,%r10d
74 testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
75 cmpb $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
76 cmpb $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
77 cmpb $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
78 cmpb $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
79 cmpb $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
80 compat_skip_clobber:
81 #endif
82 movl %eax,UREGS_rax(%rsp) # save the return value
84 /* %rbx: struct vcpu */
/*
 * Exit-check loop: with interrupts disabled, test (in order) pending
 * softirqs, machine-checks, and NMIs; then deliver a guest event upcall
 * if one is pending and unmasked; otherwise return to the guest.
 * Every handler jumps back here so the tests are re-run until clear.
 */
85 ENTRY(compat_test_all_events)
86 cli # tests must not race interrupts
87 /*compat_test_softirqs:*/
/* Any softirq bits set in this vcpu's processor's irq_stat entry? */
88 movl VCPU_processor(%rbx),%eax
89 shlq $IRQSTAT_shift,%rax
90 leaq irq_stat(%rip),%rcx
91 testl $~0,(%rcx,%rax,1)
92 jnz compat_process_softirqs
/* Pending machine-check or NMI to forward to the guest? */
93 testb $1,VCPU_mce_pending(%rbx)
94 jnz compat_process_mce
95 testb $1,VCPU_nmi_pending(%rbx)
96 jnz compat_process_nmi
97 compat_test_guest_events:
/* Return straight to the guest if upcalls are masked or none pending. */
98 movq VCPU_vcpu_info(%rbx),%rax
99 testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
100 jnz compat_restore_all_guest
101 testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax)
102 jz compat_restore_all_guest
103 /*compat_process_guest_events:*/
104 sti
/* Bounce to the guest's registered event callback, then re-test. */
105 leaq VCPU_trap_bounce(%rbx),%rdx
106 movl VCPU_event_addr(%rbx),%eax
107 movl %eax,TRAPBOUNCE_eip(%rdx)
108 movl VCPU_event_sel(%rbx),%eax
109 movw %ax,TRAPBOUNCE_cs(%rdx)
110 movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
111 call compat_create_bounce_frame
112 jmp compat_test_all_events
114 ALIGN
115 /* %rbx: struct vcpu */
/* Run pending softirqs with interrupts enabled, then re-run the tests. */
116 compat_process_softirqs:
117 sti
118 call do_softirq
119 jmp compat_test_all_events
121 ALIGN
122 /* %rbx: struct vcpu */
/*
 * Forward a pending machine-check to the guest, unless MCE delivery is
 * currently masked in async_exception_mask.
 */
123 compat_process_mce:
124 testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
125 jnz compat_test_guest_events
126 sti
127 movb $0,VCPU_mce_pending(%rbx)
128 call set_guest_machinecheck_trapbounce
/* Zero return: nothing to deliver; go back and re-test events. */
129 testl %eax,%eax
130 jz compat_test_all_events
/* Save the old mask and mask further MCEs until the guest irets. */
131 movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
132 movb %dl,VCPU_mce_old_mask(%rbx) # iret hypercall
133 orl $1 << VCPU_TRAP_MCE,%edx
134 movb %dl,VCPU_async_exception_mask(%rbx)
135 jmp compat_process_trap
137 ALIGN
138 /* %rbx: struct vcpu */
/*
 * Forward a pending NMI to the guest, unless NMI delivery is currently
 * masked in async_exception_mask.  Mirrors the MCE path above.
 */
139 compat_process_nmi:
140 testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
141 jnz compat_test_guest_events
142 sti
143 movb $0,VCPU_nmi_pending(%rbx)
144 call set_guest_nmi_trapbounce
/* Zero return: nothing to deliver; go back and re-test events. */
145 testl %eax,%eax
146 jz compat_test_all_events
/* Save the old mask and mask further NMIs until the guest irets. */
147 movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
148 movb %dl,VCPU_nmi_old_mask(%rbx) # iret hypercall
149 orl $1 << VCPU_TRAP_NMI,%edx
150 movb %dl,VCPU_async_exception_mask(%rbx)
151 /* FALLTHROUGH */
/* Deliver the trap_bounce prepared by the set_guest_*_trapbounce call,
 * then re-run the event tests. */
152 compat_process_trap:
153 leaq VCPU_trap_bounce(%rbx),%rdx
154 call compat_create_bounce_frame
155 jmp compat_test_all_events
/* Out-of-range hypercall number: return -ENOSYS to the guest. */
157 compat_bad_hypercall:
158 movl $-ENOSYS,UREGS_rax(%rsp)
159 jmp compat_test_all_events
161 /* %rbx: struct vcpu, interrupts disabled */
/*
 * Final return to the compat guest via iretq.  A faulting iretq (bad
 * guest state) is caught via the __pre_ex_table entry below (.Lft0 ->
 * .Lfx0), which rebuilds hypervisor state and ultimately bounces to
 * the guest's registered failsafe callback.
 */
162 compat_restore_all_guest:
163 ASSERT_INTERRUPTS_DISABLED
164 RESTORE_ALL
165 addq $8,%rsp
166 .Lft0: iretq
168 .section .fixup,"ax"
/* iretq faulted: re-save the frame and fabricate a synthetic exception
 * frame (on a 16-byte-aligned stack) for handle_exception. */
169 .Lfx0: sti
170 SAVE_ALL
171 movq UREGS_error_code(%rsp),%rsi
172 movq %rsp,%rax
173 andq $~0xf,%rsp
174 pushq $__HYPERVISOR_DS # SS
175 pushq %rax # RSP
176 pushfq # RFLAGS
177 pushq $__HYPERVISOR_CS # CS
/* Fault RIP in the synthetic frame is .Ldf0 (has an __ex_table entry). */
178 leaq .Ldf0(%rip),%rax
179 pushq %rax # RIP
180 pushq %rsi # error_code/entry_vector
181 jmp handle_exception
182 .Ldf0: GET_CURRENT(%rbx)
183 jmp compat_test_all_events
/* Bounce to the guest's registered failsafe handler. */
184 compat_failsafe_callback:
185 GET_CURRENT(%rbx)
186 leaq VCPU_trap_bounce(%rbx),%rdx
187 movl VCPU_failsafe_addr(%rbx),%eax
188 movl %eax,TRAPBOUNCE_eip(%rdx)
189 movl VCPU_failsafe_sel(%rbx),%eax
190 movw %ax,TRAPBOUNCE_cs(%rdx)
191 movb $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
/* Optionally also disable event delivery, per guest context flags. */
192 btq $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
193 jnc 1f
194 orb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
195 1: call compat_create_bounce_frame
196 jmp compat_test_all_events
197 .previous
198 .section __pre_ex_table,"a"
199 .quad .Lft0,.Lfx0
200 .previous
201 .section __ex_table,"a"
202 .quad .Ldf0,compat_failsafe_callback
203 .previous
205 /* %rdx: trap_bounce, %rbx: struct vcpu */
/* After a C exception handler: if the trap_bounce carries a pending
 * exception (TBF_EXCEPTION), deliver it and clear the flags. */
206 ENTRY(compat_post_handle_exception)
207 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
208 jz compat_test_all_events
209 call compat_create_bounce_frame
210 movb $0,TRAPBOUNCE_flags(%rdx)
211 jmp compat_test_all_events
/*
 * SYSCALL from a compat guest (%rbx: struct vcpu): bounce to the
 * registered 32-bit syscall handler, or, if no valid handler selector
 * is registered, inject #GP with RIP wound back over the 2-byte
 * syscall instruction.
 */
213 ENTRY(compat_syscall)
/* %cl <- 1 iff syscall32 entry disables event delivery. */
214 cmpb $0,VCPU_syscall32_disables_events(%rbx)
215 movzwl VCPU_syscall32_sel(%rbx),%esi
216 movq VCPU_syscall32_addr(%rbx),%rax
217 setne %cl
218 leaq VCPU_trap_bounce(%rbx),%rdx
/* Null selector (0-3) => no handler registered => #GP path at 2:. */
219 testl $~3,%esi
/* %ecx = TBF_INTERRUPT iff events are to be disabled on entry. */
220 leal (,%rcx,TBF_INTERRUPT),%ecx
221 jz 2f
222 1: movq %rax,TRAPBOUNCE_eip(%rdx)
223 movw %si,TRAPBOUNCE_cs(%rdx)
224 movb %cl,TRAPBOUNCE_flags(%rdx)
225 call compat_create_bounce_frame
226 jmp compat_test_all_events
/* Convert to #GP: rewind RIP past the syscall insn, use the guest's
 * registered #GP handler with a zero error code. */
227 2: movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
228 subl $2,UREGS_rip(%rsp)
229 movq VCPU_gp_fault_addr(%rbx),%rax
230 movzwl VCPU_gp_fault_sel(%rbx),%esi
231 movb $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
232 movl $0,TRAPBOUNCE_error_code(%rdx)
233 jmp 1b
/*
 * SYSENTER from a compat guest: pick the sysenter callback selector,
 * or the #GP selector when the entry vector says this was a #GP or the
 * sysenter selector is null, then bounce.
 * NOTE(review): %rdx appears to hold trap_bounce and %rbx the vcpu,
 * as in compat_post_handle_exception -- confirm at the call sites.
 */
235 ENTRY(compat_sysenter)
236 cmpl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
237 movzwl VCPU_sysenter_sel(%rbx),%eax
238 movzwl VCPU_gp_fault_sel(%rbx),%ecx
239 cmovel %ecx,%eax
/* Null selector (0-3): fall back to the #GP selector. */
240 testl $~3,%eax
241 movl $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
242 cmovzl %ecx,%eax
243 movw %ax,TRAPBOUNCE_cs(%rdx)
244 call compat_create_bounce_frame
245 jmp compat_test_all_events
/* int80 direct trap: deliver a trap_bounce set up by the caller
 * (register contract per compat_create_bounce_frame below). */
247 ENTRY(compat_int80_direct_trap)
248 call compat_create_bounce_frame
249 jmp compat_test_all_events
251 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
252 /* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
253 /* %rdx: trap_bounce, %rbx: struct vcpu */
254 /* On return only %rbx and %rdx are guaranteed non-clobbered. */
/*
 * All guest-stack writes below go through %fs and carry __ex_table
 * entries (.Lft1-.Lft13): a faulting access crashes the domain, never
 * the hypervisor.
 */
255 compat_create_bounce_frame:
256 ASSERT_INTERRUPTS_ENABLED
/* Preserve the current %fs in %edi; restored at .Lft13 below. */
257 mov %fs,%edi
/* Was the guest interrupted in user context? If not, jump to 1:. */
258 testb $2,UREGS_cs+8(%rsp)
259 jz 1f
260 /* Push new frame at registered guest-OS stack base. */
261 movl VCPU_kernel_sp(%rbx),%esi
262 .Lft1: mov VCPU_kernel_ss(%rbx),%fs
263 subl $2*4,%esi
264 movl UREGS_rsp+8(%rsp),%eax
265 .Lft2: movl %eax,%fs:(%rsi)
266 movl UREGS_ss+8(%rsp),%eax
267 .Lft3: movl %eax,%fs:4(%rsi)
268 jmp 2f
269 1: /* In kernel context already: push new frame at existing %rsp. */
270 movl UREGS_rsp+8(%rsp),%esi
271 .Lft4: mov UREGS_ss+8(%rsp),%fs
272 2:
273 movb TRAPBOUNCE_flags(%rdx),%cl
274 subl $3*4,%esi
/* TBF_INTERRUPT deliveries additionally set the guest's upcall mask;
 * the pre-bounce mask is saved into the frame alongside CS. */
275 movq VCPU_vcpu_info(%rbx),%rax
276 pushq COMPAT_VCPUINFO_upcall_mask(%rax)
277 testb $TBF_INTERRUPT,%cl
278 setnz %ch # TBF_INTERRUPT -> set upcall mask
279 orb %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
280 popq %rax
281 shll $16,%eax # Bits 16-23: saved_upcall_mask
282 movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
283 .Lft5: movl %eax,%fs:4(%rsi) # CS / saved_upcall_mask
/* Reflect the saved upcall mask into the pushed EFLAGS.IF bit. */
284 shrl $16,%eax
285 testb %al,%al # Bits 0-7: saved_upcall_mask
286 setz %ch # %ch == !saved_upcall_mask
287 movl UREGS_eflags+8(%rsp),%eax
288 andl $~X86_EFLAGS_IF,%eax
289 addb %ch,%ch # Bit 9 (EFLAGS.IF)
290 orb %ch,%ah # Fold EFLAGS.IF into %eax
291 .Lft6: movl %eax,%fs:2*4(%rsi) # EFLAGS
292 movl UREGS_rip+8(%rsp),%eax
293 .Lft7: movl %eax,%fs:(%rsi) # EIP
/* Optional error code slot. */
294 testb $TBF_EXCEPTION_ERRCODE,%cl
295 jz 1f
296 subl $4,%esi
297 movl TRAPBOUNCE_error_code(%rdx),%eax
298 .Lft8: movl %eax,%fs:(%rsi) # ERROR CODE
299 1:
/* Failsafe frames additionally carry the data segment registers. */
300 testb $TBF_FAILSAFE,%cl
301 jz 2f
302 subl $4*4,%esi
303 movl %gs,%eax
304 .Lft9: movl %eax,%fs:3*4(%rsi) # GS
305 .Lft10: movl %edi,%fs:2*4(%rsi) # FS
306 movl %es,%eax
307 .Lft11: movl %eax,%fs:1*4(%rsi) # ES
308 movl %ds,%eax
309 .Lft12: movl %eax,%fs:0*4(%rsi) # DS
310 2:
311 /* Rewrite our stack frame and return to guest-OS mode. */
312 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
313 andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
314 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
315 mov %fs,UREGS_ss+8(%rsp)
316 movl %esi,UREGS_rsp+8(%rsp)
317 .Lft13: mov %edi,%fs
318 movzwl TRAPBOUNCE_cs(%rdx),%eax
319 /* Null selectors (0-3) are not allowed. */
320 testl $~3,%eax
321 jz domain_crash_synchronous
322 movl %eax,UREGS_cs+8(%rsp)
323 movl TRAPBOUNCE_eip(%rdx),%eax
324 movl %eax,UREGS_rip+8(%rsp)
325 ret
326 .section .fixup,"ax"
/* Reloading the saved %fs faulted: retry with a null selector. */
327 .Lfx13:
328 xorl %edi,%edi
329 jmp .Lft13
330 .previous
331 .section __ex_table,"a"
332 .quad .Lft1,domain_crash_synchronous , .Lft2,compat_crash_page_fault
333 .quad .Lft3,compat_crash_page_fault_4 , .Lft4,domain_crash_synchronous
334 .quad .Lft5,compat_crash_page_fault_4 , .Lft6,compat_crash_page_fault_8
335 .quad .Lft7,compat_crash_page_fault , .Lft8,compat_crash_page_fault
336 .quad .Lft9,compat_crash_page_fault_12, .Lft10,compat_crash_page_fault_8
337 .quad .Lft11,compat_crash_page_fault_4 , .Lft12,compat_crash_page_fault
338 .quad .Lft13,.Lfx13
339 .previous
/*
 * A guest-stack write in compat_create_bounce_frame faulted.  The _12/
 * _8/_4 entry points fall through, each adding 4 to %esi so that %esi
 * ends up holding the faulting guest address; we then dump the page
 * walk and crash the domain.
 */
341 compat_crash_page_fault_12:
342 addl $4,%esi
343 compat_crash_page_fault_8:
344 addl $4,%esi
345 compat_crash_page_fault_4:
346 addl $4,%esi
347 compat_crash_page_fault:
/* Restore the previously saved %fs (in %edi) before calling into C. */
348 .Lft14: mov %edi,%fs
349 movl %esi,%edi
350 call show_page_walk
351 jmp domain_crash_synchronous
352 .section .fixup,"ax"
/* Restoring %fs faulted as well: retry with a null selector. */
353 .Lfx14:
354 xorl %edi,%edi
355 jmp .Lft14
356 .previous
357 .section __ex_table,"a"
358 .quad .Lft14,.Lfx14
359 .previous
361 .section .rodata, "a", @progbits
/*
 * Dispatch table of compat hypercall handlers, one 8-byte pointer per
 * hypercall number.  Unimplemented slots are padded with
 * compat_ni_hypercall up to __HYPERVISOR_arch_0, then do_mca at 48,
 * then padded again up to NR_hypercalls entries.
 */
363 ENTRY(compat_hypercall_table)
364 .quad compat_set_trap_table /* 0 */
365 .quad do_mmu_update
366 .quad compat_set_gdt
367 .quad do_stack_switch
368 .quad compat_set_callbacks
369 .quad do_fpu_taskswitch /* 5 */
370 .quad do_sched_op_compat
371 .quad compat_platform_op
372 .quad do_set_debugreg
373 .quad do_get_debugreg
374 .quad compat_update_descriptor /* 10 */
375 .quad compat_ni_hypercall
376 .quad compat_memory_op
377 .quad compat_multicall
378 .quad compat_update_va_mapping
379 .quad compat_set_timer_op /* 15 */
380 .quad do_event_channel_op_compat
381 .quad compat_xen_version
382 .quad do_console_io
383 .quad compat_physdev_op_compat
384 .quad compat_grant_table_op /* 20 */
385 .quad compat_vm_assist
386 .quad compat_update_va_mapping_otherdomain
387 .quad compat_iret
388 .quad compat_vcpu_op
389 .quad compat_ni_hypercall /* 25 */
390 .quad compat_mmuext_op
391 .quad do_xsm_op
392 .quad compat_nmi_op
393 .quad compat_sched_op
394 .quad compat_callback_op /* 30 */
395 .quad compat_xenoprof_op
396 .quad do_event_channel_op
397 .quad compat_physdev_op
398 .quad do_hvm_op
399 .quad do_sysctl /* 35 */
400 .quad do_domctl
401 .quad compat_kexec_op
402 .quad do_tmem_op
403 .rept __HYPERVISOR_arch_0-((.-compat_hypercall_table)/8)
404 .quad compat_ni_hypercall
405 .endr
406 .quad do_mca /* 48 */
407 .rept NR_hypercalls-((.-compat_hypercall_table)/8)
408 .quad compat_ni_hypercall
409 .endr
/*
 * Per-hypercall argument counts (one byte each), consumed by the
 * debug-build (#ifndef NDEBUG) register-clobbering logic in
 * compat_hypercall.  Layout parallels compat_hypercall_table: padded
 * with zeros to __HYPERVISOR_arch_0, do_mca at 48, then padded to
 * NR_hypercalls.
 */
411 ENTRY(compat_hypercall_args_table)
412 .byte 1 /* compat_set_trap_table */ /* 0 */
413 .byte 4 /* compat_mmu_update */
414 .byte 2 /* compat_set_gdt */
415 .byte 2 /* compat_stack_switch */
416 .byte 4 /* compat_set_callbacks */
417 .byte 1 /* compat_fpu_taskswitch */ /* 5 */
418 .byte 2 /* compat_sched_op_compat */
419 .byte 1 /* compat_platform_op */
420 .byte 2 /* compat_set_debugreg */
421 .byte 1 /* compat_get_debugreg */
422 .byte 4 /* compat_update_descriptor */ /* 10 */
423 .byte 0 /* compat_ni_hypercall */
424 .byte 2 /* compat_memory_op */
425 .byte 2 /* compat_multicall */
426 .byte 4 /* compat_update_va_mapping */
427 .byte 2 /* compat_set_timer_op */ /* 15 */
428 .byte 1 /* compat_event_channel_op_compat */
429 .byte 2 /* compat_xen_version */
430 .byte 3 /* compat_console_io */
431 .byte 1 /* compat_physdev_op_compat */
432 .byte 3 /* compat_grant_table_op */ /* 20 */
433 .byte 2 /* compat_vm_assist */
434 .byte 5 /* compat_update_va_mapping_otherdomain */
435 .byte 0 /* compat_iret */
436 .byte 3 /* compat_vcpu_op */
437 .byte 0 /* compat_ni_hypercall */ /* 25 */
438 .byte 4 /* compat_mmuext_op */
439 .byte 1 /* do_xsm_op */
440 .byte 2 /* compat_nmi_op */
441 .byte 2 /* compat_sched_op */
442 .byte 2 /* compat_callback_op */ /* 30 */
443 .byte 2 /* compat_xenoprof_op */
444 .byte 2 /* compat_event_channel_op */
445 .byte 2 /* compat_physdev_op */
446 .byte 2 /* do_hvm_op */
447 .byte 1 /* do_sysctl */ /* 35 */
448 .byte 1 /* do_domctl */
449 .byte 2 /* compat_kexec_op */
450 .byte 1 /* do_tmem_op */
451 .rept __HYPERVISOR_arch_0-(.-compat_hypercall_args_table)
452 .byte 0 /* compat_ni_hypercall */
453 .endr
454 .byte 1 /* do_mca */
455 .rept NR_hypercalls-(.-compat_hypercall_args_table)
456 .byte 0 /* compat_ni_hypercall */
457 .endr