debuggers.hg: view of xen/arch/x86/x86_64/entry.S @ 4632:05621922c024
author  mafetter@fleming.research
date    Wed Apr 20 13:02:32 2005 +0000 (2005-04-20)
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_CURRENT(reg)         \
        movq $STACK_SIZE-8, reg; \
        orq  %rsp, reg;          \
        andq $~7,reg;            \
        movq (reg),reg;
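
GET_CURRENT works because each physical CPU runs on its own STACK_SIZE-aligned
stack, and Xen stores a pointer to the currently running struct exec_domain in
the top eight bytes of that stack. Rounding any %rsp inside the stack up to its
last 8-byte slot therefore recovers the pointer. A minimal C sketch of the same
arithmetic (the function name and the STACK_SIZE value here are illustrative,
not taken from Xen):

    #include <stdint.h>

    #define STACK_SIZE (8 * 4096)   /* assumed value for this sketch only */

    struct exec_domain;

    /* C rendering of GET_CURRENT: or %rsp with STACK_SIZE-8 to land in the
     * topmost 8-byte slot of the aligned stack block, mask down to 8-byte
     * alignment, then load the exec_domain pointer stored there. */
    static inline struct exec_domain *get_current_sketch(uintptr_t rsp)
    {
        uintptr_t slot = (rsp | (STACK_SIZE - 8)) & ~(uintptr_t)7;
        return *(struct exec_domain **)slot;
    }
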
        ALIGN
/* %rbx: struct exec_domain, interrupts disabled */
switch_to_kernel:
        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
        movq  EDOMAIN_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $0,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame

/* %rbx: struct exec_domain */
restore_all_guest:
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest
        addq  $8,%rsp
        popq  %rcx                     # RIP
        popq  %r11                     # CS
        cmpw  $__GUEST_CS32,%r11w
        popq  %r11                     # RFLAGS
        cli                            # No interrupts after stack switch
        popq  %rsp                     # RSP
        je    1f
        sysretq
1:      sysretl
        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
FLT1:   iretq

.section .fixup,"ax"
FIX1:   popq  -15*8-8(%rsp)            # error_code/entry_vector
        SAVE_ALL                       # 15*8 bytes pushed
        movq  -8(%rsp),%rsi            # error_code/entry_vector
        sti                            # after stack abuse (-1024(%rsp))
        pushq $__HYPERVISOR_DS         # SS
        leaq  8(%rsp),%rax
        pushq %rax                     # RSP
        pushf                          # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  DBLFLT1(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
        movq  EDOMAIN_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad FLT1,FIX1
.previous
.section __ex_table,"a"
        .quad DBLFLT1,failsafe_callback
.previous
        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %r11, %rcx                       = SYSCALL-saved %rflags and %rip
 *  NB. We must move %r10 to %rcx for the C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
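
Seen from C, the convention above makes each hypercall handler an ordinary
SysV-ABI function of up to six arguments; the only wrinkle is that SYSCALL
itself clobbers %rcx, so guests pass argument four in %r10 and the
"movq %r10,%rcx" below restores the ABI before the indirect call. A sketch of
the dispatch this entry point performs (the typedef, the NR_HYPERCALLS value,
and the wrapper name are stand-ins; the real table is the hypercall_table
defined at the end of this file):

    typedef unsigned long (*hypercall_fn_t)(unsigned long, unsigned long,
                                            unsigned long, unsigned long,
                                            unsigned long, unsigned long);

    #define NR_HYPERCALLS 32  /* stand-in for NR_hypercalls; a power of two */

    extern hypercall_fn_t hypercall_table[NR_HYPERCALLS];

    /* Equivalent of: andq $(NR_hypercalls-1),%rax; callq *(%r10,%rax,8).
     * Masking the vector means a bogus %rax can only select an existing
     * slot; unused slots hold do_ni_hypercall. */
    static unsigned long hypercall_dispatch_sketch(unsigned long vector,
            unsigned long a1, unsigned long a2, unsigned long a3,
            unsigned long a4 /* guest's %r10 */,
            unsigned long a5, unsigned long a6)
    {
        return hypercall_table[vector & (NR_HYPERCALLS - 1)]
                   (a1, a2, a3, a4, a5, a6);
    }
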
        ALIGN
ENTRY(syscall_enter)
        movl  $__GUEST_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        sti
        movq  %r10,%rcx
        andq  $(NR_hypercalls-1),%rax
        leaq  SYMBOL_NAME(hypercall_table)(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax)
        callq *(%r10,%rax,8)
        movq  %rax,XREGS_rax(%rsp)     # save the return value
/* %rbx: struct exec_domain */
test_all_events:
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  EDOMAIN_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  SYMBOL_NAME(irq_stat)(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movq  EDOMAIN_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
        movq  EDOMAIN_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        movq  EDOMAIN_vcpu_info(%rbx),%rax
        movb  $1,VCPUINFO_upcall_mask(%rax)  # Upcalls masked during delivery
        jmp   test_all_events
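
test_all_events is effectively a loop: softirqs are drained first, then one
pending guest event is turned into a bounce frame with upcalls masked, and the
tests rerun until nothing is deliverable. In rough C (the struct, helper names,
and wrapper are illustrative, mirroring the EDOMAIN_*/VCPUINFO_* offsets used
above rather than the real Xen definitions):

    struct vcpu_info_sketch {
        unsigned char upcall_pending;  /* VCPUINFO_upcall_pending */
        unsigned char upcall_mask;     /* VCPUINFO_upcall_mask    */
    };

    extern int  softirq_pending_sketch(int cpu);       /* irq_stat[] test  */
    extern void do_softirq(void);
    extern void bounce_to_event_callback_sketch(void); /* bounce frame setup */

    static void test_all_events_sketch(struct vcpu_info_sketch *vi, int cpu)
    {
        for (;;) {
            /* cli: these tests must not race newly arriving events */
            if (softirq_pending_sketch(cpu)) {
                do_softirq();          /* with interrupts re-enabled */
                continue;              /* then retest everything     */
            }
            if (vi->upcall_mask || !vi->upcall_pending)
                break;                 /* nothing deliverable: resume guest */
            bounce_to_event_callback_sketch();
            vi->upcall_mask = 1;       /* upcalls masked during delivery */
        }
        /* fall into restore_all_guest */
    }
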
#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, rsp, rip,
 * and rflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. Host rflags is not saved by VMX, so we
 * set it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared to xen_regs, we skip pushing the following:
 *     (13) u64 gs_base_user;
 *     (12) u64 gs_base_kernel;
 *     (11) u64 fs_base;
 *     (10) u64 gs;
 *      (9) u64 fs;
 *      (8) u64 ds;
 *      (7) u64 es;
 *          <- get_stack_bottom() (= HOST_ESP)
 *      (6) u64 ss;
 *      (5) u64 rsp;
 *      (4) u64 rflags;
 *      (3) u64 cs;
 *      (2) u64 rip;
 *    (2/1) u32 entry_vector;
 *    (1/1) u32 error_code;
 * However, get_stack_bottom() actually returns 64 bytes before the real
 * bottom of the stack to allow space for: domain pointer, DS, ES, FS, GS.
 * Therefore, we effectively skip 6 registers.
 */
#define VMX_MONITOR_RFLAGS 0x202       /* IF on */
#define NR_SKIPPED_REGS    6           /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS            \
        pushq $VMX_MONITOR_RFLAGS;        \
        popfq;                            \
        subq  $(NR_SKIPPED_REGS*8), %rsp; \
        pushq %rdi;                       \
        pushq %rsi;                       \
        pushq %rdx;                       \
        pushq %rcx;                       \
        pushq %rax;                       \
        pushq %r8;                        \
        pushq %r9;                        \
        pushq %r10;                       \
        pushq %r11;                       \
        pushq %rbx;                       \
        pushq %rbp;                       \
        pushq %r12;                       \
        pushq %r13;                       \
        pushq %r14;                       \
        pushq %r15;
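
Read as data, the frame left by VMX_SAVE_ALL_NOSEGREGS is the bottom of a
struct xen_regs with the slots the VMCS already holds left unwritten, so
offsets such as XREGS_rax remain valid for C code like do_IRQ(). A sketch of
that layout (lowest address first; the struct name and field grouping follow
the numbered comment above, not the real xen_regs definition):

    /* Stack frame after VMX_SAVE_ALL_NOSEGREGS, lowest address first.
     * The six reserved quadwords are never written on this path: VMX
     * keeps the guest's rip/cs/rflags/rsp/ss (and the error code slot)
     * in the VMCS instead. */
    struct vmexit_frame_sketch {
        /* pushed last .. pushed first: */
        unsigned long r15, r14, r13, r12, rbp, rbx;
        unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
        /* NR_SKIPPED_REGS == 6 reserved slots, xen_regs (1)-(6) above: */
        unsigned long skipped[6];  /* ec/ev, rip, cs, rflags, rsp, ss */
    };
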
ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call  SYMBOL_NAME(vmx_vmexit_handler)
        jmp   vmx_asm_do_resume

ENTRY(vmx_asm_do_launch)
        popq  %r15
        popq  %r14
        popq  %r13
        popq  %r12
        popq  %rbp
        popq  %rbx
        popq  %r11
        popq  %r10
        popq  %r9
        popq  %r8
        popq  %rax
        popq  %rcx
        popq  %rdx
        popq  %rsi
        popq  %rdi
        addq  $(NR_SKIPPED_REGS*8), %rsp
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushfq
        call  SYMBOL_NAME(vm_launch_fail)
        hlt
        ALIGN

ENTRY(vmx_asm_do_resume)
vmx_test_all_events:
        GET_CURRENT(%rbx)
/* test_all_events: */
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  EDOMAIN_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  SYMBOL_NAME(irq_stat)(%rip), %rdx
        testl $~0,(%rdx,%rax,1)
        jnz   vmx_process_softirqs

vmx_restore_all_guest:
        call  SYMBOL_NAME(load_cr2)
        /*
         * We are going back to a VMX-based VM.
         * By this time, all the setup in the VMCS must be complete.
         */
        popq  %r15
        popq  %r14
        popq  %r13
        popq  %r12
        popq  %rbp
        popq  %rbx
        popq  %r11
        popq  %r10
        popq  %r9
        popq  %r8
        popq  %rax
        popq  %rcx
        popq  %rdx
        popq  %rsi
        popq  %rdi
        addq  $(NR_SKIPPED_REGS*8), %rsp
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushfq
        call  SYMBOL_NAME(vm_resume_fail)
        /* Should never reach here */
        hlt

        ALIGN
vmx_process_softirqs:
        sti
        call  SYMBOL_NAME(do_softirq)
        jmp   vmx_test_all_events
#endif
        ALIGN
/* %rbx: struct exec_domain */
process_softirqs:
        sti
        call  SYMBOL_NAME(do_softirq)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct exec_domain                           */
/* On return only %rbx is guaranteed non-clobbered.                      */
create_bounce_frame:
        testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  SYMBOL_NAME(toggle_guest_mode)
        popq  %rdx
        movq  EDOMAIN_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  XREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,XREGS_cs+8(%rsp)   # Indicate kernel context to guest.
2:      movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                       # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous # Above Xen private area? Then okay.
1:      subq  $40,%rsi
        movq  XREGS_ss+8(%rsp),%rax
FLT2:   movq  %rax,32(%rsi)            # SS
        movq  XREGS_rsp+8(%rsp),%rax
FLT3:   movq  %rax,24(%rsi)            # RSP
        movq  XREGS_eflags+8(%rsp),%rax
FLT4:   movq  %rax,16(%rsi)            # RFLAGS
        movq  XREGS_cs+8(%rsp),%rax
FLT5:   movq  %rax,8(%rsi)             # CS
        movq  XREGS_rip+8(%rsp),%rax
FLT6:   movq  %rax,(%rsi)              # RIP
        movb  TRAPBOUNCE_flags(%rdx),%cl
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
FLT7:   movq  %rax,(%rsi)              # ERROR CODE
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subq  $8,%rsi
        movq  TRAPBOUNCE_cr2(%rdx),%rax
FLT8:   movq  %rax,(%rsi)              # CR2
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
FLT9:   movq  %rax,24(%rsi)            # GS
        movl  %fs,%eax
FLT10:  movq  %rax,16(%rsi)            # FS
        movl  %es,%eax
FLT11:  movq  %rax,8(%rsi)             # ES
        movl  %ds,%eax
FLT12:  movq  %rax,(%rsi)              # DS
2:      subq  $16,%rsi
        movq  XREGS_r11+8(%rsp),%rax
FLT13:  movq  %rax,8(%rsi)             # R11
        movq  XREGS_rcx+8(%rsp),%rax
FLT14:  movq  %rax,(%rsi)              # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        movq  $TRAP_syscall,XREGS_entry_vector+8(%rsp)
        andl  $0xfffcbeff,XREGS_eflags+8(%rsp)
        movq  $__GUEST_SS,XREGS_ss+8(%rsp)
        movq  %rsi,XREGS_rsp+8(%rsp)
        movq  $__GUEST_CS,XREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        movq  %rax,XREGS_rip+8(%rsp)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        ret
.section __ex_table,"a"
        .quad FLT2,domain_crash_synchronous  , FLT3,domain_crash_synchronous
        .quad FLT4,domain_crash_synchronous  , FLT5,domain_crash_synchronous
        .quad FLT6,domain_crash_synchronous  , FLT7,domain_crash_synchronous
        .quad FLT8,domain_crash_synchronous  , FLT9,domain_crash_synchronous
        .quad FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .quad FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .quad FLT14,domain_crash_synchronous
.previous
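
The frame that create_bounce_frame writes to the guest stack can be pictured as
a struct growing downward from the guest's stack pointer: the iret-style
quintuple is laid down first at the top, the optional words are placed beneath
it, and %rcx/%r11 (which SYSCALL will clobber on the way back into Xen) always
end up lowest. As a sketch in C, fields in increasing address order
(illustrative only; the optional fields exist only when the matching TBF_* flag
is set, so no single struct describes every frame):

    /* What the guest's event/exception handler sees at its %rsp after a
     * bounce, lowest address first. */
    struct bounce_frame_sketch {
        unsigned long rcx, r11;        /* always: SYSCALL-clobbered regs  */
        unsigned long ds, es, fs, gs;  /* only with TBF_FAILSAFE          */
        unsigned long cr2;             /* only with TBF_EXCEPTION_CR2     */
        unsigned long error_code;      /* only with TBF_EXCEPTION_ERRCODE */
        unsigned long rip, cs, rflags, rsp, ss; /* always: iret-style frame */
    };
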
        ALIGN
/* %rbx: struct exec_domain */
process_guest_exception_and_events:
        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,XREGS_cs(%rsp)
        jnz   test_all_events
        jmp   restore_all_xen

        ALIGN
/* No special register assumptions. */
error_code:
        SAVE_ALL
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
        movq  %rsp,%rdi
        movl  XREGS_entry_vector(%rsp),%eax
        leaq  SYMBOL_NAME(exception_table)(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax)
        callq *(%rdx,%rax,8)
        testb $3,XREGS_cs(%rsp)
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,XREGS_cs(%rsp)        # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                # no fixup code for faulting %rip?
        jz    FATAL_exception_with_ints_disabled
        movq  %rax,XREGS_rip(%rsp)
        subq  $8,XREGS_rsp(%rsp)       # add ec/ev to previous stack frame
        testb $15,XREGS_rsp(%rsp)      # return %rsp is now aligned?
        jz    1f                       # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $XREGS_kernel_sizeof/8,%rcx
        rep;  movsq                    # make room for ec/ev
1:      movq  XREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,XREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen          # return to fixup code
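
search_pre_exception_table walks the __pre_ex_table pairs emitted earlier in
this file (here, FLT1 -> FIX1): each entry maps an instruction that may
legitimately fault with interrupts disabled to its recovery code. A hedged
sketch of that lookup (the entry layout is inferred from the ".quad FLT,FIX"
pairs; the section-bound symbols and function name are illustrative, and the
real implementation lives elsewhere in the tree):

    struct pre_ex_entry_sketch {
        unsigned long insn;   /* address that may fault (e.g. FLT1) */
        unsigned long fixup;  /* recovery address       (e.g. FIX1) */
    };

    /* Illustrative names for the __pre_ex_table section bounds. */
    extern struct pre_ex_entry_sketch __start_pre_ex_table[],
                                      __stop_pre_ex_table[];

    /* Return the fixup address for a faulting %rip, or 0 if none is
     * registered (in which case the fault is treated as fatal above). */
    static unsigned long search_pre_exception_sketch(unsigned long rip)
    {
        const struct pre_ex_entry_sketch *e;
        for (e = __start_pre_ex_table; e < __stop_pre_ex_table; e++)
            if (e->insn == rip)
                return e->fixup;
        return 0;
    }
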
/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  XREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  SYMBOL_NAME(fatal_trap)
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   error_code

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   error_code

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   error_code

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   error_code

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   error_code

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   error_code

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   error_code

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   error_code

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   error_code

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   error_code

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   error_code

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   error_code

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   error_code

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   error_code

ENTRY(double_fault)
        movl  $TRAP_double_fault,4(%rsp)
        jmp   error_code

ENTRY(nmi)
        pushq $0
        SAVE_ALL
        inb   $0x61,%al
        movl  %eax,%esi                # reason
        movq  %rsp,%rdi                # regs
        call  SYMBOL_NAME(do_nmi)
        jmp   restore_all_xen
.data

ENTRY(exception_table)
        .quad SYMBOL_NAME(do_divide_error)
        .quad SYMBOL_NAME(do_debug)
        .quad 0 # nmi
        .quad SYMBOL_NAME(do_int3)
        .quad SYMBOL_NAME(do_overflow)
        .quad SYMBOL_NAME(do_bounds)
        .quad SYMBOL_NAME(do_invalid_op)
        .quad SYMBOL_NAME(math_state_restore)
        .quad SYMBOL_NAME(do_double_fault)
        .quad SYMBOL_NAME(do_coprocessor_segment_overrun)
        .quad SYMBOL_NAME(do_invalid_TSS)
        .quad SYMBOL_NAME(do_segment_not_present)
        .quad SYMBOL_NAME(do_stack_segment)
        .quad SYMBOL_NAME(do_general_protection)
        .quad SYMBOL_NAME(do_page_fault)
        .quad SYMBOL_NAME(do_spurious_interrupt_bug)
        .quad SYMBOL_NAME(do_coprocessor_error)
        .quad SYMBOL_NAME(do_alignment_check)
        .quad SYMBOL_NAME(do_machine_check)
        .quad SYMBOL_NAME(do_simd_coprocessor_error)

ENTRY(hypercall_table)
        .quad SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .quad SYMBOL_NAME(do_mmu_update)
        .quad SYMBOL_NAME(do_set_gdt)
        .quad SYMBOL_NAME(do_stack_switch)
        .quad SYMBOL_NAME(do_set_callbacks)
        .quad SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .quad SYMBOL_NAME(do_sched_op)
        .quad SYMBOL_NAME(do_dom0_op)
        .quad SYMBOL_NAME(do_set_debugreg)
        .quad SYMBOL_NAME(do_get_debugreg)
        .quad SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .quad SYMBOL_NAME(do_ni_hypercall)
        .quad SYMBOL_NAME(do_dom_mem_op)
        .quad SYMBOL_NAME(do_multicall)
        .quad SYMBOL_NAME(do_update_va_mapping)
        .quad SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .quad SYMBOL_NAME(do_event_channel_op)
        .quad SYMBOL_NAME(do_xen_version)
        .quad SYMBOL_NAME(do_console_io)
        .quad SYMBOL_NAME(do_physdev_op)
        .quad SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .quad SYMBOL_NAME(do_vm_assist)
        .quad SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .quad SYMBOL_NAME(do_switch_to_user)
        .quad SYMBOL_NAME(do_boot_vcpu)
        .quad SYMBOL_NAME(do_set_segment_base)   /* 25 */
        .quad SYMBOL_NAME(do_mmuext_op)
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad SYMBOL_NAME(do_ni_hypercall)
        .endr
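
The trailing .rept pads the table out to exactly NR_hypercalls slots of
do_ni_hypercall, so the masked index used at the syscall entry can never reach
an uninitialised entry; (. - hypercall_table)/8 counts the 8-byte .quad entries
already emitted. The C analogue, using a GCC range initialiser (the names and
the NR_HYPERCALLS value are stand-ins; in the real file the assembler does the
padding):

    #define NR_HYPERCALLS 32    /* stand-in for NR_hypercalls */

    extern unsigned long do_ni_hypercall(void);

    /* Every slot defaults to do_ni_hypercall; the real handlers would then
     * be assigned to the low entries, matching the .rept/.endr padding. */
    unsigned long (*hypercall_table_sketch[NR_HYPERCALLS])(void) = {
        [0 ... NR_HYPERCALLS - 1] = do_ni_hypercall,
        /* [0] = do_set_trap_table, [1] = do_mmu_update, ... */
    };
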