/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

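/*
 * Each CPU's Xen stack is STACK_SIZE bytes long and STACK_SIZE-aligned,
 * with the guest register frame (struct cpu_info) at the top and the
 * current struct vcpu pointer in the topmost word. The macros below
 * exploit that alignment: GET_GUEST_REGS masks %rsp down to the stack
 * base and adds STACK_SIZE-CPUINFO_sizeof; GET_CURRENT reads the
 * top-of-stack word. Both are therefore valid at any stack depth.
 */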
#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)         \
        movq $STACK_SIZE-8, reg; \
        orq  %rsp, reg;          \
        andq $~7,reg;            \
        movq (reg),reg;

        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $0,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

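/*
 * Return to guest context. Frames marked TRAP_syscall (i.e. created by
 * syscall_enter) can be unwound with SYSRET, which reloads %rip from
 * %rcx and %rflags from %r11; __GUEST_CS32 selects the 32-bit SYSRETL
 * variant. All other frames (interrupts, exceptions) must take the
 * full IRETQ path.
 */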
/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest

        addq  $8,%rsp
        popq  %rcx                      # RIP
        popq  %r11                      # CS
        cmpw  $__GUEST_CS32,%r11
        popq  %r11                      # RFLAGS
        popq  %rsp                      # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
FLT1:   iretq

.section .fixup,"ax"
FIX1:   popq  -15*8-8(%rsp)             # error_code/entry_vector
        SAVE_ALL                        # 15*8 bytes pushed
        movq  -8(%rsp),%rsi             # error_code/entry_vector
        sti                             # after stack abuse (-1024(%rsp))
        pushq $__HYPERVISOR_DS          # SS
        leaq  8(%rsp),%rax
        pushq %rax                      # RSP
        pushf                           # RFLAGS
        pushq $__HYPERVISOR_CS          # CS
        leaq  DBLFLT1(%rip),%rax
        pushq %rax                      # RIP
        pushq %rsi                      # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad FLT1,FIX1
.previous
.section __ex_table,"a"
        .quad DBLFLT1,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *   %rax = hypercall vector
 *   %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *   %r11, %rcx = SYSCALL-saved %rflags and %rip
 *   NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *   Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
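
/*
 * Illustrative only (not assembled here): a 64-bit guest kernel making
 * hypercall 17 (xen_version) with two arguments would execute roughly:
 *
 *      movq  $__HYPERVISOR_xen_version,%rax   # hypercall vector (17)
 *      movq  $XENVER_version,%rdi             # arg1: command
 *      xorq  %rsi,%rsi                        # arg2: no argument buffer
 *      syscall                                # enters syscall_enter below
 *
 * The result is returned in %rax; %rcx and %r11 are clobbered by the
 * SYSCALL/SYSRET round trip, exactly as for a native system call.
 */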
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $__GUEST_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        andq  $(NR_hypercalls-1),%rax
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        andq  $(NR_hypercalls-1),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#endif
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10                      # Shadow RIP
        cmpq  %r10,UREGS_rip(%rsp)
        popq  %rcx                      # Shadow hypercall index
        jne   skip_clobber              /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)      # save the return value

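/*
 * Event-delivery loop, reached on every path back to guest context:
 * drain pending softirqs first, then bounce a pending, unmasked
 * virtual-event upcall into the guest before finally returning.
 */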
/* %rbx: struct vcpu */
test_all_events:
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
/*test_guest_events:*/
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

#ifdef CONFIG_VMX
/*
 * At VMExit time the processor saves the guest selectors, rsp, rip,
 * and rflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. The host's rflags is not saved by VMX,
 * so we set it to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared with cpu_user_regs, we skip pushing the following:
 *   (10) u64 gs;
 *   (9)  u64 fs;
 *   (8)  u64 ds;
 *   (7)  u64 es;
 *        <- get_stack_bottom() (= HOST_ESP)
 *   (6)  u64 ss;
 *   (5)  u64 rsp;
 *   (4)  u64 rflags;
 *   (3)  u64 cs;
 *   (2)  u64 rip;
 *  (2/1) u32 entry_vector;
 *  (1/1) u32 error_code;
 */
#define VMX_MONITOR_RFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
#define VMX_SAVE_ALL_NOSEGREGS                  \
        pushq $VMX_MONITOR_RFLAGS;              \
        popfq;                                  \
        subq  $(NR_SKIPPED_REGS*8), %rsp;       \
        pushq %rdi;                             \
        pushq %rsi;                             \
        pushq %rdx;                             \
        pushq %rcx;                             \
        pushq %rax;                             \
        pushq %r8;                              \
        pushq %r9;                              \
        pushq %r10;                             \
        pushq %r11;                             \
        pushq %rbx;                             \
        pushq %rbp;                             \
        pushq %r12;                             \
        pushq %r13;                             \
        pushq %r14;                             \
        pushq %r15;

#define VMX_RESTORE_ALL_NOSEGREGS               \
        popq  %r15;                             \
        popq  %r14;                             \
        popq  %r13;                             \
        popq  %r12;                             \
        popq  %rbp;                             \
        popq  %rbx;                             \
        popq  %r11;                             \
        popq  %r10;                             \
        popq  %r9;                              \
        popq  %r8;                              \
        popq  %rax;                             \
        popq  %rcx;                             \
        popq  %rdx;                             \
        popq  %rsi;                             \
        popq  %rdi;                             \
        addq  $(NR_SKIPPED_REGS*8), %rsp;

ENTRY(vmx_asm_vmexit_handler)
        /* selectors are restored/saved by VMX */
        VMX_SAVE_ALL_NOSEGREGS
        call  vmx_vmexit_handler
        jmp   vmx_asm_do_resume

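/*
 * Common VMX entry tail, instantiated three ways below
 * (vmx_asm_do_launch: launch=1,initialized=0; vmx_asm_do_resume:
 * launch=0,initialized=1; vmx_asm_do_relaunch: launch=1,initialized=1).
 * When 'initialized', pending softirqs are drained and vmx_intr_assist/
 * load_cr2 run before the guest is re-entered; 'launch' selects
 * VMLAUNCH over VMRESUME.
 */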
.macro vmx_asm_common launch initialized
1:
.if \initialized
/* vmx_test_all_events */
        GET_CURRENT(%rbx)
/* test_all_events: */
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip), %rdx
        testl $~0,(%rdx,%rax,1)
        jnz   2f

/* vmx_restore_all_guest */
        call  vmx_intr_assist
        call  load_cr2
.endif
        /*
         * We are about to enter the VMX-based VM. By this time, all
         * VMCS setup must be complete.
         */
        VMX_RESTORE_ALL_NOSEGREGS
.if \launch
        /* VMLAUNCH */
        .byte 0x0f,0x01,0xc2
        pushfq
        call  vm_launch_fail
.else
        /* VMRESUME */
        .byte 0x0f,0x01,0xc3
        pushfq
        call  vm_resume_fail
.endif
        /* Should never reach here */
        hlt

        ALIGN

.if \initialized
2:
/* vmx_process_softirqs */
        sti
        call  do_softirq
        jmp   1b
        ALIGN
.endif
.endm

ENTRY(vmx_asm_do_launch)
        vmx_asm_common 1 0

ENTRY(vmx_asm_do_resume)
        vmx_asm_common 0 1

ENTRY(vmx_asm_do_relaunch)
        vmx_asm_common 1 1

#endif

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

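/*
 * Note that the frame below saves the guest's %rcx and %r11 even though
 * a hardware exception would not: the rewritten return frame is marked
 * TRAP_syscall, so Xen re-enters the guest via SYSRET, which clobbers
 * both registers. The guest's handler must restore them itself.
 */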
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu */
/* On return only %rbx is guaranteed non-clobbered. */
create_bounce_frame:
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
FLT2:   movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
FLT3:   movq  %rax,24(%rsi)             # RSP
        movq  UREGS_eflags+8(%rsp),%rax
FLT4:   movq  %rax,16(%rsi)             # RFLAGS
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
FLT5:   movq  %rax,8(%rsi)              # CS/saved_upcall_mask
        movq  UREGS_rip+8(%rsp),%rax
FLT6:   movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
FLT7:   movq  %rax,(%rsi)               # ERROR CODE
        testb $TBF_EXCEPTION_CR2,%cl
        jz    2f
        subq  $8,%rsi
        movq  TRAPBOUNCE_cr2(%rdx),%rax
FLT8:   movq  %rax,(%rsi)               # CR2
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
FLT9:   movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
FLT10:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
FLT11:  movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
FLT12:  movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
FLT13:  movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
FLT14:  movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $0xfffcbeff,UREGS_eflags+8(%rsp)
        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        ret
.section __ex_table,"a"
        .quad FLT2,domain_crash_synchronous  , FLT3,domain_crash_synchronous
        .quad FLT4,domain_crash_synchronous  , FLT5,domain_crash_synchronous
        .quad FLT6,domain_crash_synchronous  , FLT7,domain_crash_synchronous
        .quad FLT8,domain_crash_synchronous  , FLT9,domain_crash_synchronous
        .quad FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .quad FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .quad FLT14,domain_crash_synchronous
.previous

        ALIGN
/* %rbx: struct vcpu */
process_guest_exception_and_events:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jnz   test_all_events
        jmp   restore_all_xen

        ALIGN
/* No special register assumptions. */
error_code:
        SAVE_ALL
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
        movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
        jz    1f                        # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                     # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen           # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

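/*
 * Exception entry stubs. Vectors for which the CPU does not push an
 * error code push a dummy 0 so that every frame has the same shape;
 * the trap number is then stored in the upper half of that slot
 * (4(%rsp)), where it becomes the frame's entry_vector.
 */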
ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   error_code

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   error_code

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   error_code

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   error_code

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   error_code

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   error_code

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   error_code

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   error_code

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   error_code

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   error_code

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   error_code

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   error_code

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   error_code

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   error_code

ENTRY(double_fault)
        movl  $TRAP_double_fault,4(%rsp)
        jmp   error_code

ENTRY(nmi)
        pushq $0
        SAVE_ALL
        inb   $0x61,%al
        movl  %eax,%esi                 # reason
        movq  %rsp,%rdi                 # regs
        call  do_nmi
        jmp   restore_all_xen

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%r10)
        movq  %rax,UREGS_rax(%r10)
        jmp   do_sched_op

.data

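/*
 * C-level handlers, indexed by entry_vector in the error_code path.
 * Slot 2 (NMI) is NULL: NMIs are dispatched straight to do_nmi by the
 * ENTRY(nmi) stub above and never arrive via error_code.
 */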
ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad 0 # nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad math_state_restore
        .quad do_double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

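/*
 * Hypercall dispatch table, indexed by vector (%rax masked with
 * NR_hypercalls-1 in syscall_enter); unused slots fall through to
 * do_ni_hypercall.
 */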
ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_arch_sched_op
        .quad do_dom0_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_switch_to_user
        .quad do_boot_vcpu
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_acm_op
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr

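/*
 * Per-hypercall argument counts, consumed by the NDEBUG register-
 * clobbering checks in the hypercall path above. Keep in sync with
 * hypercall_table.
 */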
ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 3 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_arch_sched_op */
        .byte 1 /* do_dom0_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 2 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_switch_to_user */
        .byte 2 /* do_boot_vcpu */
        .byte 2 /* do_set_segment_base */  /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr