/*
 * vmx.c: handling VMX architecture-related VM exits
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/vmx.h>
#include <asm/vmx_vmcs.h>
#include <asm/vmx_intercept.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif

#include <public/io/ioreq.h>

int hvm_enabled;

#ifdef CONFIG_VMX

int vmcs_size;
unsigned int opt_vmx_debug_level = 0;
integer_param("vmx_debug", opt_vmx_debug_level);

#ifdef TRACE_BUFFER
static unsigned long trace_values[NR_CPUS][4];
#define TRACE_VMEXIT(index, value) \
    (trace_values[current->processor][index] = (value))
#else
#define TRACE_VMEXIT(index, value) ((void)0)
#endif

#ifdef __x86_64__
static struct msr_state percpu_msr[NR_CPUS];

static u32 msr_data_index[VMX_MSR_COUNT] =
{
    MSR_LSTAR, MSR_STAR, MSR_CSTAR,
    MSR_SYSCALL_MASK, MSR_EFER,
};

/*
 * To avoid MSR save/restore at every VM exit/entry time, we restore
 * the x86_64 specific MSRs at domain switch time. Since those MSRs
 * are not modified once set for generic domains, we don't save them,
 * but simply reset them to the values set at percpu_traps_init().
 */
void vmx_load_msrs(struct vcpu *n)
{
    struct msr_state *host_state;
    host_state = &percpu_msr[smp_processor_id()];

    while (host_state->flags) {
        int i;

        i = find_first_set_bit(host_state->flags);
        wrmsrl(msr_data_index[i], host_state->msr_items[i]);
        clear_bit(i, &host_state->flags);
    }
}

static void vmx_save_init_msrs(void)
{
    struct msr_state *host_state;
    int i;

    host_state = &percpu_msr[smp_processor_id()];

    for (i = 0; i < VMX_MSR_COUNT; i++)
        rdmsrl(msr_data_index[i], host_state->msr_items[i]);
}

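/*
 * Helpers for long_mode_do_msr_{read,write} below: fetch or update the
 * per-vcpu saved copy of an MSR.  The write path also updates the
 * hardware MSR and marks it dirty in both the guest and the host state,
 * so the context-switch code knows it must be restored.
 */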
#define CASE_READ_MSR(address)                                      \
    case MSR_ ## address:                                           \
        msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address];    \
        break

#define CASE_WRITE_MSR(address)                                     \
    case MSR_ ## address:                                           \
    {                                                               \
        msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content;    \
        if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) {    \
            set_bit(VMX_INDEX_MSR_ ## address, &msr->flags);        \
        }                                                           \
        wrmsrl(MSR_ ## address, msr_content);                       \
        set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags);     \
    }                                                               \
    break

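/*
 * Emulate RDMSR for the x86_64 MSRs that VMX does not context-switch
 * automatically: EFER, the SYSCALL entry MSRs (STAR/LSTAR/CSTAR/
 * SYSCALL_MASK) and the FS/GS segment bases.  Returns 0 for MSRs not
 * handled here so the caller can fall back to the generic path.
 * Note that IS_CANO_ADDRESS is currently a stub that accepts every
 * address; no canonical-address check is performed yet.
 */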
#define IS_CANO_ADDRESS(add) 1
static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;
    struct vcpu *vc = current;
    struct msr_state *msr = &vc->arch.arch_vmx.msr_content;

    switch (regs->ecx) {
    case MSR_EFER:
        msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
        VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n",
                    (unsigned long long)msr_content);
        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
                     &vc->arch.arch_vmx.cpu_state))
            msr_content |= 1 << _EFER_LME;

        if (VMX_LONG_GUEST(vc))
            msr_content |= 1 << _EFER_LMA;
        break;
    case MSR_FS_BASE:
        if (!(VMX_LONG_GUEST(vc)))
            /* XXX should this be a GP fault? */
            domain_crash();
        __vmread(GUEST_FS_BASE, &msr_content);
        break;
    case MSR_GS_BASE:
        if (!(VMX_LONG_GUEST(vc)))
            domain_crash();
        __vmread(GUEST_GS_BASE, &msr_content);
        break;
    case MSR_SHADOW_GS_BASE:
        msr_content = msr->shadow_gs;
        break;

    CASE_READ_MSR(STAR);
    CASE_READ_MSR(LSTAR);
    CASE_READ_MSR(CSTAR);
    CASE_READ_MSR(SYSCALL_MASK);
    default:
        return 0;
    }

    VMX_DBG_LOG(DBG_LEVEL_2, "long_mode_do_msr_read: msr_content: %lx\n",
                msr_content);
    regs->eax = msr_content & 0xffffffff;
    regs->edx = msr_content >> 32;
    return 1;
}

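/*
 * Emulate WRMSR for the same set of long-mode MSRs.  EFER writes are
 * checked for an illegal LME transition (toggling LME while paging is
 * on, or without PAE) and may inject #GP; FS/GS base writes go straight
 * into the VMCS; the SYSCALL MSRs are written to hardware and flagged
 * so they can be restored on the next context switch.
 */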
static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
    u64 msr_content = regs->eax | ((u64)regs->edx << 32);
    struct vcpu *vc = current;
    struct msr_state *msr = &vc->arch.arch_vmx.msr_content;
    struct msr_state *host_state =
        &percpu_msr[smp_processor_id()];

    VMX_DBG_LOG(DBG_LEVEL_1, "long_mode_do_msr_write msr %lx msr_content %lx\n",
                regs->ecx, msr_content);

    switch (regs->ecx) {
    case MSR_EFER:
        if ((msr_content & EFER_LME) ^
            test_bit(VMX_CPU_STATE_LME_ENABLED,
                     &vc->arch.arch_vmx.cpu_state)) {
            if (test_bit(VMX_CPU_STATE_PG_ENABLED,
                         &vc->arch.arch_vmx.cpu_state) ||
                !test_bit(VMX_CPU_STATE_PAE_ENABLED,
                          &vc->arch.arch_vmx.cpu_state)) {
                vmx_inject_exception(vc, TRAP_gp_fault, 0);
            }
        }
        if (msr_content & EFER_LME)
            set_bit(VMX_CPU_STATE_LME_ENABLED,
                    &vc->arch.arch_vmx.cpu_state);

        /* No update for LME/LMA since they have no effect */
        msr->msr_items[VMX_INDEX_MSR_EFER] =
            msr_content;
        if (msr_content & ~(EFER_LME | EFER_LMA)) {
            msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
            if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)) {
                rdmsrl(MSR_EFER,
                       host_state->msr_items[VMX_INDEX_MSR_EFER]);
                set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
                set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
                wrmsrl(MSR_EFER, msr_content);
            }
        }
        break;

    case MSR_FS_BASE:
    case MSR_GS_BASE:
        if (!(VMX_LONG_GUEST(vc)))
            domain_crash();
        if (!IS_CANO_ADDRESS(msr_content)) {
            VMX_DBG_LOG(DBG_LEVEL_1, "Non-canonical address for MSR write\n");
            vmx_inject_exception(vc, TRAP_gp_fault, 0);
        }
        if (regs->ecx == MSR_FS_BASE)
            __vmwrite(GUEST_FS_BASE, msr_content);
        else
            __vmwrite(GUEST_GS_BASE, msr_content);
        break;

    case MSR_SHADOW_GS_BASE:
        if (!(VMX_LONG_GUEST(vc)))
            domain_crash();
        vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
        wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
        break;

    CASE_WRITE_MSR(STAR);
    CASE_WRITE_MSR(LSTAR);
    CASE_WRITE_MSR(CSTAR);
    CASE_WRITE_MSR(SYSCALL_MASK);
    default:
        return 0;
    }
    return 1;
}

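/*
 * On entry to a VMX domain, load the guest's dirty long-mode MSRs and
 * always refresh SHADOW_GS.  Each MSR loaded here is flagged in the
 * host state so vmx_load_msrs() can put the host value back later.
 */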
void
vmx_restore_msrs(struct vcpu *d)
{
    int i = 0;
    struct msr_state *guest_state;
    struct msr_state *host_state;
    unsigned long guest_flags;

    guest_state = &d->arch.arch_vmx.msr_content;
    host_state = &percpu_msr[smp_processor_id()];

    wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
    guest_flags = guest_state->flags;
    if (!guest_flags)
        return;

    while (guest_flags) {
        i = find_first_set_bit(guest_flags);

        VMX_DBG_LOG(DBG_LEVEL_2,
                    "restore guest's index %d msr %lx with %lx\n",
                    i, (unsigned long)msr_data_index[i],
                    (unsigned long)guest_state->msr_items[i]);
        set_bit(i, &host_state->flags);
        wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
        clear_bit(i, &guest_flags);
    }
}

#else  /* __i386__ */
#define vmx_save_init_msrs() ((void)0)

static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
{
    return 0;
}
static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
    return 0;
}
#endif

extern long evtchn_send(int lport);
extern long do_block(void);
void do_nmi(struct cpu_user_regs *, unsigned long);

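/*
 * Sanity-check the control-field settings we intend to use against the
 * bounds advertised by the corresponding IA32_VMX_*_CTLS capability MSR
 * (low word: bits that must be 1; high word: bits that may be 1).
 */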
static int check_vmx_controls(u32 ctrls, u32 msr)
{
    u32 vmx_msr_low, vmx_msr_high;

    rdmsr(msr, vmx_msr_low, vmx_msr_high);
    if (ctrls < vmx_msr_low || ctrls > vmx_msr_high) {
        printk("Insufficient VMX capability 0x%x, "
               "msr=0x%x,low=0x%8x,high=0x%x\n",
               ctrls, msr, vmx_msr_low, vmx_msr_high);
        return 0;
    }
    return 1;
}

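/*
 * Per-CPU VMX bring-up: verify CPU and BIOS (feature-control MSR)
 * support, validate the VMCS control fields we plan to use, set
 * CR4.VMXE, execute VMXON, and snapshot the host MSRs that get
 * context-switched for long-mode guests.
 */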
int start_vmx(void)
{
    struct vmcs_struct *vmcs;
    u32 ecx;
    u32 eax, edx;
    u64 phys_vmcs;      /* debugging */

    /*
     * Xen does not fill x86_capability words except 0.
     */
    ecx = cpuid_ecx(1);
    boot_cpu_data.x86_capability[4] = ecx;

    if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
        return 0;

    rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);

    if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
        if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
            printk("VMX disabled by Feature Control MSR.\n");
            return 0;
        }
    }
    else {
        wrmsr(IA32_FEATURE_CONTROL_MSR,
              IA32_FEATURE_CONTROL_MSR_LOCK |
              IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
    }

    if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
                            MSR_IA32_VMX_PINBASED_CTLS_MSR))
        return 0;
    if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
                            MSR_IA32_VMX_PROCBASED_CTLS_MSR))
        return 0;
    if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
                            MSR_IA32_VMX_EXIT_CTLS_MSR))
        return 0;
    if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
                            MSR_IA32_VMX_ENTRY_CTLS_MSR))
        return 0;

    set_in_cr4(X86_CR4_VMXE);   /* Enable VMXE */

    if (!(vmcs = alloc_vmcs())) {
        printk("Failed to allocate VMCS\n");
        return 0;
    }

    phys_vmcs = (u64) virt_to_phys(vmcs);

    if (!(__vmxon(phys_vmcs))) {
        printk("VMXON is done\n");
    }

    vmx_save_init_msrs();

    hvm_enabled = 1;

    return 1;
}

void stop_vmx(void)
{
    if (read_cr4() & X86_CR4_VMXE)
        __vmxoff();
}

/*
 * Not all VM exits provide a valid value in the VM-exit instruction
 * length field.
 */
#define __get_instruction_length(len) \
    __vmread(VM_EXIT_INSTRUCTION_LEN, &(len)); \
    if ((len) < 1 || (len) > 15) \
        __vmx_bug(&regs);

static inline void __update_guest_eip(unsigned long inst_len)
{
    unsigned long current_eip;

    __vmread(GUEST_RIP, &current_eip);
    __vmwrite(GUEST_RIP, current_eip + inst_len);
}

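/*
 * #PF exit handler.  Faults taken before the guest enables paging and
 * faults on the 1:1 MMIO range are forwarded to the device-model I/O
 * path; everything else is handed to the shadow pagetable code.
 */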
static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
{
    unsigned long eip;
    unsigned long gpa; /* FIXME: PAE */
    int result;

#if VMX_DEBUG
    {
        __vmread(GUEST_RIP, &eip);
        VMX_DBG_LOG(DBG_LEVEL_VMMU,
                    "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
                    va, eip, (unsigned long)regs->error_code);
    }
#endif

    if (!vmx_paging_enabled(current)) {
        handle_mmio(va, va);
        TRACE_VMEXIT(2, 2);
        return 1;
    }
    gpa = gva_to_gpa(va);

    /* Use 1:1 page table to identify MMIO address space */
    if ( mmio_space(gpa) ) {
        if (gpa >= 0xFEE00000) { /* workaround for local APIC */
            u32 inst_len;
            __vmread(VM_EXIT_INSTRUCTION_LEN, &(inst_len));
            __update_guest_eip(inst_len);
            return 1;
        }
        TRACE_VMEXIT(2, 2);
        handle_mmio(va, gpa);
        return 1;
    }

    result = shadow_fault(va, regs);
    TRACE_VMEXIT(2, result);
#if 0
    if ( !result )
    {
        __vmread(GUEST_RIP, &eip);
        printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
    }
#endif

    return result;
}

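/*
 * #NM (device-not-available) exit handler, used for lazy FPU save:
 * give the FPU back to the guest, clear CR0.TS unless the guest's own
 * TS bit is set, and drop the #NM intercept from the exception bitmap.
 */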
static void vmx_do_no_device_fault(void)
{
    unsigned long cr0;

    clts();
    setup_fpu(current);
    __vmread(CR0_READ_SHADOW, &cr0);
    if (!(cr0 & X86_CR0_TS)) {
        __vmread(GUEST_CR0, &cr0);
        cr0 &= ~X86_CR0_TS;
        __vmwrite(GUEST_CR0, cr0);
    }
    __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}

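/*
 * CPUID exit handler.  Runs the real CPUID and then masks out the
 * large-page/PAE feature bits (PSE, PAE, PSE36) that the 2-level
 * shadow pagetable code cannot virtualize.
 */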
static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned long eip;

    __vmread(GUEST_RIP, &eip);

    VMX_DBG_LOG(DBG_LEVEL_1,
                "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
                " (esi) %lx, (edi) %lx",
                (unsigned long)regs->eax, (unsigned long)regs->ebx,
                (unsigned long)regs->ecx, (unsigned long)regs->edx,
                (unsigned long)regs->esi, (unsigned long)regs->edi);

    cpuid(input, &eax, &ebx, &ecx, &edx);

    if (input == 1) {
#ifdef __i386__
        clear_bit(X86_FEATURE_PSE, &edx);
        clear_bit(X86_FEATURE_PAE, &edx);
        clear_bit(X86_FEATURE_PSE36, &edx);
#else
        struct vcpu *d = current;
        if (d->domain->arch.ops->guest_paging_levels == PAGING_L2)
        {
            clear_bit(X86_FEATURE_PSE, &edx);
            clear_bit(X86_FEATURE_PAE, &edx);
            clear_bit(X86_FEATURE_PSE36, &edx);
        }
#endif
    }

    regs->eax = (unsigned long) eax;
    regs->ebx = (unsigned long) ebx;
    regs->ecx = (unsigned long) ecx;
    regs->edx = (unsigned long) edx;

    VMX_DBG_LOG(DBG_LEVEL_1,
                "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, "
                "out:eax=%x, ebx=%x, ecx=%x, edx=%x",
                eip, input, eax, ebx, ecx, edx);
}

#define CASE_GET_REG_P(REG, reg) \
    case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break

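/*
 * Debug-register access exit handler.  Decodes which general-purpose
 * register and which direction (MOV to/from DRn) from the exit
 * qualification and forwards the value via the vcpu's saved debugreg
 * array; ESP has to go through the VMCS rather than the register frame.
 */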
static void vmx_dr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
{
    unsigned int reg;
    unsigned long *reg_p = 0;
    struct vcpu *v = current;
    unsigned long eip;

    __vmread(GUEST_RIP, &eip);

    reg = exit_qualification & DEBUG_REG_ACCESS_NUM;

    VMX_DBG_LOG(DBG_LEVEL_1,
                "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
                eip, reg, exit_qualification);

    switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
    CASE_GET_REG_P(EAX, eax);
    CASE_GET_REG_P(ECX, ecx);
    CASE_GET_REG_P(EDX, edx);
    CASE_GET_REG_P(EBX, ebx);
    CASE_GET_REG_P(EBP, ebp);
    CASE_GET_REG_P(ESI, esi);
    CASE_GET_REG_P(EDI, edi);
    case REG_ESP:
        break;
    default:
        __vmx_bug(regs);
    }

    switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
    case TYPE_MOV_TO_DR:
        /* don't need to check the range */
        if (reg != REG_ESP)
            v->arch.guest_context.debugreg[reg] = *reg_p;
        else {
            unsigned long value;
            __vmread(GUEST_RSP, &value);
            v->arch.guest_context.debugreg[reg] = value;
        }
        break;
    case TYPE_MOV_FROM_DR:
        if (reg != REG_ESP)
            *reg_p = v->arch.guest_context.debugreg[reg];
        else {
            __vmwrite(GUEST_RSP, v->arch.guest_context.debugreg[reg]);
        }
        break;
    }
}

/*
 * Invalidate the TLB for va. Invalidate the shadow page corresponding
 * to the address va.
 */
static void vmx_vmexit_do_invlpg(unsigned long va)
{
    unsigned long eip;
    struct vcpu *v = current;

    __vmread(GUEST_RIP, &eip);

    VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
                eip, va);

    /*
     * We do the safest thing first, then try to update the shadow
     * copying from the guest.
     */
    shadow_invlpg(v, va);
}

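/*
 * For a string I/O instruction, determine which segment the memory
 * operand uses (honouring any segment-override prefix, DS by default)
 * and report whether that selector is null, in which case the guest
 * linear address in the VMCS cannot be trusted.
 */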
static int check_for_null_selector(unsigned long eip)
{
    unsigned char inst[MAX_INST_LEN];
    unsigned long sel;
    int i, inst_len;
    int inst_copy_from_guest(unsigned char *, unsigned long, int);

    __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
    memset(inst, 0, MAX_INST_LEN);
    if (inst_copy_from_guest(inst, eip, inst_len) != inst_len) {
        printf("check_for_null_selector: get guest instruction failed\n");
        domain_crash_synchronous();
    }

    for (i = 0; i < inst_len; i++) {
        switch (inst[i]) {
        case 0xf3: /* REPZ */
        case 0xf2: /* REPNZ */
        case 0xf0: /* LOCK */
        case 0x66: /* data32 */
        case 0x67: /* addr32 */
            continue;
        case 0x2e: /* CS */
            __vmread(GUEST_CS_SELECTOR, &sel);
            break;
        case 0x36: /* SS */
            __vmread(GUEST_SS_SELECTOR, &sel);
            break;
        case 0x26: /* ES */
            __vmread(GUEST_ES_SELECTOR, &sel);
            break;
        case 0x64: /* FS */
            __vmread(GUEST_FS_SELECTOR, &sel);
            break;
        case 0x65: /* GS */
            __vmread(GUEST_GS_SELECTOR, &sel);
            break;
        case 0x3e: /* DS */
            /* FALLTHROUGH */
        default:
            /* DS is the default */
            __vmread(GUEST_DS_SELECTOR, &sel);
        }
        return sel == 0 ? 1 : 0;
    }

    return 0;
}

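/*
 * Build a port-I/O request in the page shared with the device model.
 * Requests the built-in intercepts can satisfy are completed locally;
 * anything else is posted over the event channel and the vcpu blocks
 * in vmx_wait_io() until the device model replies.
 */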
void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
                  unsigned long count, int size, long value, int dir, int pvalid)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == NULL) {
        printk("bad shared page: %lx\n", (unsigned long) vio);
        domain_crash_synchronous();
    }

    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
        printf("VMX I/O has not yet completed\n");
        domain_crash_synchronous();
    }
    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);

    p = &vio->vp_ioreq;
    p->dir = dir;
    p->pdata_valid = pvalid;

    p->type = IOREQ_TYPE_PIO;
    p->size = size;
    p->addr = port;
    p->count = count;
    p->df = regs->eflags & EF_DF ? 1 : 0;

    if (pvalid) {
        if (vmx_paging_enabled(current))
            p->u.pdata = (void *) gva_to_gpa(value);
        else
            p->u.pdata = (void *) value; /* guest VA == guest PA */
    } else
        p->u.data = value;

    p->state = STATE_IOREQ_READY;

    if (vmx_portio_intercept(p)) {
        /* no blocking & no evtchn notification */
        clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
        return;
    }

    evtchn_send(iopacket_port(v->domain));
    vmx_wait_io();
}

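/*
 * I/O-instruction exit handler.  The exit qualification encodes the
 * operand size (bits 0-2), direction (bit 3), string vs. immediate
 * operand (bit 4), REP prefix (bit 5) and port-encoding form (bit 6);
 * string operations that cross a page boundary are split up or bounced
 * through vmx_copy().
 */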
static void vmx_io_instruction(struct cpu_user_regs *regs,
                               unsigned long exit_qualification, unsigned long inst_len)
{
    struct mi_per_cpu_info *mpcip;
    unsigned long eip, cs, eflags;
    unsigned long port, size, dir;
    int vm86;

    mpcip = &current->domain->arch.vmx_platform.mpci;
    mpcip->instr = INSTR_PIO;
    mpcip->flags = 0;

    __vmread(GUEST_RIP, &eip);
    __vmread(GUEST_CS_SELECTOR, &cs);
    __vmread(GUEST_RFLAGS, &eflags);
    vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;

    VMX_DBG_LOG(DBG_LEVEL_1,
                "vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
                "exit_qualification = %lx",
                vm86, cs, eip, exit_qualification);

    if (test_bit(6, &exit_qualification))
        port = (exit_qualification >> 16) & 0xFFFF;
    else
        port = regs->edx & 0xffff;
    TRACE_VMEXIT(2, port);
    size = (exit_qualification & 7) + 1;
    dir = test_bit(3, &exit_qualification); /* direction */

    if (test_bit(4, &exit_qualification)) { /* string instruction */
        unsigned long addr, count = 1;
        int sign = regs->eflags & EF_DF ? -1 : 1;

        __vmread(GUEST_LINEAR_ADDRESS, &addr);

        /*
         * In protected mode, guest linear address is invalid if the
         * selector is null.
         */
        if (!vm86 && check_for_null_selector(eip))
            addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;

        if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
            mpcip->flags |= REPZ;
            count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
        }

        /*
         * Handle string pio instructions that cross pages or that
         * are unaligned. See the comments in vmx_platform.c/handle_mmio()
         */
        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
            unsigned long value = 0;

            mpcip->flags |= OVERLAP;
            if (dir == IOREQ_WRITE)
                vmx_copy(&value, addr, size, VMX_COPY_IN);
            send_pio_req(regs, port, 1, size, value, dir, 0);
        } else {
            if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
                if (sign > 0)
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size;
            } else
                __update_guest_eip(inst_len);

            send_pio_req(regs, port, count, size, addr, dir, 1);
        }
    } else {
        __update_guest_eip(inst_len);
        send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
    }
}

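/*
 * Copy between hypervisor memory (buf) and a guest linear address,
 * a page at a time, translating and mapping each guest frame.
 * Returns 0 if a frame fails to translate, 1 on success.
 */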
int
vmx_copy(void *buf, unsigned long laddr, int size, int dir)
{
    unsigned long gpa, mfn;
    char *addr;
    int count;

    while (size > 0) {
        count = PAGE_SIZE - (laddr & ~PAGE_MASK);
        if (count > size)
            count = size;

        if (vmx_paging_enabled(current)) {
            gpa = gva_to_gpa(laddr);
            mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
        } else
            mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            return 0;

        addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);

        if (dir == VMX_COPY_IN)
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        unmap_domain_page(addr);

        laddr += count;
        buf += count;
        size -= count;
    }

    return 1;
}

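/*
 * Snapshot the full guest execution context (registers, control
 * registers and all segment/descriptor-table state) out of the VMCS
 * into a vmx_assist_context, for handing to the vmxassist real-mode
 * emulator.  Returns 1 on success.
 */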
int
vmx_world_save(struct vcpu *d, struct vmx_assist_context *c)
{
    unsigned long inst_len;
    int error = 0;

    error |= __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
    error |= __vmread(GUEST_RIP, &c->eip);
    c->eip += inst_len; /* skip transition instruction */
    error |= __vmread(GUEST_RSP, &c->esp);
    error |= __vmread(GUEST_RFLAGS, &c->eflags);

    error |= __vmread(CR0_READ_SHADOW, &c->cr0);
    c->cr3 = d->arch.arch_vmx.cpu_cr3;
    error |= __vmread(CR4_READ_SHADOW, &c->cr4);

    error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
    error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);

    error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
    error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);

    error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
    error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
    error |= __vmread(GUEST_CS_BASE, &c->cs_base);
    error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);

    error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
    error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
    error |= __vmread(GUEST_DS_BASE, &c->ds_base);
    error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);

    error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
    error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
    error |= __vmread(GUEST_ES_BASE, &c->es_base);
    error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);

    error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
    error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
    error |= __vmread(GUEST_SS_BASE, &c->ss_base);
    error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);

    error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
    error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
    error |= __vmread(GUEST_FS_BASE, &c->fs_base);
    error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);

    error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
    error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
    error |= __vmread(GUEST_GS_BASE, &c->gs_base);
    error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);

    error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
    error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
    error |= __vmread(GUEST_TR_BASE, &c->tr_base);
    error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);

    error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
    error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
    error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
    error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);

    return !error;
}

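/*
 * Inverse of vmx_world_save(): load a vmx_assist_context back into the
 * VMCS, revalidating and re-shadowing CR3 along the way.
 */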
int
vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c)
{
    unsigned long mfn, old_cr4;
    int error = 0;

    error |= __vmwrite(GUEST_RIP, c->eip);
    error |= __vmwrite(GUEST_RSP, c->esp);
    error |= __vmwrite(GUEST_RFLAGS, c->eflags);

    error |= __vmwrite(CR0_READ_SHADOW, c->cr0);

    if (!vmx_paging_enabled(d)) {
        VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
        goto skip_cr3;
    }

    if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
        /*
         * This is simple TLB flush, implying the guest has
         * removed some translation or changed page attributes.
         * We simply invalidate the shadow.
         */
        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
        if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
            printk("Invalid CR3 value=%x", c->cr3);
            domain_crash_synchronous();
            return 0;
        }
        shadow_sync_all(d->domain);
    } else {
        /*
         * If different, make a shadow. Check if the PDBR is valid
         * first.
         */
        VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
        if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
            printk("Invalid CR3 value=%x", c->cr3);
            domain_crash_synchronous();
            return 0;
        }
        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
        d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
        update_pagetables(d);
        /*
         * arch.shadow_table should now hold the next CR3 for shadow
         */
        d->arch.arch_vmx.cpu_cr3 = c->cr3;
        VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
    }

 skip_cr3:

    error |= __vmread(CR4_READ_SHADOW, &old_cr4);
    error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
    error |= __vmwrite(CR4_READ_SHADOW, c->cr4);

    error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
    error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);

    error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
    error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);

    error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
    error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
    error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
    error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);

    error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
    error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
    error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
    error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);

    error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
    error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
    error |= __vmwrite(GUEST_ES_BASE, c->es_base);
    error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);

    error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
    error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
    error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
    error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);

    error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
    error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
    error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
    error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);

    error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
    error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
    error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
    error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);

    error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
    error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
    error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
    error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);

    error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
    error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
    error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);

    return !error;
}

enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };

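/*
 * World switch to/from the vmxassist real-mode emulator, used because
 * VMX cannot execute real-mode code directly.  Pointers to the old and
 * new contexts live at fixed offsets in the vmxassist image inside the
 * guest (VMXASSIST_OLD_CONTEXT / VMXASSIST_NEW_CONTEXT).
 */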
int
vmx_assist(struct vcpu *d, int mode)
{
    struct vmx_assist_context c;
    u32 magic;
    u32 cp;

    /* make sure vmxassist exists (this is not an error) */
    if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN))
        return 0;
    if (magic != VMXASSIST_MAGIC)
        return 0;

    switch (mode) {
    /*
     * Transfer control to vmxassist.
     * Store the current context in VMXASSIST_OLD_CONTEXT and load
     * the new VMXASSIST_NEW_CONTEXT context. This context was created
     * by vmxassist and will transfer control to it.
     */
    case VMX_ASSIST_INVOKE:
        /* save the old context */
        if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
            goto error;
        if (cp != 0) {
            if (!vmx_world_save(d, &c))
                goto error;
            if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT))
                goto error;
        }

        /* restore the new context, this should activate vmxassist */
        if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN))
            goto error;
        if (cp != 0) {
            if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
                goto error;
            if (!vmx_world_restore(d, &c))
                goto error;
            return 1;
        }
        break;

    /*
     * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
     * above.
     */
    case VMX_ASSIST_RESTORE:
        /* fetch the saved old context */
        if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
            goto error;
        if (cp != 0) {
            if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
                goto error;
            if (!vmx_world_restore(d, &c))
                goto error;
            return 1;
        }
        break;
    }

 error:
    printf("Failed to transfer to vmxassist\n");
    domain_crash_synchronous();
    return 0;
}

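/*
 * Handle a guest write to CR0.  Enabling paging makes the guest
 * pagetable live (and may activate long mode); clearing CR0.PE
 * triggers the world switch into vmxassist for real-mode emulation.
 */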
static int vmx_set_cr0(unsigned long value)
{
    struct vcpu *d = current;
    unsigned long mfn;
    unsigned long eip;
    int paging_enabled;
    unsigned long vm_entry_value;

    /*
     * CR0: We don't want to lose PE and PG.
     */
    paging_enabled = vmx_paging_enabled(d);
    __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
    __vmwrite(CR0_READ_SHADOW, value);

    VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);

    if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) {
        /*
         * The guest CR3 must be pointing to the guest physical.
         */
        if ( !VALID_MFN(mfn = get_mfn_from_pfn(
            d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
             !get_page(pfn_to_page(mfn), d->domain) )
        {
            printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3);
            domain_crash_synchronous(); /* need to take a clean path */
        }

#if defined(__x86_64__)
        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
                     &d->arch.arch_vmx.cpu_state) &&
            !test_bit(VMX_CPU_STATE_PAE_ENABLED,
                      &d->arch.arch_vmx.cpu_state)) {
            VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
            vmx_inject_exception(d, TRAP_gp_fault, 0);
        }
        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
                     &d->arch.arch_vmx.cpu_state)) {
            /* PAE must already be enabled at this point */
            VMX_DBG_LOG(DBG_LEVEL_1, "Enable long mode\n");
            set_bit(VMX_CPU_STATE_LMA_ENABLED,
                    &d->arch.arch_vmx.cpu_state);
            __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
            vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
            __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);

#if CONFIG_PAGING_LEVELS >= 4
            if (!shadow_set_guest_paging_levels(d->domain, 4)) {
                printk("Unsupported guest paging levels\n");
                domain_crash_synchronous(); /* need to take a clean path */
            }
#endif
        }
        else
        {
#if CONFIG_PAGING_LEVELS >= 4
            if (!shadow_set_guest_paging_levels(d->domain, 2)) {
                printk("Unsupported guest paging levels\n");
                domain_crash_synchronous(); /* need to take a clean path */
            }
#endif
        }

        {
            unsigned long crn;
            /* update CR4's PAE if needed */
            __vmread(GUEST_CR4, &crn);
            if ( (!(crn & X86_CR4_PAE)) &&
                 test_bit(VMX_CPU_STATE_PAE_ENABLED,
                          &d->arch.arch_vmx.cpu_state)) {
                VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
                __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
            }
        }
#endif
        /*
         * Now arch.guest_table points to machine physical.
         */
        d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
        update_pagetables(d);

        VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
                    (unsigned long) (mfn << PAGE_SHIFT));

        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
        /*
         * arch->shadow_table should hold the next CR3 for shadow
         */
        VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
                    d->arch.arch_vmx.cpu_cr3, mfn);
    }

    /*
     * VMX does not implement real-mode virtualization. We emulate
     * real-mode by performing a world switch to VMXAssist whenever
     * a partition disables the CR0.PE bit.
     */
    if ((value & X86_CR0_PE) == 0) {
        if ( value & X86_CR0_PG ) {
            /* inject GP here */
            vmx_inject_exception(d, TRAP_gp_fault, 0);
            return 0;
        } else {
            /*
             * Disable paging here.
             * Same as PE == 1 && PG == 0.
             */
            if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
                         &d->arch.arch_vmx.cpu_state)) {
                clear_bit(VMX_CPU_STATE_LMA_ENABLED,
                          &d->arch.arch_vmx.cpu_state);
                __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
                vm_entry_value &= ~VM_ENTRY_CONTROLS_IA32E_MODE;
                __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
            }
        }

        __vmread(GUEST_RIP, &eip);
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "Disabling CR0.PE at %%eip 0x%lx\n", eip);
        if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
            set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
            __vmread(GUEST_RIP, &eip);
            VMX_DBG_LOG(DBG_LEVEL_1,
                        "Transferring control to vmxassist %%eip 0x%lx\n", eip);
            return 0; /* do not update eip! */
        }
    } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
                        &d->arch.arch_vmx.cpu_state)) {
        __vmread(GUEST_RIP, &eip);
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "Enabling CR0.PE at %%eip 0x%lx\n", eip);
        if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
            clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
                      &d->arch.arch_vmx.cpu_state);
            __vmread(GUEST_RIP, &eip);
            VMX_DBG_LOG(DBG_LEVEL_1,
                        "Restoring to %%eip 0x%lx\n", eip);
            return 0; /* do not update eip! */
        }
    }

    return 1;
}

iap10@3290
|
1155 #define CASE_GET_REG(REG, reg) \
|
iap10@3290
|
1156 case REG_ ## REG: value = regs->reg; break
|
iap10@3290
|
1157
|
kaf24@5659
|
1158 #define CASE_EXTEND_SET_REG \
|
kaf24@5659
|
1159 CASE_EXTEND_REG(S)
|
kaf24@5659
|
1160 #define CASE_EXTEND_GET_REG \
|
kaf24@5659
|
1161 CASE_EXTEND_REG(G)
|
kaf24@5659
|
1162
|
kaf24@5659
|
1163 #ifdef __i386__
|
kaf24@5659
|
1164 #define CASE_EXTEND_REG(T)
|
kaf24@5659
|
1165 #else
|
kaf24@5659
|
1166 #define CASE_EXTEND_REG(T) \
|
kaf24@5659
|
1167 CASE_ ## T ## ET_REG(R8, r8); \
|
kaf24@5659
|
1168 CASE_ ## T ## ET_REG(R9, r9); \
|
kaf24@5659
|
1169 CASE_ ## T ## ET_REG(R10, r10); \
|
kaf24@5659
|
1170 CASE_ ## T ## ET_REG(R11, r11); \
|
kaf24@5659
|
1171 CASE_ ## T ## ET_REG(R12, r12); \
|
kaf24@5659
|
1172 CASE_ ## T ## ET_REG(R13, r13); \
|
kaf24@5659
|
1173 CASE_ ## T ## ET_REG(R14, r14); \
|
kaf24@5659
|
1174 CASE_ ## T ## ET_REG(R15, r15);
|
kaf24@5659
|
1175 #endif
|
kaf24@5659
|
1176
|
kaf24@5659
|
1177
|
iap10@3290
|
1178 /*
|
iap10@3290
|
1179 * Write to control registers
|
iap10@3290
|
1180 */
|
kaf24@4683
|
1181 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
|
iap10@3290
|
1182 {
|
iap10@3290
|
1183 unsigned long value;
|
iap10@3290
|
1184 unsigned long old_cr;
|
kaf24@5289
|
1185 struct vcpu *d = current;
|
iap10@3290
|
1186
|
iap10@3290
|
1187 switch (gp) {
|
iap10@3290
|
1188 CASE_GET_REG(EAX, eax);
|
iap10@3290
|
1189 CASE_GET_REG(ECX, ecx);
|
iap10@3290
|
1190 CASE_GET_REG(EDX, edx);
|
iap10@3290
|
1191 CASE_GET_REG(EBX, ebx);
|
iap10@3290
|
1192 CASE_GET_REG(EBP, ebp);
|
iap10@3290
|
1193 CASE_GET_REG(ESI, esi);
|
iap10@3290
|
1194 CASE_GET_REG(EDI, edi);
|
kaf24@5659
|
1195 CASE_EXTEND_GET_REG
|
kaf24@6730
|
1196 case REG_ESP:
|
kaf24@6730
|
1197 __vmread(GUEST_RSP, &value);
|
iap10@3290
|
1198 break;
|
iap10@3290
|
1199 default:
|
iap10@3290
|
1200 printk("invalid gp: %d\n", gp);
|
iap10@3290
|
1201 __vmx_bug(regs);
|
iap10@3290
|
1202 }
|
iap10@3290
|
1203
|
maf46@3855
|
1204 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
|
maf46@3855
|
1205 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
|
iap10@3290
|
1206
|
iap10@3290
|
1207 switch(cr) {
|
iap10@3290
|
1208 case 0:
|
iap10@3290
|
1209 {
|
kaf24@6730
|
1210 return vmx_set_cr0(value);
|
iap10@3290
|
1211 }
|
iap10@3290
|
1212 case 3:
|
iap10@3290
|
1213 {
|
maf46@4621
|
1214 unsigned long old_base_mfn, mfn;
|
iap10@3290
|
1215
|
iap10@3290
|
1216 /*
|
iap10@3823
|
1217 * If paging is not enabled yet, simply copy the value to CR3.
|
iap10@3290
|
1218 */
|
arun@5186
|
1219 if (!vmx_paging_enabled(d)) {
|
kaf24@3677
|
1220 d->arch.arch_vmx.cpu_cr3 = value;
|
iap10@3823
|
1221 break;
|
iap10@3290
|
1222 }
|
iap10@3290
|
1223
|
iap10@3290
|
1224 /*
|
iap10@3290
|
1225 * We make a new one if the shadow does not exist.
|
iap10@3290
|
1226 */
|
kaf24@3677
|
1227 if (value == d->arch.arch_vmx.cpu_cr3) {
|
iap10@3290
|
1228 /*
|
iap10@3290
|
1229 * This is simple TLB flush, implying the guest has
|
iap10@3290
|
1230 * removed some translation or changed page attributes.
|
iap10@3290
|
1231 * We simply invalidate the shadow.
|
iap10@3290
|
1232 */
|
kaf24@6481
|
1233 mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
|
kaf24@5237
|
1234 if (mfn != pagetable_get_pfn(d->arch.guest_table))
|
iap10@3290
|
1235 __vmx_bug(regs);
|
mafetter@4141
|
1236 shadow_sync_all(d->domain);
|
iap10@3290
|
1237 } else {
|
iap10@3290
|
1238 /*
|
iap10@3290
|
1239 * If different, make a shadow. Check if the PDBR is valid
|
iap10@3290
|
1240 * first.
|
iap10@3290
|
1241 */
|
maf46@3855
|
1242 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
|
maf46@4621
|
1243 if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
|
kaf24@6481
|
1244 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
|
maf46@4621
|
1245 !get_page(pfn_to_page(mfn), d->domain) )
|
iap10@3290
|
1246 {
|
arun@5186
|
1247 printk("Invalid CR3 value=%lx", value);
|
kaf24@4325
|
1248 domain_crash_synchronous(); /* need to take a clean path */
|
iap10@3290
|
1249 }
|
mafetter@4799
|
1250 old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
|
leendert@4652
|
1251 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
|
leendert@4652
|
1252 if (old_base_mfn)
|
maf46@4621
|
1253 put_page(pfn_to_page(old_base_mfn));
|
maf46@4527
|
1254 update_pagetables(d);
|
iap10@3290
|
1255 /*
|
iap10@3823
|
1256 * arch.shadow_table should now hold the next CR3 for shadow
|
iap10@3290
|
1257 */
|
kaf24@3677
|
1258 d->arch.arch_vmx.cpu_cr3 = value;
|
maf46@3855
|
1259 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
|
kaf24@6730
|
1260 value);
|
kaf24@5250
|
1261 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
|
iap10@3290
|
1262 }
|
iap10@3290
|
1263 break;
|
iap10@3290
|
1264 }
    case 4:
    {
        /* CR4 */
        unsigned long old_guest_cr;

        __vmread(GUEST_CR4, &old_guest_cr);
        if (value & X86_CR4_PAE) {
            set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
        } else {
            if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
                         &d->arch.arch_vmx.cpu_state)) {
                vmx_inject_exception(d, TRAP_gp_fault, 0);
            }
            clear_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
        }

        __vmread(CR4_READ_SHADOW, &old_cr);

        __vmwrite(GUEST_CR4, value | VMX_CR4_HOST_MASK);
        __vmwrite(CR4_READ_SHADOW, value);

        /*
         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
         * all TLB entries except global entries.
         */
        if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
            shadow_sync_all(d->domain);
        }
        break;
    }
    default:
        printk("invalid cr: %d\n", cr);
        __vmx_bug(regs);
    }

    return 1;
}

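/*
 * CASE_SET_REG mirrors CASE_GET_REG above: it expands to a case label that
 * stores 'value' into the named general-purpose register of the saved
 * guest context.
 */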
#define CASE_SET_REG(REG, reg)      \
    case REG_ ## REG:               \
        regs->reg = value;          \
        break

/*
 * Read from control registers. CR0 and CR4 reads are satisfied from the
 * read shadows without a VM exit, so only CR3 is handled here.
 */
static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
{
    unsigned long value;
    struct vcpu *d = current;

    if (cr != 3)
        __vmx_bug(regs);

    value = (unsigned long) d->arch.arch_vmx.cpu_cr3;

    switch (gp) {
    CASE_SET_REG(EAX, eax);
    CASE_SET_REG(ECX, ecx);
    CASE_SET_REG(EDX, edx);
    CASE_SET_REG(EBX, ebx);
    CASE_SET_REG(EBP, ebp);
    CASE_SET_REG(ESI, esi);
    CASE_SET_REG(EDI, edi);
    CASE_EXTEND_SET_REG
    case REG_ESP:
        __vmwrite(GUEST_RSP, value);
        regs->esp = value;
        break;
    default:
        printk("invalid gp: %d\n", gp);
        __vmx_bug(regs);
    }

    VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
}

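/*
 * A CR-access exit qualification encodes the access type (MOV to/from CR,
 * CLTS, or LMSW), the CR number, and the general-purpose register involved;
 * decode it here and dispatch to the handlers above.
 */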
static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
{
    unsigned int gp, cr;
    unsigned long value;

    switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
    case TYPE_MOV_TO_CR:
        gp = exit_qualification & CONTROL_REG_ACCESS_REG;
        cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
        TRACE_VMEXIT(1, TYPE_MOV_TO_CR);
        TRACE_VMEXIT(2, cr);
        TRACE_VMEXIT(3, gp);
        return mov_to_cr(gp, cr, regs);
    case TYPE_MOV_FROM_CR:
        gp = exit_qualification & CONTROL_REG_ACCESS_REG;
        cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
        TRACE_VMEXIT(1, TYPE_MOV_FROM_CR);
        TRACE_VMEXIT(2, cr);
        TRACE_VMEXIT(3, gp);
        mov_from_cr(cr, gp, regs);
        break;
    case TYPE_CLTS:
        TRACE_VMEXIT(1, TYPE_CLTS);
        clts();
        setup_fpu(current);

        __vmread(GUEST_CR0, &value);
        value &= ~X86_CR0_TS; /* clear TS */
        __vmwrite(GUEST_CR0, value);

        __vmread(CR0_READ_SHADOW, &value);
        value &= ~X86_CR0_TS; /* clear TS */
        __vmwrite(CR0_READ_SHADOW, value);
        break;
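    /*
     * LMSW loads the low 16 bits of CR0; only the PE/MP/EM/TS bits
     * (mask 0xF) are architecturally writable this way, so merge them into
     * the current CR0 read shadow and apply the result.
     */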
    case TYPE_LMSW:
        TRACE_VMEXIT(1, TYPE_LMSW);
        __vmread(CR0_READ_SHADOW, &value);
        value = (value & ~0xF) |
            (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
        return vmx_set_cr0(value);
    default:
        __vmx_bug(regs);
        break;
    }
    return 1;
}

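/*
 * The guest's SYSENTER MSRs are kept in the VMCS rather than in the
 * hardware MSRs, so reads of those are served from the guest fields;
 * anything else is offered to the long-mode MSR code and finally to the
 * real rdmsr.
 */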
static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;

    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
                (unsigned long)regs->ecx, (unsigned long)regs->eax,
                (unsigned long)regs->edx);
    switch (regs->ecx) {
    case MSR_IA32_SYSENTER_CS:
        __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
        break;
    case MSR_IA32_SYSENTER_ESP:
        __vmread(GUEST_SYSENTER_ESP, &msr_content);
        break;
    case MSR_IA32_SYSENTER_EIP:
        __vmread(GUEST_SYSENTER_EIP, &msr_content);
        break;
    default:
        if (long_mode_do_msr_read(regs))
            return;
        /* rdmsr_user() fills regs->eax/edx directly; return here so the
         * msr_content unpacking below cannot clobber the result. */
        rdmsr_user(regs->ecx, regs->eax, regs->edx);
        return;
    }

    regs->eax = msr_content & 0xFFFFFFFF;
    regs->edx = msr_content >> 32;

    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
                "ecx=%lx, eax=%lx, edx=%lx",
                (unsigned long)regs->ecx, (unsigned long)regs->eax,
                (unsigned long)regs->edx);
}

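/*
 * MSR writes mirror the read path: SYSENTER state goes into the VMCS,
 * anything else is handed to the long-mode MSR code.
 */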
static inline void vmx_do_msr_write(struct cpu_user_regs *regs)
{
    u64 msr_content;

    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write: ecx=%lx, eax=%lx, edx=%lx",
                (unsigned long)regs->ecx, (unsigned long)regs->eax,
                (unsigned long)regs->edx);

    msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);

    switch (regs->ecx) {
    case MSR_IA32_SYSENTER_CS:
        __vmwrite(GUEST_SYSENTER_CS, msr_content);
        break;
    case MSR_IA32_SYSENTER_ESP:
        __vmwrite(GUEST_SYSENTER_ESP, msr_content);
        break;
    case MSR_IA32_SYSENTER_EIP:
        __vmwrite(GUEST_SYSENTER_EIP, msr_content);
        break;
    default:
        long_mode_do_msr_write(regs);
        break;
    }

    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: "
                "ecx=%lx, eax=%lx, edx=%lx",
                (unsigned long)regs->ecx, (unsigned long)regs->eax,
                (unsigned long)regs->edx);
}

/*
 * A HLT in the guest means the vcpu has no runnable work, so use this
 * exit to reschedule.
 */
static inline void vmx_vmexit_do_hlt(void)
{
#if VMX_DEBUG
    unsigned long eip;
    __vmread(GUEST_RIP, &eip);
#endif
    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
    raise_softirq(SCHEDULE_SOFTIRQ);
}

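/*
 * The external interrupt that forced this exit has not been dispatched by
 * the host yet: fetch its vector from the VMCS and invoke the matching
 * host handler directly.
 */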
static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
{
    unsigned int vector;
    int error;

    asmlinkage void do_IRQ(struct cpu_user_regs *);
    void smp_apic_timer_interrupt(struct cpu_user_regs *);
    void timer_interrupt(int, void *, struct cpu_user_regs *);
    void smp_event_check_interrupt(void);
    void smp_invalidate_interrupt(void);
    void smp_call_function_interrupt(void);
    void smp_spurious_interrupt(struct cpu_user_regs *regs);
    void smp_error_interrupt(struct cpu_user_regs *regs);

    if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
        || !(vector & INTR_INFO_VALID_MASK))
        __vmx_bug(regs);

    vector &= 0xff;
    local_irq_disable();

    switch (vector) {
    case LOCAL_TIMER_VECTOR:
        smp_apic_timer_interrupt(regs);
        break;
    case EVENT_CHECK_VECTOR:
        smp_event_check_interrupt();
        break;
    case INVALIDATE_TLB_VECTOR:
        smp_invalidate_interrupt();
        break;
    case CALL_FUNCTION_VECTOR:
        smp_call_function_interrupt();
        break;
    case SPURIOUS_APIC_VECTOR:
        smp_spurious_interrupt(regs);
        break;
    case ERROR_APIC_VECTOR:
        smp_error_interrupt(regs);
        break;
    default:
        regs->entry_vector = vector;
        do_IRQ(regs);
        break;
    }
}

static inline void vmx_vmexit_do_mwait(void)
{
#if VMX_DEBUG
    unsigned long eip;
    __vmread(GUEST_RIP, &eip);
#endif
    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
    raise_softirq(SCHEDULE_SOFTIRQ);
}

#define BUF_SIZ     256
#define MAX_LINE    80
char print_buf[BUF_SIZ];
static int index;

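/*
 * Minimal guest "console": a VMCALL delivers one character in EAX, which is
 * buffered here and flushed to the Xen console on newline or once the line
 * reaches MAX_LINE characters.
 */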
static void vmx_print_line(const char c, struct vcpu *d)
{
    if (index == MAX_LINE || c == '\n') {
        if (index == MAX_LINE) {
            print_buf[index++] = c;
        }
        print_buf[index] = '\0';
        printk("(GUEST: %u) %s\n", d->domain->domain_id, (char *) &print_buf);
        index = 0;
    }
    else
        print_buf[index++] = c;
}

void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
{
    __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
    __vmread(GUEST_RSP, &ctxt->esp);
    __vmread(GUEST_RFLAGS, &ctxt->eflags);
    __vmread(GUEST_CS_SELECTOR, &ctxt->cs);
    __vmread(GUEST_RIP, &ctxt->eip);

    __vmread(GUEST_GS_SELECTOR, &ctxt->gs);
    __vmread(GUEST_FS_SELECTOR, &ctxt->fs);
    __vmread(GUEST_ES_SELECTOR, &ctxt->es);
    __vmread(GUEST_DS_SELECTOR, &ctxt->ds);
}

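/*
 * When the pervasive debugger is built in, guest register state is copied
 * out of the VMCS before each pdb callback and written back afterwards.
 */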
#ifdef XEN_DEBUGGER
void save_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->xss);
    __vmread(GUEST_RSP, &regs->esp);
    __vmread(GUEST_RFLAGS, &regs->eflags);
    __vmread(GUEST_CS_SELECTOR, &regs->xcs);
    __vmread(GUEST_RIP, &regs->eip);

    __vmread(GUEST_GS_SELECTOR, &regs->xgs);
    __vmread(GUEST_FS_SELECTOR, &regs->xfs);
    __vmread(GUEST_ES_SELECTOR, &regs->xes);
    __vmread(GUEST_DS_SELECTOR, &regs->xds);
}

void restore_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmwrite(GUEST_SS_SELECTOR, regs->xss);
    __vmwrite(GUEST_RSP, regs->esp);
    __vmwrite(GUEST_RFLAGS, regs->eflags);
    __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
    __vmwrite(GUEST_RIP, regs->eip);

    __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
    __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
    __vmwrite(GUEST_ES_SELECTOR, regs->xes);
    __vmwrite(GUEST_DS_SELECTOR, regs->xds);
}
#endif

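/*
 * Top-level VM-exit dispatcher: runs on every exit from non-root mode,
 * re-injects any event whose delivery was interrupted by the exit, and then
 * switches on the exit reason.
 */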
asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
{
    unsigned int exit_reason, idtv_info_field;
    unsigned long exit_qualification, eip, inst_len = 0;
    struct vcpu *v = current;
    int error;

    if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
        __vmx_bug(&regs);

    perfc_incra(vmexits, exit_reason);

    __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
    if (idtv_info_field & INTR_INFO_VALID_MASK) {
        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);

        __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
        if (inst_len >= 1 && inst_len <= 15)
            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);

        if (idtv_info_field & 0x800) { /* valid error code */
            unsigned long error_code;
            __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        }

        VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
    }

    /* Don't log the (very frequent) external-interrupt, VMCALL and I/O exits. */
    if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
        exit_reason != EXIT_REASON_VMCALL &&
        exit_reason != EXIT_REASON_IO_INSTRUCTION)
        VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);

    if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
        printk("Failed vm entry\n");
        domain_crash_synchronous();
        return;
    }

    __vmread(GUEST_RIP, &eip);
    TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
    TRACE_VMEXIT(0, exit_reason);

    switch (exit_reason) {
    case EXIT_REASON_EXCEPTION_NMI:
    {
        /*
         * We don't enable VM exits for software interrupts (INT n), so
         * this exit means either (1) the guest raised an exception
         * (e.g. #PF), or (2) an NMI arrived.
         */
        int error;
        unsigned int vector;
        unsigned long va;

        if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
            || !(vector & INTR_INFO_VALID_MASK))
            __vmx_bug(&regs);
        vector &= 0xff;

        TRACE_VMEXIT(1, vector);
        perfc_incra(cause_vector, vector);

        TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
        switch (vector) {
#ifdef XEN_DEBUGGER
        case TRAP_debug:
        {
            save_cpu_user_regs(&regs);
            pdb_handle_exception(1, &regs, 1);
            restore_cpu_user_regs(&regs);
            break;
        }
        case TRAP_int3:
        {
            save_cpu_user_regs(&regs);
            pdb_handle_exception(3, &regs, 1);
            restore_cpu_user_regs(&regs);
            break;
        }
#else
        case TRAP_debug:
        {
            void store_cpu_user_regs(struct cpu_user_regs *regs);
            long do_sched_op(unsigned long op);

            store_cpu_user_regs(&regs);
            __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);

            set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
            do_sched_op(SCHEDOP_yield);

            break;
        }
#endif
        case TRAP_no_device:
        {
            vmx_do_no_device_fault();
            break;
        }
        case TRAP_page_fault:
        {
            __vmread(EXIT_QUALIFICATION, &va);
            __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);

            TRACE_VMEXIT(3, regs.error_code);
            TRACE_VMEXIT(4, va);

            VMX_DBG_LOG(DBG_LEVEL_VMMU,
                        "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
                        (unsigned long)regs.eax, (unsigned long)regs.ebx,
                        (unsigned long)regs.ecx, (unsigned long)regs.edx,
                        (unsigned long)regs.esi, (unsigned long)regs.edi);
            v->domain->arch.vmx_platform.mpci.inst_decoder_regs = &regs;

            if (!(error = vmx_do_page_fault(va, &regs))) {
                /*
                 * Inject #PF using the Interruption-Information Fields.
                 */
                vmx_inject_exception(v, TRAP_page_fault, regs.error_code);
                v->arch.arch_vmx.cpu_cr2 = va;
                TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
            }
            break;
        }
        case TRAP_nmi:
            do_nmi(&regs, 0);
            break;
        default:
            vmx_reflect_exception(v);
            break;
        }
        break;
    }
    case EXIT_REASON_EXTERNAL_INTERRUPT:
        vmx_vmexit_do_extint(&regs);
        break;
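    /*
     * An interrupt-window exit: the guest can now accept interrupts again.
     * Restoring the baseline execution controls (which do not request
     * interrupt-window exiting) drops the request that caused this exit.
     */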
    case EXIT_REASON_PENDING_INTERRUPT:
        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                  MONITOR_CPU_BASED_EXEC_CONTROLS);
        break;
    case EXIT_REASON_TASK_SWITCH:
        __vmx_bug(&regs);
        break;
    case EXIT_REASON_CPUID:
        __get_instruction_length(inst_len);
        vmx_vmexit_do_cpuid(regs.eax, &regs);
        __update_guest_eip(inst_len);
        break;
    case EXIT_REASON_HLT:
        __get_instruction_length(inst_len);
        __update_guest_eip(inst_len);
        vmx_vmexit_do_hlt();
        break;
    case EXIT_REASON_INVLPG:
    {
        unsigned long va;

        __vmread(EXIT_QUALIFICATION, &va);
        vmx_vmexit_do_invlpg(va);
        __get_instruction_length(inst_len);
        __update_guest_eip(inst_len);
        break;
    }
    case EXIT_REASON_VMCALL:
        __get_instruction_length(inst_len);
        __vmread(GUEST_RIP, &eip);
        __vmread(EXIT_QUALIFICATION, &exit_qualification);

        vmx_print_line(regs.eax, v); /* provides the current domain */
        __update_guest_eip(inst_len);
        break;
    case EXIT_REASON_CR_ACCESS:
    {
        __vmread(GUEST_RIP, &eip);
        __get_instruction_length(inst_len);
        __vmread(EXIT_QUALIFICATION, &exit_qualification);

        VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len = %lx, exit_qualification = %lx",
                    eip, inst_len, exit_qualification);
        if (vmx_cr_access(exit_qualification, &regs))
            __update_guest_eip(inst_len);
        TRACE_VMEXIT(3, regs.error_code);
        TRACE_VMEXIT(4, exit_qualification);
        break;
    }
    case EXIT_REASON_DR_ACCESS:
        __vmread(EXIT_QUALIFICATION, &exit_qualification);
        vmx_dr_access(exit_qualification, &regs);
        __get_instruction_length(inst_len);
        __update_guest_eip(inst_len);
        break;
    case EXIT_REASON_IO_INSTRUCTION:
        __vmread(EXIT_QUALIFICATION, &exit_qualification);
        __get_instruction_length(inst_len);
        vmx_io_instruction(&regs, exit_qualification, inst_len);
        TRACE_VMEXIT(4, exit_qualification);
        break;
    case EXIT_REASON_MSR_READ:
        __get_instruction_length(inst_len);
        vmx_do_msr_read(&regs);
        __update_guest_eip(inst_len);
        break;
    case EXIT_REASON_MSR_WRITE:
        __vmread(GUEST_RIP, &eip);
        vmx_do_msr_write(&regs);
        __get_instruction_length(inst_len);
        __update_guest_eip(inst_len);
        break;
    case EXIT_REASON_MWAIT_INSTRUCTION:
        __get_instruction_length(inst_len);
        __update_guest_eip(inst_len);
        vmx_vmexit_do_mwait();
        break;
    default:
        __vmx_bug(&regs); /* should not happen */
    }
}

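/*
 * Restore the guest's CR2 on the VM-entry path: VMX hardware does not save
 * or load CR2 across entries and exits, so Xen must install the virtual
 * value itself.
 */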
asmlinkage void load_cr2(void)
{
    struct vcpu *d = current;

    local_irq_disable();
#ifdef __i386__
    asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
#else
    asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
#endif
}

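/*
 * With tracing enabled, the values stashed by TRACE_VMEXIT while handling an
 * exit are emitted as a single TRC_VMENTRY record on the way back into the
 * guest, then reset to a recognisable sentinel (9).
 */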
#ifdef TRACE_BUFFER
asmlinkage void trace_vmentry(void)
{
    TRACE_5D(TRC_VMENTRY, trace_values[current->processor][0],
             trace_values[current->processor][1],
             trace_values[current->processor][2],
             trace_values[current->processor][3],
             trace_values[current->processor][4]);
    TRACE_VMEXIT(0, 9);
    TRACE_VMEXIT(1, 9);
    TRACE_VMEXIT(2, 9);
    TRACE_VMEXIT(3, 9);
    TRACE_VMEXIT(4, 9);
    return;
}

asmlinkage void trace_vmexit(void)
{
    TRACE_3D(TRC_VMEXIT, 0, 0, 0);
    return;
}
#endif
#endif /* CONFIG_VMX */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */