debuggers.hg

view xen/arch/x86/vmx.c @ 6641:f27205ea60ef

merge?
author cl349@firebug.cl.cam.ac.uk
date Sat Sep 03 16:58:50 2005 +0000 (2005-09-03)
parents 291e816acbf4 20140d3fbf83
children 29808fef9148 b6c98fe62e1a
1 /*
2 * vmx.c: handling VMX architecture-related VM exits
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/init.h>
22 #include <xen/lib.h>
23 #include <xen/trace.h>
24 #include <xen/sched.h>
25 #include <xen/irq.h>
26 #include <xen/softirq.h>
27 #include <xen/domain_page.h>
28 #include <asm/current.h>
29 #include <asm/io.h>
30 #include <asm/shadow.h>
31 #include <asm/regs.h>
32 #include <asm/cpufeature.h>
33 #include <asm/processor.h>
34 #include <asm/types.h>
35 #include <asm/msr.h>
36 #include <asm/spinlock.h>
37 #include <asm/vmx.h>
38 #include <asm/vmx_vmcs.h>
39 #include <asm/vmx_intercept.h>
40 #include <asm/shadow.h>
41 #if CONFIG_PAGING_LEVELS >= 3
42 #include <asm/shadow_64.h>
43 #endif
45 #include <public/io/ioreq.h>
47 #ifdef CONFIG_VMX
49 int vmcs_size;
50 unsigned int opt_vmx_debug_level = 0;
51 integer_param("vmx_debug", opt_vmx_debug_level);
53 #ifdef TRACE_BUFFER
54 static unsigned long trace_values[NR_CPUS][5];
55 #define TRACE_VMEXIT(index,value) trace_values[current->processor][index]=value
56 #else
57 #define TRACE_VMEXIT(index,value) ((void)0)
58 #endif
60 #ifdef __x86_64__
61 static struct msr_state percpu_msr[NR_CPUS];
63 static u32 msr_data_index[VMX_MSR_COUNT] =
64 {
65 MSR_LSTAR, MSR_STAR, MSR_CSTAR,
66 MSR_SYSCALL_MASK, MSR_EFER,
67 };
69 /*
70 * To avoid MSR save/restore at every VM exit/entry time, we restore
71 * the x86_64-specific MSRs at domain switch time. Since those MSRs
72 * are not modified once set for generic domains, we don't save them,
73 * but simply reset them to the values set at percpu_traps_init().
74 */
75 void vmx_load_msrs(struct vcpu *n)
76 {
77 struct msr_state *host_state;
78 host_state = &percpu_msr[smp_processor_id()];
80 while (host_state->flags){
81 int i;
83 i = find_first_set_bit(host_state->flags);
84 wrmsrl(msr_data_index[i], host_state->msr_items[i]);
85 clear_bit(i, &host_state->flags);
86 }
87 }
89 static void vmx_save_init_msrs(void)
90 {
91 struct msr_state *host_state;
92 host_state = &percpu_msr[smp_processor_id()];
93 int i;
95 for (i = 0; i < VMX_MSR_COUNT; i++)
96 rdmsrl(msr_data_index[i], host_state->msr_items[i]);
97 }
99 #define CASE_READ_MSR(address) \
100 case MSR_ ## address: \
101 msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address]; \
102 break
104 #define CASE_WRITE_MSR(address) \
105 case MSR_ ## address: \
106 { \
107 msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content; \
108 if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) { \
109 set_bit(VMX_INDEX_MSR_ ## address, &msr->flags); \
110 } \
111 wrmsrl(MSR_ ## address, msr_content); \
112 set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags); \
113 } \
114 break
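/*
 * Editor's note (illustrative, not part of the original changeset): the
 * CASE_READ_MSR()/CASE_WRITE_MSR() helpers above expand into ordinary
 * switch cases. For example, CASE_READ_MSR(STAR) inside
 * long_mode_do_msr_read() is equivalent to:
 *
 *     case MSR_STAR:
 *         msr_content = msr->msr_items[VMX_INDEX_MSR_STAR];
 *         break;
 */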
116 #define IS_CANO_ADDRESS(add) 1
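/*
 * Editor's note (illustrative sketch, not in the original source): the
 * IS_CANO_ADDRESS() stub above accepts every address. A real x86-64
 * canonicality check requires bits 63:47 to be a sign-extension of
 * bit 47; assuming 48-bit virtual addresses, one way to write it:
 */
#if 0
static inline int is_canonical_address(u64 addr)
{
    /* bits 63:47 must all equal bit 47 */
    return (((s64)addr >> 47) == 0) || (((s64)addr >> 47) == -1);
}
#endif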
117 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
118 {
119 u64 msr_content = 0;
120 struct vcpu *vc = current;
121 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
122 switch(regs->ecx){
123 case MSR_EFER:
124 msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
125 VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content);
126 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
127 &vc->arch.arch_vmx.cpu_state))
128 msr_content |= 1 << _EFER_LME;
130 if (VMX_LONG_GUEST(vc))
131 msr_content |= 1 << _EFER_LMA;
132 break;
133 case MSR_FS_BASE:
134 if (!(VMX_LONG_GUEST(vc)))
135 /* XXX should this be a #GP fault? */
136 domain_crash();
137 __vmread(GUEST_FS_BASE, &msr_content);
138 break;
139 case MSR_GS_BASE:
140 if (!(VMX_LONG_GUEST(vc)))
141 domain_crash();
142 __vmread(GUEST_GS_BASE, &msr_content);
143 break;
144 case MSR_SHADOW_GS_BASE:
145 msr_content = msr->shadow_gs;
146 break;
148 CASE_READ_MSR(STAR);
149 CASE_READ_MSR(LSTAR);
150 CASE_READ_MSR(CSTAR);
151 CASE_READ_MSR(SYSCALL_MASK);
152 default:
153 return 0;
154 }
155 VMX_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content);
156 regs->eax = msr_content & 0xffffffff;
157 regs->edx = msr_content >> 32;
158 return 1;
159 }
161 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
162 {
163 u64 msr_content = regs->eax | ((u64)regs->edx << 32);
164 struct vcpu *vc = current;
165 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
166 struct msr_state * host_state =
167 &percpu_msr[smp_processor_id()];
169 VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n",
170 regs->ecx, msr_content);
172 switch (regs->ecx){
173 case MSR_EFER:
174 if ((msr_content & EFER_LME) ^
175 test_bit(VMX_CPU_STATE_LME_ENABLED,
176 &vc->arch.arch_vmx.cpu_state)){
177 if (test_bit(VMX_CPU_STATE_PG_ENABLED,
178 &vc->arch.arch_vmx.cpu_state) ||
179 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
180 &vc->arch.arch_vmx.cpu_state)){
181 vmx_inject_exception(vc, TRAP_gp_fault, 0);
182 }
183 }
184 if (msr_content & EFER_LME)
185 set_bit(VMX_CPU_STATE_LME_ENABLED,
186 &vc->arch.arch_vmx.cpu_state);
187 /* No update for LME/LMA since they have no effect */
188 msr->msr_items[VMX_INDEX_MSR_EFER] =
189 msr_content;
190 if (msr_content & ~(EFER_LME | EFER_LMA)){
191 msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
192 if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){
193 rdmsrl(MSR_EFER,
194 host_state->msr_items[VMX_INDEX_MSR_EFER]);
195 set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
196 set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
197 wrmsrl(MSR_EFER, msr_content);
198 }
199 }
200 break;
202 case MSR_FS_BASE:
203 case MSR_GS_BASE:
204 if (!(VMX_LONG_GUEST(vc)))
205 domain_crash();
206 if (!IS_CANO_ADDRESS(msr_content)){
207 VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
208 vmx_inject_exception(vc, TRAP_gp_fault, 0);
209 }
210 if (regs->ecx == MSR_FS_BASE)
211 __vmwrite(GUEST_FS_BASE, msr_content);
212 else
213 __vmwrite(GUEST_GS_BASE, msr_content);
214 break;
216 case MSR_SHADOW_GS_BASE:
217 if (!(VMX_LONG_GUEST(vc)))
218 domain_crash();
219 vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
220 wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
221 break;
223 CASE_WRITE_MSR(STAR);
224 CASE_WRITE_MSR(LSTAR);
225 CASE_WRITE_MSR(CSTAR);
226 CASE_WRITE_MSR(SYSCALL_MASK);
227 default:
228 return 0;
229 }
230 return 1;
231 }
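/*
 * Editor's note (background sketch, not in the original file): EFER.LMA
 * is never set directly by a guest write; architecturally it becomes set
 * only once long mode is actually active, i.e. when EFER.LME, CR0.PG and
 * CR4.PAE are all enabled. That is why the EFER write above only records
 * LME and the IA-32e/LMA decision is taken in vmx_set_cr0() below.
 * Schematically:
 */
#if 0
static inline int efer_lma(int lme, int cr0_pg, int cr4_pae)
{
    return lme && cr0_pg && cr4_pae;
}
#endif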
233 void
234 vmx_restore_msrs(struct vcpu *d)
235 {
236 int i = 0;
237 struct msr_state *guest_state;
238 struct msr_state *host_state;
239 unsigned long guest_flags;
241 guest_state = &d->arch.arch_vmx.msr_content;
242 host_state = &percpu_msr[smp_processor_id()];
244 wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
245 guest_flags = guest_state->flags;
246 if (!guest_flags)
247 return;
249 while (guest_flags){
250 i = find_first_set_bit(guest_flags);
252 VMX_DBG_LOG(DBG_LEVEL_2,
253 "restore guest's index %d msr %lx with %lx\n",
254 i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
255 set_bit(i, &host_state->flags);
256 wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
257 clear_bit(i, &guest_flags);
258 }
259 }
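/*
 * Editor's note (summary, not in the original changeset): taken together,
 * the routines above implement lazy MSR switching. Roughly:
 *   - a guest WRMSR (CASE_WRITE_MSR) records the value, marks the MSR in
 *     both flag bitmaps and writes it to hardware;
 *   - vmx_load_msrs() later puts the host's values back, clearing
 *     host_state->flags as it goes;
 *   - vmx_restore_msrs() replays the guest's recorded values when the
 *     vcpu runs again.
 * Only MSRs the guest has actually touched are ever saved or restored.
 */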
261 #else /* __i386__ */
262 #define vmx_save_init_msrs() ((void)0)
264 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs){
265 return 0;
266 }
267 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs){
268 return 0;
269 }
270 #endif
272 extern long evtchn_send(int lport);
273 extern long do_block(void);
274 void do_nmi(struct cpu_user_regs *, unsigned long);
276 static int check_vmx_controls(u32 ctrls, u32 msr)
277 {
278 u32 vmx_msr_low, vmx_msr_high;
280 rdmsr(msr, vmx_msr_low, vmx_msr_high);
281 if (ctrls < vmx_msr_low || ctrls > vmx_msr_high) {
282 printk("Insufficient VMX capability 0x%x, "
283 "msr=0x%x,low=0x%8x,high=0x%x\n",
284 ctrls, msr, vmx_msr_low, vmx_msr_high);
285 return 0;
286 }
287 return 1;
288 }
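/*
 * Editor's note (illustrative, not part of the original changeset): the
 * IA32_VMX_*_CTLS capability MSRs encode per-bit constraints: the low
 * 32 bits are the "allowed 0-settings" (any bit set there must be 1 in
 * the control) and the high 32 bits are the "allowed 1-settings" (a
 * control bit may only be 1 if set there). The numeric range comparison
 * above is a simplification; the bit-wise form of the check would be:
 */
#if 0
static int vmx_control_supported(u32 ctrls, u32 msr)
{
    u32 must_be_one, may_be_one;

    rdmsr(msr, must_be_one, may_be_one);
    return ((ctrls & must_be_one) == must_be_one) &&  /* required bits set */
           ((ctrls & ~may_be_one) == 0);              /* no forbidden bits */
}
#endif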
290 int start_vmx(void)
291 {
292 struct vmcs_struct *vmcs;
293 u32 ecx;
294 u32 eax, edx;
295 u64 phys_vmcs; /* debugging */
297 /*
298 * Xen does not fill x86_capability words except 0.
299 */
300 ecx = cpuid_ecx(1);
301 boot_cpu_data.x86_capability[4] = ecx;
303 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
304 return 0;
306 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
308 if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
309 if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
310 printk("VMX disabled by Feature Control MSR.\n");
311 return 0;
312 }
313 }
314 else {
315 wrmsr(IA32_FEATURE_CONTROL_MSR,
316 IA32_FEATURE_CONTROL_MSR_LOCK |
317 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
318 }
320 if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
321 MSR_IA32_VMX_PINBASED_CTLS_MSR))
322 return 0;
323 if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
324 MSR_IA32_VMX_PROCBASED_CTLS_MSR))
325 return 0;
326 if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
327 MSR_IA32_VMX_EXIT_CTLS_MSR))
328 return 0;
329 if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
330 MSR_IA32_VMX_ENTRY_CTLS_MSR))
331 return 0;
333 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
335 if (!(vmcs = alloc_vmcs())) {
336 printk("Failed to allocate VMCS\n");
337 return 0;
338 }
340 phys_vmcs = (u64) virt_to_phys(vmcs);
342 if (!(__vmxon(phys_vmcs))) {
343 printk("VMXON is done\n");
344 }
346 vmx_save_init_msrs();
348 return 1;
349 }
351 void stop_vmx(void)
352 {
353 if (read_cr4() & X86_CR4_VMXE)
354 __vmxoff();
355 }
357 /*
358 * Not all VM exits provide a valid value in the VM-exit instruction-length field.
359 */
360 #define __get_instruction_length(len) \
361 __vmread(VM_EXIT_INSTRUCTION_LEN, &(len)); \
362 if ((len) < 1 || (len) > 15) \
363 __vmx_bug(&regs);
365 static void inline __update_guest_eip(unsigned long inst_len)
366 {
367 unsigned long current_eip;
369 __vmread(GUEST_RIP, &current_eip);
370 __vmwrite(GUEST_RIP, current_eip + inst_len);
371 }
374 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
375 {
376 unsigned long eip;
377 unsigned long gpa; /* FIXME: PAE */
378 int result;
380 #if VMX_DEBUG
381 {
382 __vmread(GUEST_RIP, &eip);
383 VMX_DBG_LOG(DBG_LEVEL_VMMU,
384 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
385 va, eip, (unsigned long)regs->error_code);
386 }
387 #endif
389 if (!vmx_paging_enabled(current)){
390 handle_mmio(va, va);
391 TRACE_VMEXIT (2,2);
392 return 1;
393 }
394 gpa = gva_to_gpa(va);
396 /* Use 1:1 page table to identify MMIO address space */
397 if ( mmio_space(gpa) ){
398 if (gpa >= 0xFEE00000) { /* workaround for local APIC */
399 u32 inst_len;
400 __vmread(VM_EXIT_INSTRUCTION_LEN, &(inst_len));
401 __update_guest_eip(inst_len);
402 return 1;
403 }
404 TRACE_VMEXIT (2,2);
405 handle_mmio(va, gpa);
406 return 1;
407 }
409 result = shadow_fault(va, regs);
410 TRACE_VMEXIT (2,result);
411 #if 0
412 if ( !result )
413 {
414 __vmread(GUEST_RIP, &eip);
415 printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
416 }
417 #endif
419 return result;
420 }
422 static void vmx_do_no_device_fault(void)
423 {
424 unsigned long cr0;
426 clts();
427 setup_fpu(current);
428 __vmread(CR0_READ_SHADOW, &cr0);
429 if (!(cr0 & X86_CR0_TS)) {
430 __vmread(GUEST_CR0, &cr0);
431 cr0 &= ~X86_CR0_TS;
432 __vmwrite(GUEST_CR0, cr0);
433 }
434 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
435 }
438 static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
439 {
440 unsigned int eax, ebx, ecx, edx;
441 unsigned long eip;
443 __vmread(GUEST_RIP, &eip);
445 VMX_DBG_LOG(DBG_LEVEL_1,
446 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
447 " (esi) %lx, (edi) %lx",
448 (unsigned long)regs->eax, (unsigned long)regs->ebx,
449 (unsigned long)regs->ecx, (unsigned long)regs->edx,
450 (unsigned long)regs->esi, (unsigned long)regs->edi);
452 cpuid(input, &eax, &ebx, &ecx, &edx);
454 if (input == 1) {
455 #ifdef __i386__
456 clear_bit(X86_FEATURE_PSE, &edx);
457 clear_bit(X86_FEATURE_PAE, &edx);
458 clear_bit(X86_FEATURE_PSE36, &edx);
459 #else
460 struct vcpu *d = current;
461 if (d->domain->arch.ops->guest_paging_levels == PAGING_L2)
462 {
463 clear_bit(X86_FEATURE_PSE, &edx);
464 clear_bit(X86_FEATURE_PAE, &edx);
465 clear_bit(X86_FEATURE_PSE36, &edx);
466 }
467 #endif
469 }
471 regs->eax = (unsigned long) eax;
472 regs->ebx = (unsigned long) ebx;
473 regs->ecx = (unsigned long) ecx;
474 regs->edx = (unsigned long) edx;
476 VMX_DBG_LOG(DBG_LEVEL_1,
477 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
478 eip, input, eax, ebx, ecx, edx);
480 }
482 #define CASE_GET_REG_P(REG, reg) \
483 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
485 static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs)
486 {
487 unsigned int reg;
488 unsigned long *reg_p = 0;
489 struct vcpu *v = current;
490 unsigned long eip;
492 __vmread(GUEST_RIP, &eip);
494 reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
496 VMX_DBG_LOG(DBG_LEVEL_1,
497 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
498 eip, reg, exit_qualification);
500 switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
501 CASE_GET_REG_P(EAX, eax);
502 CASE_GET_REG_P(ECX, ecx);
503 CASE_GET_REG_P(EDX, edx);
504 CASE_GET_REG_P(EBX, ebx);
505 CASE_GET_REG_P(EBP, ebp);
506 CASE_GET_REG_P(ESI, esi);
507 CASE_GET_REG_P(EDI, edi);
508 case REG_ESP:
509 break;
510 default:
511 __vmx_bug(regs);
512 }
514 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
515 case TYPE_MOV_TO_DR:
516 /* don't need to check the range */
517 if (reg != REG_ESP)
518 v->arch.guest_context.debugreg[reg] = *reg_p;
519 else {
520 unsigned long value;
521 __vmread(GUEST_RSP, &value);
522 v->arch.guest_context.debugreg[reg] = value;
523 }
524 break;
525 case TYPE_MOV_FROM_DR:
526 if (reg != REG_ESP)
527 *reg_p = v->arch.guest_context.debugreg[reg];
528 else {
529 __vmwrite(GUEST_RSP, v->arch.guest_context.debugreg[reg]);
530 }
531 break;
532 }
533 }
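/*
 * Editor's note (worked example, not in the original changeset): in the
 * VMX exit qualification for MOV DR, bits 2:0 hold the debug-register
 * number, bit 4 the access direction (0 = MOV to DR) and bits 11:8 the
 * general-purpose register. So for "mov %eax,%dr7" the handler above
 * sees:
 */
#if 0
    exit_qualification = 0x7;                          /* DR7, to-DR, reg EAX */
    reg = exit_qualification & DEBUG_REG_ACCESS_NUM;   /* 7                   */
    /* (exit_qualification & DEBUG_REG_ACCESS_TYPE) == TYPE_MOV_TO_DR and
       (exit_qualification & DEBUG_REG_ACCESS_REG)  == REG_EAX               */
#endif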
535 /*
536 * Invalidate the TLB for va. Invalidate the shadow page corresponding
537 * to the address va.
538 */
539 static void vmx_vmexit_do_invlpg(unsigned long va)
540 {
541 unsigned long eip;
542 struct vcpu *v = current;
544 __vmread(GUEST_RIP, &eip);
546 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
547 eip, va);
549 /*
550 * We do the safest thing first, then try to update the shadow
551 * by copying from the guest.
552 */
553 shadow_invlpg(v, va);
554 }
556 static int check_for_null_selector(unsigned long eip)
557 {
558 unsigned char inst[MAX_INST_LEN];
559 unsigned long sel;
560 int i, inst_len;
561 int inst_copy_from_guest(unsigned char *, unsigned long, int);
563 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
564 memset(inst, 0, MAX_INST_LEN);
565 if (inst_copy_from_guest(inst, eip, inst_len) != inst_len) {
566 printf("check_for_null_selector: get guest instruction failed\n");
567 domain_crash_synchronous();
568 }
570 for (i = 0; i < inst_len; i++) {
571 switch (inst[i]) {
572 case 0xf3: /* REPZ */
573 case 0xf2: /* REPNZ */
574 case 0xf0: /* LOCK */
575 case 0x66: /* data32 */
576 case 0x67: /* addr32 */
577 continue;
578 case 0x2e: /* CS */
579 __vmread(GUEST_CS_SELECTOR, &sel);
580 break;
581 case 0x36: /* SS */
582 __vmread(GUEST_SS_SELECTOR, &sel);
583 break;
584 case 0x26: /* ES */
585 __vmread(GUEST_ES_SELECTOR, &sel);
586 break;
587 case 0x64: /* FS */
588 __vmread(GUEST_FS_SELECTOR, &sel);
589 break;
590 case 0x65: /* GS */
591 __vmread(GUEST_GS_SELECTOR, &sel);
592 break;
593 case 0x3e: /* DS */
594 /* FALLTHROUGH */
595 default:
596 /* DS is the default */
597 __vmread(GUEST_DS_SELECTOR, &sel);
598 }
599 return sel == 0 ? 1 : 0;
600 }
602 return 0;
603 }
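/*
 * Editor's note (worked example, not in the original source): for the
 * instruction bytes f3 26 6e ("rep outsb %es:(%esi),%dx") the scan above
 * skips the REP prefix (0xf3), hits the ES override (0x26), reads
 * GUEST_ES_SELECTOR and returns 1 iff that selector is null. With no
 * override (plain f3 6e) it falls through to the default case and checks
 * DS, the architectural default segment for OUTS.
 */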
605 void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
606 unsigned long count, int size, long value, int dir, int pvalid)
607 {
608 struct vcpu *v = current;
609 vcpu_iodata_t *vio;
610 ioreq_t *p;
612 vio = get_vio(v->domain, v->vcpu_id);
613 if (vio == NULL) {
614 printk("bad shared page: %lx\n", (unsigned long) vio);
615 domain_crash_synchronous();
616 }
618 if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
619 printf("VMX I/O has not yet completed\n");
620 domain_crash_synchronous();
621 }
622 set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
624 p = &vio->vp_ioreq;
625 p->dir = dir;
626 p->pdata_valid = pvalid;
628 p->type = IOREQ_TYPE_PIO;
629 p->size = size;
630 p->addr = port;
631 p->count = count;
632 p->df = regs->eflags & EF_DF ? 1 : 0;
634 if (pvalid) {
635 if (vmx_paging_enabled(current))
636 p->u.pdata = (void *) gva_to_gpa(value);
637 else
638 p->u.pdata = (void *) value; /* guest VA == guest PA */
639 } else
640 p->u.data = value;
642 p->state = STATE_IOREQ_READY;
644 if (vmx_portio_intercept(p)) {
645 /* no blocking & no evtchn notification */
646 clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
647 return;
648 }
650 evtchn_send(iopacket_port(v->domain));
651 vmx_wait_io();
652 }
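/*
 * Editor's note (hedged sketch, not part of this file): the request built
 * above is completed by the device model, which roughly does the mirror
 * image -- pick up the ioreq once its state is STATE_IOREQ_READY, perform
 * the port access, store the result and flip the state so that
 * vmx_wait_io() can resume the vcpu. The state/direction constants below
 * follow public/io/ioreq.h of this era and should be treated as an
 * assumption; do_port_read()/do_port_write() are placeholders.
 */
#if 0
    /* device-model side, schematically (non-pvalid case) */
    if (p->state == STATE_IOREQ_READY) {
        p->state = STATE_IOREQ_INPROCESS;
        if (p->dir == IOREQ_READ)
            p->u.data = do_port_read(p->addr, p->size);   /* placeholder */
        else
            do_port_write(p->addr, p->size, p->u.data);   /* placeholder */
        p->state = STATE_IORESP_READY;                    /* unblocks vcpu */
    }
#endif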
654 static void vmx_io_instruction(struct cpu_user_regs *regs,
655 unsigned long exit_qualification, unsigned long inst_len)
656 {
657 struct mi_per_cpu_info *mpcip;
658 unsigned long eip, cs, eflags;
659 unsigned long port, size, dir;
660 int vm86;
662 mpcip = &current->domain->arch.vmx_platform.mpci;
663 mpcip->instr = INSTR_PIO;
664 mpcip->flags = 0;
666 __vmread(GUEST_RIP, &eip);
667 __vmread(GUEST_CS_SELECTOR, &cs);
668 __vmread(GUEST_RFLAGS, &eflags);
669 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
671 VMX_DBG_LOG(DBG_LEVEL_1,
672 "vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
673 "exit_qualification = %lx",
674 vm86, cs, eip, exit_qualification);
676 if (test_bit(6, &exit_qualification))
677 port = (exit_qualification >> 16) & 0xFFFF;
678 else
679 port = regs->edx & 0xffff;
680 TRACE_VMEXIT(2, port);
681 size = (exit_qualification & 7) + 1;
682 dir = test_bit(3, &exit_qualification); /* direction */
684 if (test_bit(4, &exit_qualification)) { /* string instruction */
685 unsigned long addr, count = 1;
686 int sign = regs->eflags & EF_DF ? -1 : 1;
688 __vmread(GUEST_LINEAR_ADDRESS, &addr);
690 /*
691 * In protected mode, the guest linear address is invalid if the
692 * selector is null.
693 */
694 if (!vm86 && check_for_null_selector(eip))
695 addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
697 if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
698 mpcip->flags |= REPZ;
699 count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
700 }
702 /*
703 * Handle string pio instructions that cross pages or that
704 * are unaligned. See the comments in vmx_platform.c/handle_mmio()
705 */
706 if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
707 unsigned long value = 0;
709 mpcip->flags |= OVERLAP;
710 if (dir == IOREQ_WRITE)
711 vmx_copy(&value, addr, size, VMX_COPY_IN);
712 send_pio_req(regs, port, 1, size, value, dir, 0);
713 } else {
714 if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
715 if (sign > 0)
716 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
717 else
718 count = (addr & ~PAGE_MASK) / size;
719 } else
720 __update_guest_eip(inst_len);
722 send_pio_req(regs, port, count, size, addr, dir, 1);
723 }
724 } else {
725 __update_guest_eip(inst_len);
726 send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
727 }
728 }
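/*
 * Editor's note (worked example, not in the original changeset): the
 * page-boundary clamping above splits long string PIO requests. With
 * PAGE_SIZE 0x1000, a "rep outsl" at addr 0x1ff8 with size 4 and count 4
 * would cross into the next page, so count is clamped to
 * (PAGE_SIZE - (addr & ~PAGE_MASK)) / size = (0x1000 - 0xff8) / 4 = 2.
 * Because the guest RIP is deliberately not advanced in that branch, the
 * instruction re-executes and VM-exits again, and the remaining two
 * elements go out as a second request.
 */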
730 int
731 vmx_copy(void *buf, unsigned long laddr, int size, int dir)
732 {
733 unsigned long mfn;
734 char *addr;
735 int count;
737 while (size > 0) {
738 count = PAGE_SIZE - (laddr & ~PAGE_MASK);
739 if (count > size)
740 count = size;
742 mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
743 /* XXX check whether laddr is valid */
744 addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
746 if (dir == VMX_COPY_IN)
747 memcpy(buf, addr, count);
748 else
749 memcpy(addr, buf, count);
751 unmap_domain_page(addr);
753 laddr += count;
754 buf += count;
755 size -= count;
756 }
758 return 1;
759 }
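/*
 * Editor's note (usage illustration, not in the original source):
 * vmx_copy() is what the vmxassist plumbing below uses for small guest
 * structures that may straddle a page boundary, e.g.:
 */
#if 0
    u32 magic;

    if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN))
        return 0;   /* guest page not present / copy failed */
    if (magic != VMXASSIST_MAGIC)
        return 0;
#endif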
761 int
762 vmx_world_save(struct vcpu *d, struct vmx_assist_context *c)
763 {
764 unsigned long inst_len;
765 int error = 0;
767 error |= __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
768 error |= __vmread(GUEST_RIP, &c->eip);
769 c->eip += inst_len; /* skip transition instruction */
770 error |= __vmread(GUEST_RSP, &c->esp);
771 error |= __vmread(GUEST_RFLAGS, &c->eflags);
773 error |= __vmread(CR0_READ_SHADOW, &c->cr0);
774 c->cr3 = d->arch.arch_vmx.cpu_cr3;
775 error |= __vmread(CR4_READ_SHADOW, &c->cr4);
777 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
778 error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
780 error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
781 error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
783 error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
784 error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
785 error |= __vmread(GUEST_CS_BASE, &c->cs_base);
786 error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
788 error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
789 error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
790 error |= __vmread(GUEST_DS_BASE, &c->ds_base);
791 error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
793 error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
794 error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
795 error |= __vmread(GUEST_ES_BASE, &c->es_base);
796 error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
798 error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
799 error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
800 error |= __vmread(GUEST_SS_BASE, &c->ss_base);
801 error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
803 error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
804 error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
805 error |= __vmread(GUEST_FS_BASE, &c->fs_base);
806 error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
808 error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
809 error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
810 error |= __vmread(GUEST_GS_BASE, &c->gs_base);
811 error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
813 error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
814 error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
815 error |= __vmread(GUEST_TR_BASE, &c->tr_base);
816 error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
818 error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
819 error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
820 error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
821 error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
823 return !error;
824 }
826 int
827 vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c)
828 {
829 unsigned long mfn, old_cr4;
830 int error = 0;
832 error |= __vmwrite(GUEST_RIP, c->eip);
833 error |= __vmwrite(GUEST_RSP, c->esp);
834 error |= __vmwrite(GUEST_RFLAGS, c->eflags);
836 error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
838 if (!vmx_paging_enabled(d)) {
839 VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
840 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
841 goto skip_cr3;
842 }
844 if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
845 /*
846 * This is a simple TLB flush, implying the guest has
847 * removed some translation or changed page attributes.
848 * We simply invalidate the shadow.
849 */
850 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
851 if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
852 printk("Invalid CR3 value=%x", c->cr3);
853 domain_crash_synchronous();
854 return 0;
855 }
856 shadow_sync_all(d->domain);
857 } else {
858 /*
859 * If different, make a shadow. Check if the PDBR is valid
860 * first.
861 */
862 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
863 if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
864 printk("Invalid CR3 value=%x", c->cr3);
865 domain_crash_synchronous();
866 return 0;
867 }
868 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
869 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
870 update_pagetables(d);
871 /*
872 * arch.shadow_table should now hold the next CR3 for shadow
873 */
874 d->arch.arch_vmx.cpu_cr3 = c->cr3;
875 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
876 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
877 }
879 skip_cr3:
881 error |= __vmread(CR4_READ_SHADOW, &old_cr4);
882 error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
883 error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
885 error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
886 error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
888 error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
889 error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
891 error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
892 error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
893 error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
894 error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
896 error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
897 error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
898 error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
899 error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
901 error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
902 error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
903 error |= __vmwrite(GUEST_ES_BASE, c->es_base);
904 error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
906 error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
907 error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
908 error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
909 error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
911 error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
912 error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
913 error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
914 error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
916 error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
917 error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
918 error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
919 error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
921 error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
922 error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
923 error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
924 error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
926 error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
927 error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
928 error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
929 error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
931 return !error;
932 }
934 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
936 int
937 vmx_assist(struct vcpu *d, int mode)
938 {
939 struct vmx_assist_context c;
940 u32 magic;
941 u32 cp;
943 /* make sure vmxassist exists (this is not an error) */
944 if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN))
945 return 0;
946 if (magic != VMXASSIST_MAGIC)
947 return 0;
949 switch (mode) {
950 /*
951 * Transfer control to vmxassist.
952 * Store the current context in VMXASSIST_OLD_CONTEXT and load
953 * the new VMXASSIST_NEW_CONTEXT context. This context was created
954 * by vmxassist and will transfer control to it.
955 */
956 case VMX_ASSIST_INVOKE:
957 /* save the old context */
958 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
959 goto error;
960 if (cp != 0) {
961 if (!vmx_world_save(d, &c))
962 goto error;
963 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT))
964 goto error;
965 }
967 /* restore the new context, this should activate vmxassist */
968 if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN))
969 goto error;
970 if (cp != 0) {
971 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
972 goto error;
973 if (!vmx_world_restore(d, &c))
974 goto error;
975 return 1;
976 }
977 break;
979 /*
980 * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
981 * above.
982 */
983 case VMX_ASSIST_RESTORE:
984 /* restore the old context saved by VMX_ASSIST_INVOKE */
985 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
986 goto error;
987 if (cp != 0) {
988 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
989 goto error;
990 if (!vmx_world_restore(d, &c))
991 goto error;
992 return 1;
993 }
994 break;
995 }
997 error:
998 printf("Failed to transfer to vmxassist\n");
999 domain_crash_synchronous();
1000 return 0;
1001 }
1003 static int vmx_set_cr0(unsigned long value)
1004 {
1005 struct vcpu *d = current;
1006 unsigned long mfn;
1007 unsigned long eip;
1008 int paging_enabled;
1009 unsigned long vm_entry_value;
1010 /*
1011 * CR0: We don't want to lose PE and PG.
1012 */
1013 paging_enabled = vmx_paging_enabled(d);
1014 __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
1015 __vmwrite(CR0_READ_SHADOW, value);
1017 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
1019 if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) {
1020 /*
1021 * The guest CR3 must be pointing to guest physical memory.
1022 */
1023 if ( !VALID_MFN(mfn = get_mfn_from_pfn(
1024 d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
1025 !get_page(pfn_to_page(mfn), d->domain) )
1026 {
1027 printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3);
1028 domain_crash_synchronous(); /* need to take a clean path */
1029 }
1031 #if defined(__x86_64__)
1032 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1033 &d->arch.arch_vmx.cpu_state) &&
1034 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
1035 &d->arch.arch_vmx.cpu_state)){
1036 VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
1037 vmx_inject_exception(d, TRAP_gp_fault, 0);
1038 }
1039 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1040 &d->arch.arch_vmx.cpu_state)){
1041 /* PAE must already be enabled at this point */
1042 VMX_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
1043 set_bit(VMX_CPU_STATE_LMA_ENABLED,
1044 &d->arch.arch_vmx.cpu_state);
1045 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1046 vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
1047 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1049 #if CONFIG_PAGING_LEVELS >= 4
1050 if(!shadow_set_guest_paging_levels(d->domain, 4)) {
1051 printk("Unsupported guest paging levels\n");
1052 domain_crash_synchronous(); /* need to take a clean path */
1053 }
1054 #endif
1055 }
1056 else
1057 {
1058 #if CONFIG_PAGING_LEVELS >= 4
1059 if(!shadow_set_guest_paging_levels(d->domain, 2)) {
1060 printk("Unsupported guest paging levels\n");
1061 domain_crash_synchronous(); /* need to take a clean path */
1062 }
1063 #endif
1064 }
1066 unsigned long crn;
1067 /* update CR4's PAE if needed */
1068 __vmread(GUEST_CR4, &crn);
1069 if ( (!(crn & X86_CR4_PAE)) &&
1070 test_bit(VMX_CPU_STATE_PAE_ENABLED,
1071 &d->arch.arch_vmx.cpu_state)){
1072 VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
1073 __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
1074 }
1075 #elif defined( __i386__)
1076 unsigned long old_base_mfn;
1077 old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
1078 if (old_base_mfn)
1079 put_page(pfn_to_page(old_base_mfn));
1080 #endif
1081 /*
1082 * Now arch.guest_table points to machine physical.
1083 */
1084 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1085 update_pagetables(d);
1087 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
1088 (unsigned long) (mfn << PAGE_SHIFT));
1090 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
1091 /*
1092 * arch->shadow_table should hold the next CR3 for shadow
1093 */
1094 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
1095 d->arch.arch_vmx.cpu_cr3, mfn);
1096 }
1098 /*
1099 * VMX does not implement real-mode virtualization. We emulate
1100 * real-mode by performing a world switch to VMXAssist whenever
1101 * a partition disables the CR0.PE bit.
1102 */
1103 if ((value & X86_CR0_PE) == 0) {
1104 if ( value & X86_CR0_PG ) {
1105 /* inject GP here */
1106 vmx_inject_exception(d, TRAP_gp_fault, 0);
1107 return 0;
1108 } else {
1109 /*
1110 * Disable paging here.
1111 * Same as the PE == 1 && PG == 0 case.
1112 */
1113 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1114 &d->arch.arch_vmx.cpu_state)){
1115 clear_bit(VMX_CPU_STATE_LMA_ENABLED,
1116 &d->arch.arch_vmx.cpu_state);
1117 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1118 vm_entry_value &= ~VM_ENTRY_CONTROLS_IA32E_MODE;
1119 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1120 }
1121 }
1122 __vmread(GUEST_RIP, &eip);
1123 VMX_DBG_LOG(DBG_LEVEL_1,
1124 "Disabling CR0.PE at %%eip 0x%lx\n", eip);
1125 if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
1126 set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
1127 __vmread(GUEST_RIP, &eip);
1128 VMX_DBG_LOG(DBG_LEVEL_1,
1129 "Transfering control to vmxassist %%eip 0x%lx\n", eip);
1130 return 0; /* do not update eip! */
1131 }
1132 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1133 &d->arch.arch_vmx.cpu_state)) {
1134 __vmread(GUEST_RIP, &eip);
1135 VMX_DBG_LOG(DBG_LEVEL_1,
1136 "Enabling CR0.PE at %%eip 0x%lx\n", eip);
1137 if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
1138 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1139 &d->arch.arch_vmx.cpu_state);
1140 __vmread(GUEST_RIP, &eip);
1141 VMX_DBG_LOG(DBG_LEVEL_1,
1142 "Restoring to %%eip 0x%lx\n", eip);
1143 return 0; /* do not update eip! */
1144 }
1145 }
1147 return 1;
1148 }
1150 #define CASE_GET_REG(REG, reg) \
1151 case REG_ ## REG: value = regs->reg; break
1153 #define CASE_EXTEND_SET_REG \
1154 CASE_EXTEND_REG(S)
1155 #define CASE_EXTEND_GET_REG \
1156 CASE_EXTEND_REG(G)
1158 #ifdef __i386__
1159 #define CASE_EXTEND_REG(T)
1160 #else
1161 #define CASE_EXTEND_REG(T) \
1162 CASE_ ## T ## ET_REG(R8, r8); \
1163 CASE_ ## T ## ET_REG(R9, r9); \
1164 CASE_ ## T ## ET_REG(R10, r10); \
1165 CASE_ ## T ## ET_REG(R11, r11); \
1166 CASE_ ## T ## ET_REG(R12, r12); \
1167 CASE_ ## T ## ET_REG(R13, r13); \
1168 CASE_ ## T ## ET_REG(R14, r14); \
1169 CASE_ ## T ## ET_REG(R15, r15);
1170 #endif
1173 /*
1174 * Write to control registers
1175 */
1176 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
1177 {
1178 unsigned long value;
1179 unsigned long old_cr;
1180 struct vcpu *d = current;
1182 switch (gp) {
1183 CASE_GET_REG(EAX, eax);
1184 CASE_GET_REG(ECX, ecx);
1185 CASE_GET_REG(EDX, edx);
1186 CASE_GET_REG(EBX, ebx);
1187 CASE_GET_REG(EBP, ebp);
1188 CASE_GET_REG(ESI, esi);
1189 CASE_GET_REG(EDI, edi);
1190 CASE_EXTEND_GET_REG
1191 case REG_ESP:
1192 __vmread(GUEST_RSP, &value);
1193 break;
1194 default:
1195 printk("invalid gp: %d\n", gp);
1196 __vmx_bug(regs);
1197 }
1199 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
1200 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
1202 switch(cr) {
1203 case 0:
1204 {
1205 return vmx_set_cr0(value);
1206 }
1207 case 3:
1208 {
1209 unsigned long old_base_mfn, mfn;
1211 /*
1212 * If paging is not enabled yet, simply copy the value to CR3.
1213 */
1214 if (!vmx_paging_enabled(d)) {
1215 d->arch.arch_vmx.cpu_cr3 = value;
1216 break;
1217 }
1219 /*
1220 * We make a new one if the shadow does not exist.
1221 */
1222 if (value == d->arch.arch_vmx.cpu_cr3) {
1223 /*
1224 * This is a simple TLB flush, implying the guest has
1225 * removed some translation or changed page attributes.
1226 * We simply invalidate the shadow.
1227 */
1228 mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
1229 if (mfn != pagetable_get_pfn(d->arch.guest_table))
1230 __vmx_bug(regs);
1231 shadow_sync_all(d->domain);
1232 } else {
1233 /*
1234 * If different, make a shadow. Check if the PDBR is valid
1235 * first.
1236 */
1237 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
1238 if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
1239 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
1240 !get_page(pfn_to_page(mfn), d->domain) )
1241 {
1242 printk("Invalid CR3 value=%lx", value);
1243 domain_crash_synchronous(); /* need to take a clean path */
1244 }
1245 old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
1246 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1247 if (old_base_mfn)
1248 put_page(pfn_to_page(old_base_mfn));
1249 update_pagetables(d);
1250 /*
1251 * arch.shadow_table should now hold the next CR3 for shadow
1252 */
1253 d->arch.arch_vmx.cpu_cr3 = value;
1254 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
1255 value);
1256 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
1257 }
1258 break;
1259 }
1260 case 4:
1261 {
1262 /* CR4 */
1263 unsigned long old_guest_cr;
1265 __vmread(GUEST_CR4, &old_guest_cr);
1266 if (value & X86_CR4_PAE){
1267 set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
1268 } else {
1269 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1270 &d->arch.arch_vmx.cpu_state)){
1271 vmx_inject_exception(d, TRAP_gp_fault, 0);
1272 }
1273 clear_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
1274 }
1276 __vmread(CR4_READ_SHADOW, &old_cr);
1278 __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
1279 __vmwrite(CR4_READ_SHADOW, value);
1281 /*
1282 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
1283 * all TLB entries except global entries.
1284 */
1285 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
1286 shadow_sync_all(d->domain);
1287 }
1288 break;
1289 }
1290 default:
1291 printk("invalid cr: %d\n", gp);
1292 __vmx_bug(regs);
1293 }
1295 return 1;
1296 }
1298 #define CASE_SET_REG(REG, reg) \
1299 case REG_ ## REG: \
1300 regs->reg = value; \
1301 break
1303 /*
1304 * Read from control registers. CR0 and CR4 are read from the shadow.
1305 */
1306 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
1307 {
1308 unsigned long value;
1309 struct vcpu *d = current;
1311 if (cr != 3)
1312 __vmx_bug(regs);
1314 value = (unsigned long) d->arch.arch_vmx.cpu_cr3;
1316 switch (gp) {
1317 CASE_SET_REG(EAX, eax);
1318 CASE_SET_REG(ECX, ecx);
1319 CASE_SET_REG(EDX, edx);
1320 CASE_SET_REG(EBX, ebx);
1321 CASE_SET_REG(EBP, ebp);
1322 CASE_SET_REG(ESI, esi);
1323 CASE_SET_REG(EDI, edi);
1324 CASE_EXTEND_SET_REG
1325 case REG_ESP:
1326 __vmwrite(GUEST_RSP, value);
1327 regs->esp = value;
1328 break;
1329 default:
1330 printk("invalid gp: %d\n", gp);
1331 __vmx_bug(regs);
1332 }
1334 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
1335 }
1337 static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
1338 {
1339 unsigned int gp, cr;
1340 unsigned long value;
1342 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
1343 case TYPE_MOV_TO_CR:
1344 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1345 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1346 TRACE_VMEXIT(1,TYPE_MOV_TO_CR);
1347 TRACE_VMEXIT(2,cr);
1348 TRACE_VMEXIT(3,gp);
1349 return mov_to_cr(gp, cr, regs);
1350 case TYPE_MOV_FROM_CR:
1351 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1352 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1353 TRACE_VMEXIT(1,TYPE_MOV_FROM_CR);
1354 TRACE_VMEXIT(2,cr);
1355 TRACE_VMEXIT(3,gp);
1356 mov_from_cr(cr, gp, regs);
1357 break;
1358 case TYPE_CLTS:
1359 TRACE_VMEXIT(1,TYPE_CLTS);
1360 clts();
1361 setup_fpu(current);
1363 __vmread(GUEST_CR0, &value);
1364 value &= ~X86_CR0_TS; /* clear TS */
1365 __vmwrite(GUEST_CR0, value);
1367 __vmread(CR0_READ_SHADOW, &value);
1368 value &= ~X86_CR0_TS; /* clear TS */
1369 __vmwrite(CR0_READ_SHADOW, value);
1370 break;
1371 case TYPE_LMSW:
1372 TRACE_VMEXIT(1,TYPE_LMSW);
1373 __vmread(CR0_READ_SHADOW, &value);
1374 value = (value & ~0xF) |
1375 (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
1376 return vmx_set_cr0(value);
1377 break;
1378 default:
1379 __vmx_bug(regs);
1380 break;
1381 }
1382 return 1;
1383 }
1385 static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
1386 {
1387 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
1388 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1389 (unsigned long)regs->edx);
1390 switch (regs->ecx) {
1391 case MSR_IA32_SYSENTER_CS:
1392 __vmread(GUEST_SYSENTER_CS, &regs->eax);
1393 regs->edx = 0;
1394 break;
1395 case MSR_IA32_SYSENTER_ESP:
1396 __vmread(GUEST_SYSENTER_ESP, &regs->eax);
1397 regs->edx = 0;
1398 break;
1399 case MSR_IA32_SYSENTER_EIP:
1400 __vmread(GUEST_SYSENTER_EIP, &regs->eax);
1401 regs->edx = 0;
1402 break;
1403 default:
1404 if(long_mode_do_msr_read(regs))
1405 return;
1406 rdmsr_user(regs->ecx, regs->eax, regs->edx);
1407 break;
1408 }
1410 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
1411 "ecx=%lx, eax=%lx, edx=%lx",
1412 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1413 (unsigned long)regs->edx);
1414 }
1416 static inline void vmx_do_msr_write(struct cpu_user_regs *regs)
1417 {
1418 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write: ecx=%lx, eax=%lx, edx=%lx",
1419 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1420 (unsigned long)regs->edx);
1421 switch (regs->ecx) {
1422 case MSR_IA32_SYSENTER_CS:
1423 __vmwrite(GUEST_SYSENTER_CS, regs->eax);
1424 break;
1425 case MSR_IA32_SYSENTER_ESP:
1426 __vmwrite(GUEST_SYSENTER_ESP, regs->eax);
1427 break;
1428 case MSR_IA32_SYSENTER_EIP:
1429 __vmwrite(GUEST_SYSENTER_EIP, regs->eax);
1430 break;
1431 default:
1432 long_mode_do_msr_write(regs);
1433 break;
1434 }
1436 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: "
1437 "ecx=%lx, eax=%lx, edx=%lx",
1438 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1439 (unsigned long)regs->edx);
1440 }
1442 /*
1443 * Need to use this exit to reschedule
1444 */
1445 static inline void vmx_vmexit_do_hlt(void)
1446 {
1447 #if VMX_DEBUG
1448 unsigned long eip;
1449 __vmread(GUEST_RIP, &eip);
1450 #endif
1451 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
1452 raise_softirq(SCHEDULE_SOFTIRQ);
1453 }
1455 static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
1456 {
1457 unsigned int vector;
1458 int error;
1460 asmlinkage void do_IRQ(struct cpu_user_regs *);
1461 void smp_apic_timer_interrupt(struct cpu_user_regs *);
1462 void timer_interrupt(int, void *, struct cpu_user_regs *);
1463 void smp_event_check_interrupt(void);
1464 void smp_invalidate_interrupt(void);
1465 void smp_call_function_interrupt(void);
1466 void smp_spurious_interrupt(struct cpu_user_regs *regs);
1467 void smp_error_interrupt(struct cpu_user_regs *regs);
1469 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1470 || !(vector & INTR_INFO_VALID_MASK))
1471 __vmx_bug(regs);
1473 vector &= 0xff;
1474 local_irq_disable();
1476 switch(vector) {
1477 case LOCAL_TIMER_VECTOR:
1478 smp_apic_timer_interrupt(regs);
1479 break;
1480 case EVENT_CHECK_VECTOR:
1481 smp_event_check_interrupt();
1482 break;
1483 case INVALIDATE_TLB_VECTOR:
1484 smp_invalidate_interrupt();
1485 break;
1486 case CALL_FUNCTION_VECTOR:
1487 smp_call_function_interrupt();
1488 break;
1489 case SPURIOUS_APIC_VECTOR:
1490 smp_spurious_interrupt(regs);
1491 break;
1492 case ERROR_APIC_VECTOR:
1493 smp_error_interrupt(regs);
1494 break;
1495 default:
1496 regs->entry_vector = vector;
1497 do_IRQ(regs);
1498 break;
1499 }
1500 }
1502 static inline void vmx_vmexit_do_mwait(void)
1503 {
1504 #if VMX_DEBUG
1505 unsigned long eip;
1506 __vmread(GUEST_RIP, &eip);
1507 #endif
1508 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
1509 raise_softirq(SCHEDULE_SOFTIRQ);
1510 }
1512 #define BUF_SIZ 256
1513 #define MAX_LINE 80
1514 char print_buf[BUF_SIZ];
1515 static int index;
1517 static void vmx_print_line(const char c, struct vcpu *d)
1518 {
1520 if (index == MAX_LINE || c == '\n') {
1521 if (index == MAX_LINE) {
1522 print_buf[index++] = c;
1523 }
1524 print_buf[index] = '\0';
1525 printk("(GUEST: %u) %s\n", d->domain->domain_id, (char *) &print_buf);
1526 index = 0;
1527 }
1528 else
1529 print_buf[index++] = c;
1530 }
1532 void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
1533 {
1534 __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
1535 __vmread(GUEST_RSP, &ctxt->esp);
1536 __vmread(GUEST_RFLAGS, &ctxt->eflags);
1537 __vmread(GUEST_CS_SELECTOR, &ctxt->cs);
1538 __vmread(GUEST_RIP, &ctxt->eip);
1540 __vmread(GUEST_GS_SELECTOR, &ctxt->gs);
1541 __vmread(GUEST_FS_SELECTOR, &ctxt->fs);
1542 __vmread(GUEST_ES_SELECTOR, &ctxt->es);
1543 __vmread(GUEST_DS_SELECTOR, &ctxt->ds);
1544 }
1546 #ifdef XEN_DEBUGGER
1547 void save_cpu_user_regs(struct cpu_user_regs *regs)
1548 {
1549 __vmread(GUEST_SS_SELECTOR, &regs->xss);
1550 __vmread(GUEST_RSP, &regs->esp);
1551 __vmread(GUEST_RFLAGS, &regs->eflags);
1552 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
1553 __vmread(GUEST_RIP, &regs->eip);
1555 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
1556 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
1557 __vmread(GUEST_ES_SELECTOR, &regs->xes);
1558 __vmread(GUEST_DS_SELECTOR, &regs->xds);
1559 }
1561 void restore_cpu_user_regs(struct cpu_user_regs *regs)
1562 {
1563 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
1564 __vmwrite(GUEST_RSP, regs->esp);
1565 __vmwrite(GUEST_RFLAGS, regs->eflags);
1566 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
1567 __vmwrite(GUEST_RIP, regs->eip);
1569 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
1570 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
1571 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
1572 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
1573 }
1574 #endif
1576 asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
1577 {
1578 unsigned int exit_reason, idtv_info_field;
1579 unsigned long exit_qualification, eip, inst_len = 0;
1580 struct vcpu *v = current;
1581 int error;
1583 if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
1584 __vmx_bug(&regs);
1586 perfc_incra(vmexits, exit_reason);
1588 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
1589 if (idtv_info_field & INTR_INFO_VALID_MASK) {
1590 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
1592 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
1593 if (inst_len >= 1 && inst_len <= 15)
1594 __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
1596 if (idtv_info_field & 0x800) { /* valid error code */
1597 unsigned long error_code;
1598 __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
1599 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1600 }
1602 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
1603 }
1605 /* don't bother logging H/W interrupts */
1606 if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
1607 exit_reason != EXIT_REASON_VMCALL &&
1608 exit_reason != EXIT_REASON_IO_INSTRUCTION)
1609 VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
1611 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
1612 printk("Failed vm entry\n");
1613 domain_crash_synchronous();
1614 return;
1615 }
1617 __vmread(GUEST_RIP, &eip);
1618 TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
1619 TRACE_VMEXIT(0,exit_reason);
1621 switch (exit_reason) {
1622 case EXIT_REASON_EXCEPTION_NMI:
1623 {
1624 /*
1625 * We don't enable software-interrupt exiting (INT n), so here we
1626 * can only see (1) an exception (e.g. #PF) raised in the guest, or
1627 * (2) an NMI.
1628 */
1629 int error;
1630 unsigned int vector;
1631 unsigned long va;
1633 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1634 || !(vector & INTR_INFO_VALID_MASK))
1635 __vmx_bug(&regs);
1636 vector &= 0xff;
1638 TRACE_VMEXIT(1,vector);
1639 perfc_incra(cause_vector, vector);
1641 TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
1642 switch (vector) {
1643 #ifdef XEN_DEBUGGER
1644 case TRAP_debug:
1645 {
1646 save_cpu_user_regs(&regs);
1647 pdb_handle_exception(1, &regs, 1);
1648 restore_cpu_user_regs(&regs);
1649 break;
1650 }
1651 case TRAP_int3:
1652 {
1653 save_cpu_user_regs(&regs);
1654 pdb_handle_exception(3, &regs, 1);
1655 restore_cpu_user_regs(&regs);
1656 break;
1657 }
1658 #else
1659 case TRAP_debug:
1660 {
1661 void store_cpu_user_regs(struct cpu_user_regs *regs);
1662 long do_sched_op(unsigned long op);
1665 store_cpu_user_regs(&regs);
1666 __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
1668 set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
1669 do_sched_op(SCHEDOP_yield);
1671 break;
1672 }
1673 #endif
1674 case TRAP_no_device:
1675 {
1676 vmx_do_no_device_fault();
1677 break;
1678 }
1679 case TRAP_page_fault:
1680 {
1681 __vmread(EXIT_QUALIFICATION, &va);
1682 __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
1684 TRACE_VMEXIT(3,regs.error_code);
1685 TRACE_VMEXIT(4,va);
1687 VMX_DBG_LOG(DBG_LEVEL_VMMU,
1688 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
1689 (unsigned long)regs.eax, (unsigned long)regs.ebx,
1690 (unsigned long)regs.ecx, (unsigned long)regs.edx,
1691 (unsigned long)regs.esi, (unsigned long)regs.edi);
1692 v->domain->arch.vmx_platform.mpci.inst_decoder_regs = &regs;
1694 if (!(error = vmx_do_page_fault(va, &regs))) {
1695 /*
1696 * Inject #PG using Interruption-Information Fields
1697 */
1698 vmx_inject_exception(v, TRAP_page_fault, regs.error_code);
1699 v->arch.arch_vmx.cpu_cr2 = va;
1700 TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
1701 }
1702 break;
1703 }
1704 case TRAP_nmi:
1705 do_nmi(&regs, 0);
1706 break;
1707 default:
1708 vmx_reflect_exception(v);
1709 break;
1710 }
1711 break;
1712 }
1713 case EXIT_REASON_EXTERNAL_INTERRUPT:
1714 vmx_vmexit_do_extint(&regs);
1715 break;
1716 case EXIT_REASON_PENDING_INTERRUPT:
1717 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
1718 MONITOR_CPU_BASED_EXEC_CONTROLS);
1719 break;
1720 case EXIT_REASON_TASK_SWITCH:
1721 __vmx_bug(&regs);
1722 break;
1723 case EXIT_REASON_CPUID:
1724 __get_instruction_length(inst_len);
1725 vmx_vmexit_do_cpuid(regs.eax, &regs);
1726 __update_guest_eip(inst_len);
1727 break;
1728 case EXIT_REASON_HLT:
1729 __get_instruction_length(inst_len);
1730 __update_guest_eip(inst_len);
1731 vmx_vmexit_do_hlt();
1732 break;
1733 case EXIT_REASON_INVLPG:
1734 {
1735 unsigned long va;
1737 __vmread(EXIT_QUALIFICATION, &va);
1738 vmx_vmexit_do_invlpg(va);
1739 __get_instruction_length(inst_len);
1740 __update_guest_eip(inst_len);
1741 break;
1742 }
1743 case EXIT_REASON_VMCALL:
1744 __get_instruction_length(inst_len);
1745 __vmread(GUEST_RIP, &eip);
1746 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1748 vmx_print_line(regs.eax, v); /* provides the current domain */
1749 __update_guest_eip(inst_len);
1750 break;
1751 case EXIT_REASON_CR_ACCESS:
1752 {
1753 __vmread(GUEST_RIP, &eip);
1754 __get_instruction_length(inst_len);
1755 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1757 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
1758 eip, inst_len, exit_qualification);
1759 if (vmx_cr_access(exit_qualification, &regs))
1760 __update_guest_eip(inst_len);
1761 TRACE_VMEXIT(3,regs.error_code);
1762 TRACE_VMEXIT(4,exit_qualification);
1763 break;
1764 }
1765 case EXIT_REASON_DR_ACCESS:
1766 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1767 vmx_dr_access(exit_qualification, &regs);
1768 __get_instruction_length(inst_len);
1769 __update_guest_eip(inst_len);
1770 break;
1771 case EXIT_REASON_IO_INSTRUCTION:
1772 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1773 __get_instruction_length(inst_len);
1774 vmx_io_instruction(&regs, exit_qualification, inst_len);
1775 TRACE_VMEXIT(4,exit_qualification);
1776 break;
1777 case EXIT_REASON_MSR_READ:
1778 __get_instruction_length(inst_len);
1779 vmx_do_msr_read(&regs);
1780 __update_guest_eip(inst_len);
1781 break;
1782 case EXIT_REASON_MSR_WRITE:
1783 __vmread(GUEST_RIP, &eip);
1784 vmx_do_msr_write(&regs);
1785 __get_instruction_length(inst_len);
1786 __update_guest_eip(inst_len);
1787 break;
1788 case EXIT_REASON_MWAIT_INSTRUCTION:
1789 __get_instruction_length(inst_len);
1790 __update_guest_eip(inst_len);
1791 vmx_vmexit_do_mwait();
1792 break;
1793 default:
1794 __vmx_bug(&regs); /* should not happen */
1795 }
1796 }
1798 asmlinkage void load_cr2(void)
1799 {
1800 struct vcpu *d = current;
1802 local_irq_disable();
1803 #ifdef __i386__
1804 asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1805 #else
1806 asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1807 #endif
1808 }
1810 #ifdef TRACE_BUFFER
1811 asmlinkage void trace_vmentry (void)
1812 {
1813 TRACE_5D(TRC_VMENTRY,trace_values[current->processor][0],
1814 trace_values[current->processor][1],trace_values[current->processor][2],
1815 trace_values[current->processor][3],trace_values[current->processor][4]);
1816 TRACE_VMEXIT(0,9);
1817 TRACE_VMEXIT(1,9);
1818 TRACE_VMEXIT(2,9);
1819 TRACE_VMEXIT(3,9);
1820 TRACE_VMEXIT(4,9);
1821 return;
1822 }
1823 asmlinkage void trace_vmexit (void)
1824 {
1825 TRACE_3D(TRC_VMEXIT,0,0,0);
1826 return;
1827 }
1828 #endif
1829 #endif /* CONFIG_VMX */
1831 /*
1832 * Local variables:
1833 * mode: C
1834 * c-set-style: "BSD"
1835 * c-basic-offset: 4
1836 * tab-width: 4
1837 * indent-tabs-mode: nil
1838 * End:
1839 */