
view xen/arch/x86/vmx.c @ 6672:ef1cd7729676

Reducing LOC (always a good thing) by eliminating duplicated functionality.
vmx_platform.c/inst_copy_from_guest() now uses vmx_copy. Also shored up
vmx_copy to handle copies when paging is enabled and improved its error
handling.

Signed-Off-By: Leendert van Doorn <leendert@watson.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Sep 06 15:31:34 2005 +0000 (2005-09-06)
parents b6c98fe62e1a
children d4d69c509371
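The summary above says that inst_copy_from_guest() in vmx_platform.c now delegates to vmx_copy() (defined further down in this file). That vmx_platform.c change is not part of this view; the following is only a minimal sketch, assuming the vmx_copy()/VMX_COPY_IN/MAX_INST_LEN interface used in vmx.c, of what such a wrapper could look like:

/* Hypothetical sketch, not the actual vmx_platform.c code: copy inst_len
 * instruction bytes from guest linear address guest_eip into buf, letting
 * vmx_copy() handle both paging-enabled and paging-disabled guests and
 * report failure. Returns the number of bytes copied, or 0 on error. */
int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
    if (inst_len > MAX_INST_LEN || inst_len <= 0)
        return 0;
    if (!vmx_copy(buf, guest_eip, inst_len, VMX_COPY_IN))
        return 0;
    return inst_len;
}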
line source
1 /*
2 * vmx.c: handling VMX architecture-related VM exits
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/init.h>
22 #include <xen/lib.h>
23 #include <xen/trace.h>
24 #include <xen/sched.h>
25 #include <xen/irq.h>
26 #include <xen/softirq.h>
27 #include <xen/domain_page.h>
28 #include <asm/current.h>
29 #include <asm/io.h>
30 #include <asm/shadow.h>
31 #include <asm/regs.h>
32 #include <asm/cpufeature.h>
33 #include <asm/processor.h>
34 #include <asm/types.h>
35 #include <asm/msr.h>
36 #include <asm/spinlock.h>
37 #include <asm/vmx.h>
38 #include <asm/vmx_vmcs.h>
39 #include <asm/vmx_intercept.h>
40 #include <asm/shadow.h>
41 #if CONFIG_PAGING_LEVELS >= 3
42 #include <asm/shadow_64.h>
43 #endif
45 #include <public/io/ioreq.h>
47 #ifdef CONFIG_VMX
49 int vmcs_size;
50 unsigned int opt_vmx_debug_level = 0;
51 integer_param("vmx_debug", opt_vmx_debug_level);
53 #ifdef TRACE_BUFFER
54 static unsigned long trace_values[NR_CPUS][4];
55 #define TRACE_VMEXIT(index,value) trace_values[current->processor][index]=value
56 #else
57 #define TRACE_VMEXIT(index,value) ((void)0)
58 #endif
60 #ifdef __x86_64__
61 static struct msr_state percpu_msr[NR_CPUS];
63 static u32 msr_data_index[VMX_MSR_COUNT] =
64 {
65 MSR_LSTAR, MSR_STAR, MSR_CSTAR,
66 MSR_SYSCALL_MASK, MSR_EFER,
67 };
69 /*
70 * To avoid MSR save/restore at every VM exit/entry time, we restore
71 * the x86_64 specific MSRs at domain switch time. Since those MSRs
72 * are not modified once set for generic domains, we don't save them,
73 * but simply reset them to the values set at percpu_traps_init().
74 */
75 void vmx_load_msrs(struct vcpu *n)
76 {
77 struct msr_state *host_state;
78 host_state = &percpu_msr[smp_processor_id()];
80 while (host_state->flags){
81 int i;
83 i = find_first_set_bit(host_state->flags);
84 wrmsrl(msr_data_index[i], host_state->msr_items[i]);
85 clear_bit(i, &host_state->flags);
86 }
87 }
89 static void vmx_save_init_msrs(void)
90 {
91 struct msr_state *host_state;
92 host_state = &percpu_msr[smp_processor_id()];
93 int i;
95 for (i = 0; i < VMX_MSR_COUNT; i++)
96 rdmsrl(msr_data_index[i], host_state->msr_items[i]);
97 }
99 #define CASE_READ_MSR(address) \
100 case MSR_ ## address: \
101 msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address]; \
102 break
104 #define CASE_WRITE_MSR(address) \
105 case MSR_ ## address: \
106 { \
107 msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content; \
108 if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) { \
109 set_bit(VMX_INDEX_MSR_ ## address, &msr->flags); \
110 } \
111 wrmsrl(MSR_ ## address, msr_content); \
112 set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags); \
113 } \
114 break
116 #define IS_CANO_ADDRESS(add) 1
117 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
118 {
119 u64 msr_content = 0;
120 struct vcpu *vc = current;
121 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
122 switch(regs->ecx){
123 case MSR_EFER:
124 msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
125 VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content);
126 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
127 &vc->arch.arch_vmx.cpu_state))
128 msr_content |= 1 << _EFER_LME;
130 if (VMX_LONG_GUEST(vc))
131 msr_content |= 1 << _EFER_LMA;
132 break;
133 case MSR_FS_BASE:
134 if (!(VMX_LONG_GUEST(vc)))
135 /* XXX should it be GP fault */
136 domain_crash();
137 __vmread(GUEST_FS_BASE, &msr_content);
138 break;
139 case MSR_GS_BASE:
140 if (!(VMX_LONG_GUEST(vc)))
141 domain_crash();
142 __vmread(GUEST_GS_BASE, &msr_content);
143 break;
144 case MSR_SHADOW_GS_BASE:
145 msr_content = msr->shadow_gs;
146 break;
148 CASE_READ_MSR(STAR);
149 CASE_READ_MSR(LSTAR);
150 CASE_READ_MSR(CSTAR);
151 CASE_READ_MSR(SYSCALL_MASK);
152 default:
153 return 0;
154 }
155 VMX_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content);
156 regs->eax = msr_content & 0xffffffff;
157 regs->edx = msr_content >> 32;
158 return 1;
159 }
161 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
162 {
163 u64 msr_content = regs->eax | ((u64)regs->edx << 32);
164 struct vcpu *vc = current;
165 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
166 struct msr_state * host_state =
167 &percpu_msr[smp_processor_id()];
169 VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n",
170 regs->ecx, msr_content);
172 switch (regs->ecx){
173 case MSR_EFER:
174 if ((msr_content & EFER_LME) ^
175 test_bit(VMX_CPU_STATE_LME_ENABLED,
176 &vc->arch.arch_vmx.cpu_state)){
177 if (test_bit(VMX_CPU_STATE_PG_ENABLED,
178 &vc->arch.arch_vmx.cpu_state) ||
179 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
180 &vc->arch.arch_vmx.cpu_state)){
181 vmx_inject_exception(vc, TRAP_gp_fault, 0);
182 }
183 }
184 if (msr_content & EFER_LME)
185 set_bit(VMX_CPU_STATE_LME_ENABLED,
186 &vc->arch.arch_vmx.cpu_state);
187 /* No update for LME/LMA since they have no effect */
188 msr->msr_items[VMX_INDEX_MSR_EFER] =
189 msr_content;
190 if (msr_content & ~(EFER_LME | EFER_LMA)){
191 msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
192 if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){
193 rdmsrl(MSR_EFER,
194 host_state->msr_items[VMX_INDEX_MSR_EFER]);
195 set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
196 set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
197 wrmsrl(MSR_EFER, msr_content);
198 }
199 }
200 break;
202 case MSR_FS_BASE:
203 case MSR_GS_BASE:
204 if (!(VMX_LONG_GUEST(vc)))
205 domain_crash();
206 if (!IS_CANO_ADDRESS(msr_content)){
207 VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
208 vmx_inject_exception(vc, TRAP_gp_fault, 0);
209 }
210 if (regs->ecx == MSR_FS_BASE)
211 __vmwrite(GUEST_FS_BASE, msr_content);
212 else
213 __vmwrite(GUEST_GS_BASE, msr_content);
214 break;
216 case MSR_SHADOW_GS_BASE:
217 if (!(VMX_LONG_GUEST(vc)))
218 domain_crash();
219 vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
220 wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
221 break;
223 CASE_WRITE_MSR(STAR);
224 CASE_WRITE_MSR(LSTAR);
225 CASE_WRITE_MSR(CSTAR);
226 CASE_WRITE_MSR(SYSCALL_MASK);
227 default:
228 return 0;
229 }
230 return 1;
231 }
233 void
234 vmx_restore_msrs(struct vcpu *d)
235 {
236 int i = 0;
237 struct msr_state *guest_state;
238 struct msr_state *host_state;
239 unsigned long guest_flags;
241 guest_state = &d->arch.arch_vmx.msr_content;
242 host_state = &percpu_msr[smp_processor_id()];
244 wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
245 guest_flags = guest_state->flags;
246 if (!guest_flags)
247 return;
249 while (guest_flags){
250 i = find_first_set_bit(guest_flags);
252 VMX_DBG_LOG(DBG_LEVEL_2,
253 "restore guest's index %d msr %lx with %lx\n",
254 i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
255 set_bit(i, &host_state->flags);
256 wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
257 clear_bit(i, &guest_flags);
258 }
259 }
261 #else /* __i386__ */
262 #define vmx_save_init_msrs() ((void)0)
264 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs){
265 return 0;
266 }
267 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs){
268 return 0;
269 }
270 #endif
272 extern long evtchn_send(int lport);
273 extern long do_block(void);
274 void do_nmi(struct cpu_user_regs *, unsigned long);
276 static int check_vmx_controls(ctrls, msr)
277 {
278 u32 vmx_msr_low, vmx_msr_high;
280 rdmsr(msr, vmx_msr_low, vmx_msr_high);
281 if (ctrls < vmx_msr_low || ctrls > vmx_msr_high) {
282 printk("Insufficient VMX capability 0x%x, "
283 "msr=0x%x,low=0x%8x,high=0x%x\n",
284 ctrls, msr, vmx_msr_low, vmx_msr_high);
285 return 0;
286 }
287 return 1;
288 }
290 int start_vmx(void)
291 {
292 struct vmcs_struct *vmcs;
293 u32 ecx;
294 u32 eax, edx;
295 u64 phys_vmcs; /* debugging */
297 /*
298 * Xen does not fill x86_capability words except 0.
299 */
300 ecx = cpuid_ecx(1);
301 boot_cpu_data.x86_capability[4] = ecx;
303 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
304 return 0;
306 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
308 if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
309 if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
310 printk("VMX disabled by Feature Control MSR.\n");
311 return 0;
312 }
313 }
314 else {
315 wrmsr(IA32_FEATURE_CONTROL_MSR,
316 IA32_FEATURE_CONTROL_MSR_LOCK |
317 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
318 }
320 if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
321 MSR_IA32_VMX_PINBASED_CTLS_MSR))
322 return 0;
323 if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
324 MSR_IA32_VMX_PROCBASED_CTLS_MSR))
325 return 0;
326 if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
327 MSR_IA32_VMX_EXIT_CTLS_MSR))
328 return 0;
329 if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
330 MSR_IA32_VMX_ENTRY_CTLS_MSR))
331 return 0;
333 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
335 if (!(vmcs = alloc_vmcs())) {
336 printk("Failed to allocate VMCS\n");
337 return 0;
338 }
340 phys_vmcs = (u64) virt_to_phys(vmcs);
342 if (!(__vmxon(phys_vmcs))) {
343 printk("VMXON is done\n");
344 }
346 vmx_save_init_msrs();
348 return 1;
349 }
351 void stop_vmx(void)
352 {
353 if (read_cr4() & X86_CR4_VMXE)
354 __vmxoff();
355 }
357 /*
358 * Not all cases receive a valid value in the VM-exit instruction length field.
359 */
360 #define __get_instruction_length(len) \
361 __vmread(VM_EXIT_INSTRUCTION_LEN, &(len)); \
362 if ((len) < 1 || (len) > 15) \
363 __vmx_bug(&regs);
365 static void inline __update_guest_eip(unsigned long inst_len)
366 {
367 unsigned long current_eip;
369 __vmread(GUEST_RIP, &current_eip);
370 __vmwrite(GUEST_RIP, current_eip + inst_len);
371 }
374 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
375 {
376 unsigned long eip;
377 unsigned long gpa; /* FIXME: PAE */
378 int result;
380 #if VMX_DEBUG
381 {
382 __vmread(GUEST_RIP, &eip);
383 VMX_DBG_LOG(DBG_LEVEL_VMMU,
384 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
385 va, eip, (unsigned long)regs->error_code);
386 }
387 #endif
389 if (!vmx_paging_enabled(current)){
390 handle_mmio(va, va);
391 TRACE_VMEXIT (2,2);
392 return 1;
393 }
394 gpa = gva_to_gpa(va);
396 /* Use 1:1 page table to identify MMIO address space */
397 if ( mmio_space(gpa) ){
398 if (gpa >= 0xFEE00000) { /* workaround for local APIC */
399 u32 inst_len;
400 __vmread(VM_EXIT_INSTRUCTION_LEN, &(inst_len));
401 __update_guest_eip(inst_len);
402 return 1;
403 }
404 TRACE_VMEXIT (2,2);
405 handle_mmio(va, gpa);
406 return 1;
407 }
409 result = shadow_fault(va, regs);
410 TRACE_VMEXIT (2,result);
411 #if 0
412 if ( !result )
413 {
414 __vmread(GUEST_RIP, &eip);
415 printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
416 }
417 #endif
419 return result;
420 }
422 static void vmx_do_no_device_fault(void)
423 {
424 unsigned long cr0;
426 clts();
427 setup_fpu(current);
428 __vmread(CR0_READ_SHADOW, &cr0);
429 if (!(cr0 & X86_CR0_TS)) {
430 __vmread(GUEST_CR0, &cr0);
431 cr0 &= ~X86_CR0_TS;
432 __vmwrite(GUEST_CR0, cr0);
433 }
434 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
435 }
438 static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
439 {
440 unsigned int eax, ebx, ecx, edx;
441 unsigned long eip;
443 __vmread(GUEST_RIP, &eip);
445 VMX_DBG_LOG(DBG_LEVEL_1,
446 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
447 " (esi) %lx, (edi) %lx",
448 (unsigned long)regs->eax, (unsigned long)regs->ebx,
449 (unsigned long)regs->ecx, (unsigned long)regs->edx,
450 (unsigned long)regs->esi, (unsigned long)regs->edi);
452 cpuid(input, &eax, &ebx, &ecx, &edx);
454 if (input == 1) {
455 #ifdef __i386__
456 clear_bit(X86_FEATURE_PSE, &edx);
457 clear_bit(X86_FEATURE_PAE, &edx);
458 clear_bit(X86_FEATURE_PSE36, &edx);
459 #else
460 struct vcpu *d = current;
461 if (d->domain->arch.ops->guest_paging_levels == PAGING_L2)
462 {
463 clear_bit(X86_FEATURE_PSE, &edx);
464 clear_bit(X86_FEATURE_PAE, &edx);
465 clear_bit(X86_FEATURE_PSE36, &edx);
466 }
467 #endif
469 }
471 regs->eax = (unsigned long) eax;
472 regs->ebx = (unsigned long) ebx;
473 regs->ecx = (unsigned long) ecx;
474 regs->edx = (unsigned long) edx;
476 VMX_DBG_LOG(DBG_LEVEL_1,
477 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
478 eip, input, eax, ebx, ecx, edx);
480 }
482 #define CASE_GET_REG_P(REG, reg) \
483 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
485 static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs)
486 {
487 unsigned int reg;
488 unsigned long *reg_p = 0;
489 struct vcpu *v = current;
490 unsigned long eip;
492 __vmread(GUEST_RIP, &eip);
494 reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
496 VMX_DBG_LOG(DBG_LEVEL_1,
497 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
498 eip, reg, exit_qualification);
500 switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
501 CASE_GET_REG_P(EAX, eax);
502 CASE_GET_REG_P(ECX, ecx);
503 CASE_GET_REG_P(EDX, edx);
504 CASE_GET_REG_P(EBX, ebx);
505 CASE_GET_REG_P(EBP, ebp);
506 CASE_GET_REG_P(ESI, esi);
507 CASE_GET_REG_P(EDI, edi);
508 case REG_ESP:
509 break;
510 default:
511 __vmx_bug(regs);
512 }
514 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
515 case TYPE_MOV_TO_DR:
516 /* don't need to check the range */
517 if (reg != REG_ESP)
518 v->arch.guest_context.debugreg[reg] = *reg_p;
519 else {
520 unsigned long value;
521 __vmread(GUEST_RSP, &value);
522 v->arch.guest_context.debugreg[reg] = value;
523 }
524 break;
525 case TYPE_MOV_FROM_DR:
526 if (reg != REG_ESP)
527 *reg_p = v->arch.guest_context.debugreg[reg];
528 else {
529 __vmwrite(GUEST_RSP, v->arch.guest_context.debugreg[reg]);
530 }
531 break;
532 }
533 }
535 /*
536 * Invalidate the TLB for va. Invalidate the shadow page corresponding
537 * to the address va.
538 */
539 static void vmx_vmexit_do_invlpg(unsigned long va)
540 {
541 unsigned long eip;
542 struct vcpu *v = current;
544 __vmread(GUEST_RIP, &eip);
546 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
547 eip, va);
549 /*
550 * We do the safest things first, then try to update the shadow
551 * by copying from the guest
552 */
553 shadow_invlpg(v, va);
554 }
556 static int check_for_null_selector(unsigned long eip)
557 {
558 unsigned char inst[MAX_INST_LEN];
559 unsigned long sel;
560 int i, inst_len;
561 int inst_copy_from_guest(unsigned char *, unsigned long, int);
563 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
564 memset(inst, 0, MAX_INST_LEN);
565 if (inst_copy_from_guest(inst, eip, inst_len) != inst_len) {
566 printf("check_for_null_selector: get guest instruction failed\n");
567 domain_crash_synchronous();
568 }
570 for (i = 0; i < inst_len; i++) {
571 switch (inst[i]) {
572 case 0xf3: /* REPZ */
573 case 0xf2: /* REPNZ */
574 case 0xf0: /* LOCK */
575 case 0x66: /* data32 */
576 case 0x67: /* addr32 */
577 continue;
578 case 0x2e: /* CS */
579 __vmread(GUEST_CS_SELECTOR, &sel);
580 break;
581 case 0x36: /* SS */
582 __vmread(GUEST_SS_SELECTOR, &sel);
583 break;
584 case 0x26: /* ES */
585 __vmread(GUEST_ES_SELECTOR, &sel);
586 break;
587 case 0x64: /* FS */
588 __vmread(GUEST_FS_SELECTOR, &sel);
589 break;
590 case 0x65: /* GS */
591 __vmread(GUEST_GS_SELECTOR, &sel);
592 break;
593 case 0x3e: /* DS */
594 /* FALLTHROUGH */
595 default:
596 /* DS is the default */
597 __vmread(GUEST_DS_SELECTOR, &sel);
598 }
599 return sel == 0 ? 1 : 0;
600 }
602 return 0;
603 }
605 void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
606 unsigned long count, int size, long value, int dir, int pvalid)
607 {
608 struct vcpu *v = current;
609 vcpu_iodata_t *vio;
610 ioreq_t *p;
612 vio = get_vio(v->domain, v->vcpu_id);
613 if (vio == NULL) {
614 printk("bad shared page: %lx\n", (unsigned long) vio);
615 domain_crash_synchronous();
616 }
618 if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
619 printf("VMX I/O has not yet completed\n");
620 domain_crash_synchronous();
621 }
622 set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
624 p = &vio->vp_ioreq;
625 p->dir = dir;
626 p->pdata_valid = pvalid;
628 p->type = IOREQ_TYPE_PIO;
629 p->size = size;
630 p->addr = port;
631 p->count = count;
632 p->df = regs->eflags & EF_DF ? 1 : 0;
634 if (pvalid) {
635 if (vmx_paging_enabled(current))
636 p->u.pdata = (void *) gva_to_gpa(value);
637 else
638 p->u.pdata = (void *) value; /* guest VA == guest PA */
639 } else
640 p->u.data = value;
642 p->state = STATE_IOREQ_READY;
644 if (vmx_portio_intercept(p)) {
645 /* no blocking & no evtchn notification */
646 clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
647 return;
648 }
650 evtchn_send(iopacket_port(v->domain));
651 vmx_wait_io();
652 }
654 static void vmx_io_instruction(struct cpu_user_regs *regs,
655 unsigned long exit_qualification, unsigned long inst_len)
656 {
657 struct mi_per_cpu_info *mpcip;
658 unsigned long eip, cs, eflags;
659 unsigned long port, size, dir;
660 int vm86;
662 mpcip = &current->domain->arch.vmx_platform.mpci;
663 mpcip->instr = INSTR_PIO;
664 mpcip->flags = 0;
666 __vmread(GUEST_RIP, &eip);
667 __vmread(GUEST_CS_SELECTOR, &cs);
668 __vmread(GUEST_RFLAGS, &eflags);
669 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
671 VMX_DBG_LOG(DBG_LEVEL_1,
672 "vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
673 "exit_qualification = %lx",
674 vm86, cs, eip, exit_qualification);
676 if (test_bit(6, &exit_qualification))
677 port = (exit_qualification >> 16) & 0xFFFF;
678 else
679 port = regs->edx & 0xffff;
680 TRACE_VMEXIT(2, port);
681 size = (exit_qualification & 7) + 1;
682 dir = test_bit(3, &exit_qualification); /* direction */
684 if (test_bit(4, &exit_qualification)) { /* string instruction */
685 unsigned long addr, count = 1;
686 int sign = regs->eflags & EF_DF ? -1 : 1;
688 __vmread(GUEST_LINEAR_ADDRESS, &addr);
690 /*
691 * In protected mode, guest linear address is invalid if the
692 * selector is null.
693 */
694 if (!vm86 && check_for_null_selector(eip))
695 addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
697 if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
698 mpcip->flags |= REPZ;
699 count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
700 }
702 /*
703 * Handle string pio instructions that cross pages or that
704 * are unaligned. See the comments in vmx_platform.c/handle_mmio()
705 */
706 if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
707 unsigned long value = 0;
709 mpcip->flags |= OVERLAP;
710 if (dir == IOREQ_WRITE)
711 vmx_copy(&value, addr, size, VMX_COPY_IN);
712 send_pio_req(regs, port, 1, size, value, dir, 0);
713 } else {
714 if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
715 if (sign > 0)
716 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
717 else
718 count = (addr & ~PAGE_MASK) / size;
719 } else
720 __update_guest_eip(inst_len);
722 send_pio_req(regs, port, count, size, addr, dir, 1);
723 }
724 } else {
725 __update_guest_eip(inst_len);
726 send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
727 }
728 }
730 int
731 vmx_copy(void *buf, unsigned long laddr, int size, int dir)
732 {
733 unsigned long gpa, mfn;
734 char *addr;
735 int count;
737 while (size > 0) {
738 count = PAGE_SIZE - (laddr & ~PAGE_MASK);
739 if (count > size)
740 count = size;
742 if (vmx_paging_enabled(current)) {
743 gpa = gva_to_gpa(laddr);
744 mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
745 } else
746 mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
747 if (mfn == INVALID_MFN)
748 return 0;
750 addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
752 if (dir == VMX_COPY_IN)
753 memcpy(buf, addr, count);
754 else
755 memcpy(addr, buf, count);
757 unmap_domain_page(addr);
759 laddr += count;
760 buf += count;
761 size -= count;
762 }
764 return 1;
765 }
767 int
768 vmx_world_save(struct vcpu *d, struct vmx_assist_context *c)
769 {
770 unsigned long inst_len;
771 int error = 0;
773 error |= __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
774 error |= __vmread(GUEST_RIP, &c->eip);
775 c->eip += inst_len; /* skip transition instruction */
776 error |= __vmread(GUEST_RSP, &c->esp);
777 error |= __vmread(GUEST_RFLAGS, &c->eflags);
779 error |= __vmread(CR0_READ_SHADOW, &c->cr0);
780 c->cr3 = d->arch.arch_vmx.cpu_cr3;
781 error |= __vmread(CR4_READ_SHADOW, &c->cr4);
783 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
784 error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
786 error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
787 error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
789 error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
790 error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
791 error |= __vmread(GUEST_CS_BASE, &c->cs_base);
792 error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
794 error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
795 error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
796 error |= __vmread(GUEST_DS_BASE, &c->ds_base);
797 error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
799 error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
800 error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
801 error |= __vmread(GUEST_ES_BASE, &c->es_base);
802 error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
804 error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
805 error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
806 error |= __vmread(GUEST_SS_BASE, &c->ss_base);
807 error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
809 error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
810 error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
811 error |= __vmread(GUEST_FS_BASE, &c->fs_base);
812 error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
814 error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
815 error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
816 error |= __vmread(GUEST_GS_BASE, &c->gs_base);
817 error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
819 error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
820 error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
821 error |= __vmread(GUEST_TR_BASE, &c->tr_base);
822 error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
824 error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
825 error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
826 error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
827 error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
829 return !error;
830 }
832 int
833 vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c)
834 {
835 unsigned long mfn, old_cr4;
836 int error = 0;
838 error |= __vmwrite(GUEST_RIP, c->eip);
839 error |= __vmwrite(GUEST_RSP, c->esp);
840 error |= __vmwrite(GUEST_RFLAGS, c->eflags);
842 error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
844 if (!vmx_paging_enabled(d)) {
845 VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
846 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
847 goto skip_cr3;
848 }
850 if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
851 /*
852 * This is a simple TLB flush, implying the guest has
853 * removed some translation or changed page attributes.
854 * We simply invalidate the shadow.
855 */
856 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
857 if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
858 printk("Invalid CR3 value=%x", c->cr3);
859 domain_crash_synchronous();
860 return 0;
861 }
862 shadow_sync_all(d->domain);
863 } else {
864 /*
865 * If different, make a shadow. Check if the PDBR is valid
866 * first.
867 */
868 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
869 if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
870 printk("Invalid CR3 value=%x", c->cr3);
871 domain_crash_synchronous();
872 return 0;
873 }
874 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
875 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
876 update_pagetables(d);
877 /*
878 * arch.shadow_table should now hold the next CR3 for shadow
879 */
880 d->arch.arch_vmx.cpu_cr3 = c->cr3;
881 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
882 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
883 }
885 skip_cr3:
887 error |= __vmread(CR4_READ_SHADOW, &old_cr4);
888 error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
889 error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
891 error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
892 error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
894 error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
895 error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
897 error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
898 error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
899 error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
900 error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
902 error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
903 error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
904 error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
905 error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
907 error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
908 error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
909 error |= __vmwrite(GUEST_ES_BASE, c->es_base);
910 error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
912 error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
913 error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
914 error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
915 error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
917 error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
918 error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
919 error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
920 error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
922 error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
923 error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
924 error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
925 error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
927 error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
928 error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
929 error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
930 error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
932 error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
933 error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
934 error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
935 error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
937 return !error;
938 }
940 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
942 int
943 vmx_assist(struct vcpu *d, int mode)
944 {
945 struct vmx_assist_context c;
946 u32 magic;
947 u32 cp;
949 /* make sure vmxassist exists (this is not an error) */
950 if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN))
951 return 0;
952 if (magic != VMXASSIST_MAGIC)
953 return 0;
955 switch (mode) {
956 /*
957 * Transfer control to vmxassist.
958 * Store the current context in VMXASSIST_OLD_CONTEXT and load
959 * the new VMXASSIST_NEW_CONTEXT context. This context was created
960 * by vmxassist and will transfer control to it.
961 */
962 case VMX_ASSIST_INVOKE:
963 /* save the old context */
964 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
965 goto error;
966 if (cp != 0) {
967 if (!vmx_world_save(d, &c))
968 goto error;
969 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT))
970 goto error;
971 }
973 /* restore the new context, this should activate vmxassist */
974 if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN))
975 goto error;
976 if (cp != 0) {
977 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
978 goto error;
979 if (!vmx_world_restore(d, &c))
980 goto error;
981 return 1;
982 }
983 break;
985 /*
986 * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
987 * above.
988 */
989 case VMX_ASSIST_RESTORE:
990 /* save the old context */
991 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
992 goto error;
993 if (cp != 0) {
994 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
995 goto error;
996 if (!vmx_world_restore(d, &c))
997 goto error;
998 return 1;
999 }
1000 break;
1003 error:
1004 printf("Failed to transfer to vmxassist\n");
1005 domain_crash_synchronous();
1006 return 0;
1009 static int vmx_set_cr0(unsigned long value)
1011 struct vcpu *d = current;
1012 unsigned long mfn;
1013 unsigned long eip;
1014 int paging_enabled;
1015 unsigned long vm_entry_value;
1016 /*
1017 * CR0: We don't want to lose PE and PG.
1018 */
1019 paging_enabled = vmx_paging_enabled(d);
1020 __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
1021 __vmwrite(CR0_READ_SHADOW, value);
1023 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
1025 if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) {
1026 /*
1027 * The guest CR3 must be pointing to the guest physical.
1028 */
1029 if ( !VALID_MFN(mfn = get_mfn_from_pfn(
1030 d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
1031 !get_page(pfn_to_page(mfn), d->domain) )
1033 printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3);
1034 domain_crash_synchronous(); /* need to take a clean path */
1037 #if defined(__x86_64__)
1038 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1039 &d->arch.arch_vmx.cpu_state) &&
1040 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
1041 &d->arch.arch_vmx.cpu_state)){
1042 VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
1043 vmx_inject_exception(d, TRAP_gp_fault, 0);
1045 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1046 &d->arch.arch_vmx.cpu_state)){
1047 /* At this point PAE should already be enabled */
1048 VMX_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
1049 set_bit(VMX_CPU_STATE_LMA_ENABLED,
1050 &d->arch.arch_vmx.cpu_state);
1051 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1052 vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
1053 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1055 #if CONFIG_PAGING_LEVELS >= 4
1056 if(!shadow_set_guest_paging_levels(d->domain, 4)) {
1057 printk("Unsupported guest paging levels\n");
1058 domain_crash_synchronous(); /* need to take a clean path */
1060 #endif
1062 else
1064 #if CONFIG_PAGING_LEVELS >= 4
1065 if(!shadow_set_guest_paging_levels(d->domain, 2)) {
1066 printk("Unsupported guest paging levels\n");
1067 domain_crash_synchronous(); /* need to take a clean path */
1069 #endif
1072 unsigned long crn;
1073 /* update CR4's PAE if needed */
1074 __vmread(GUEST_CR4, &crn);
1075 if ( (!(crn & X86_CR4_PAE)) &&
1076 test_bit(VMX_CPU_STATE_PAE_ENABLED,
1077 &d->arch.arch_vmx.cpu_state)){
1078 VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
1079 __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
1081 #elif defined( __i386__)
1082 unsigned long old_base_mfn;
1083 old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
1084 if (old_base_mfn)
1085 put_page(pfn_to_page(old_base_mfn));
1086 #endif
1087 /*
1088 * Now arch.guest_table points to machine physical.
1089 */
1090 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1091 update_pagetables(d);
1093 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
1094 (unsigned long) (mfn << PAGE_SHIFT));
1096 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
1097 /*
1098 * arch->shadow_table should hold the next CR3 for shadow
1099 */
1100 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
1101 d->arch.arch_vmx.cpu_cr3, mfn);
1104 /*
1105 * VMX does not implement real-mode virtualization. We emulate
1106 * real-mode by performing a world switch to VMXAssist whenever
1107 * a partition disables the CR0.PE bit.
1108 */
1109 if ((value & X86_CR0_PE) == 0) {
1110 if ( value & X86_CR0_PG ) {
1111 /* inject GP here */
1112 vmx_inject_exception(d, TRAP_gp_fault, 0);
1113 return 0;
1114 } else {
1115 /*
1116 * Disable paging here.
1117 * Same as PE == 1 && PG == 0
1118 */
1119 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1120 &d->arch.arch_vmx.cpu_state)){
1121 clear_bit(VMX_CPU_STATE_LMA_ENABLED,
1122 &d->arch.arch_vmx.cpu_state);
1123 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1124 vm_entry_value &= ~VM_ENTRY_CONTROLS_IA32E_MODE;
1125 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1128 __vmread(GUEST_RIP, &eip);
1129 VMX_DBG_LOG(DBG_LEVEL_1,
1130 "Disabling CR0.PE at %%eip 0x%lx\n", eip);
1131 if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
1132 set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
1133 __vmread(GUEST_RIP, &eip);
1134 VMX_DBG_LOG(DBG_LEVEL_1,
1135 "Transfering control to vmxassist %%eip 0x%lx\n", eip);
1136 return 0; /* do not update eip! */
1138 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1139 &d->arch.arch_vmx.cpu_state)) {
1140 __vmread(GUEST_RIP, &eip);
1141 VMX_DBG_LOG(DBG_LEVEL_1,
1142 "Enabling CR0.PE at %%eip 0x%lx\n", eip);
1143 if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
1144 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1145 &d->arch.arch_vmx.cpu_state);
1146 __vmread(GUEST_RIP, &eip);
1147 VMX_DBG_LOG(DBG_LEVEL_1,
1148 "Restoring to %%eip 0x%lx\n", eip);
1149 return 0; /* do not update eip! */
1153 return 1;
1156 #define CASE_GET_REG(REG, reg) \
1157 case REG_ ## REG: value = regs->reg; break
1159 #define CASE_EXTEND_SET_REG \
1160 CASE_EXTEND_REG(S)
1161 #define CASE_EXTEND_GET_REG \
1162 CASE_EXTEND_REG(G)
1164 #ifdef __i386__
1165 #define CASE_EXTEND_REG(T)
1166 #else
1167 #define CASE_EXTEND_REG(T) \
1168 CASE_ ## T ## ET_REG(R8, r8); \
1169 CASE_ ## T ## ET_REG(R9, r9); \
1170 CASE_ ## T ## ET_REG(R10, r10); \
1171 CASE_ ## T ## ET_REG(R11, r11); \
1172 CASE_ ## T ## ET_REG(R12, r12); \
1173 CASE_ ## T ## ET_REG(R13, r13); \
1174 CASE_ ## T ## ET_REG(R14, r14); \
1175 CASE_ ## T ## ET_REG(R15, r15);
1176 #endif
1179 /*
1180 * Write to control registers
1181 */
1182 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
1184 unsigned long value;
1185 unsigned long old_cr;
1186 struct vcpu *d = current;
1188 switch (gp) {
1189 CASE_GET_REG(EAX, eax);
1190 CASE_GET_REG(ECX, ecx);
1191 CASE_GET_REG(EDX, edx);
1192 CASE_GET_REG(EBX, ebx);
1193 CASE_GET_REG(EBP, ebp);
1194 CASE_GET_REG(ESI, esi);
1195 CASE_GET_REG(EDI, edi);
1196 CASE_EXTEND_GET_REG
1197 case REG_ESP:
1198 __vmread(GUEST_RSP, &value);
1199 break;
1200 default:
1201 printk("invalid gp: %d\n", gp);
1202 __vmx_bug(regs);
1205 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
1206 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
1208 switch(cr) {
1209 case 0:
1211 return vmx_set_cr0(value);
1213 case 3:
1215 unsigned long old_base_mfn, mfn;
1217 /*
1218 * If paging is not enabled yet, simply copy the value to CR3.
1219 */
1220 if (!vmx_paging_enabled(d)) {
1221 d->arch.arch_vmx.cpu_cr3 = value;
1222 break;
1225 /*
1226 * We make a new one if the shadow does not exist.
1227 */
1228 if (value == d->arch.arch_vmx.cpu_cr3) {
1229 /*
1230 * This is a simple TLB flush, implying the guest has
1231 * removed some translation or changed page attributes.
1232 * We simply invalidate the shadow.
1233 */
1234 mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
1235 if (mfn != pagetable_get_pfn(d->arch.guest_table))
1236 __vmx_bug(regs);
1237 shadow_sync_all(d->domain);
1238 } else {
1239 /*
1240 * If different, make a shadow. Check if the PDBR is valid
1241 * first.
1242 */
1243 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
1244 if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
1245 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
1246 !get_page(pfn_to_page(mfn), d->domain) )
1248 printk("Invalid CR3 value=%lx", value);
1249 domain_crash_synchronous(); /* need to take a clean path */
1251 old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
1252 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1253 if (old_base_mfn)
1254 put_page(pfn_to_page(old_base_mfn));
1255 update_pagetables(d);
1256 /*
1257 * arch.shadow_table should now hold the next CR3 for shadow
1258 */
1259 d->arch.arch_vmx.cpu_cr3 = value;
1260 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
1261 value);
1262 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
1264 break;
1266 case 4:
1268 /* CR4 */
1269 unsigned long old_guest_cr;
1271 __vmread(GUEST_CR4, &old_guest_cr);
1272 if (value & X86_CR4_PAE){
1273 set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
1274 } else {
1275 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1276 &d->arch.arch_vmx.cpu_state)){
1277 vmx_inject_exception(d, TRAP_gp_fault, 0);
1279 clear_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
1282 __vmread(CR4_READ_SHADOW, &old_cr);
1284 __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
1285 __vmwrite(CR4_READ_SHADOW, value);
1287 /*
1288 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
1289 * all TLB entries except global entries.
1290 */
1291 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
1292 shadow_sync_all(d->domain);
1294 break;
1296 default:
1297 printk("invalid cr: %d\n", gp);
1298 __vmx_bug(regs);
1301 return 1;
1304 #define CASE_SET_REG(REG, reg) \
1305 case REG_ ## REG: \
1306 regs->reg = value; \
1307 break
1309 /*
1310 * Read from control registers. CR0 and CR4 are read from the shadow.
1311 */
1312 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
1314 unsigned long value;
1315 struct vcpu *d = current;
1317 if (cr != 3)
1318 __vmx_bug(regs);
1320 value = (unsigned long) d->arch.arch_vmx.cpu_cr3;
1322 switch (gp) {
1323 CASE_SET_REG(EAX, eax);
1324 CASE_SET_REG(ECX, ecx);
1325 CASE_SET_REG(EDX, edx);
1326 CASE_SET_REG(EBX, ebx);
1327 CASE_SET_REG(EBP, ebp);
1328 CASE_SET_REG(ESI, esi);
1329 CASE_SET_REG(EDI, edi);
1330 CASE_EXTEND_SET_REG
1331 case REG_ESP:
1332 __vmwrite(GUEST_RSP, value);
1333 regs->esp = value;
1334 break;
1335 default:
1336 printk("invalid gp: %d\n", gp);
1337 __vmx_bug(regs);
1340 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
1343 static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
1345 unsigned int gp, cr;
1346 unsigned long value;
1348 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
1349 case TYPE_MOV_TO_CR:
1350 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1351 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1352 TRACE_VMEXIT(1,TYPE_MOV_TO_CR);
1353 TRACE_VMEXIT(2,cr);
1354 TRACE_VMEXIT(3,gp);
1355 return mov_to_cr(gp, cr, regs);
1356 case TYPE_MOV_FROM_CR:
1357 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1358 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1359 TRACE_VMEXIT(1,TYPE_MOV_FROM_CR);
1360 TRACE_VMEXIT(2,cr);
1361 TRACE_VMEXIT(3,gp);
1362 mov_from_cr(cr, gp, regs);
1363 break;
1364 case TYPE_CLTS:
1365 TRACE_VMEXIT(1,TYPE_CLTS);
1366 clts();
1367 setup_fpu(current);
1369 __vmread(GUEST_CR0, &value);
1370 value &= ~X86_CR0_TS; /* clear TS */
1371 __vmwrite(GUEST_CR0, value);
1373 __vmread(CR0_READ_SHADOW, &value);
1374 value &= ~X86_CR0_TS; /* clear TS */
1375 __vmwrite(CR0_READ_SHADOW, value);
1376 break;
1377 case TYPE_LMSW:
1378 TRACE_VMEXIT(1,TYPE_LMSW);
1379 __vmread(CR0_READ_SHADOW, &value);
1380 value = (value & ~0xF) |
1381 (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
1382 return vmx_set_cr0(value);
1383 break;
1384 default:
1385 __vmx_bug(regs);
1386 break;
1388 return 1;
1391 static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
1393 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
1394 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1395 (unsigned long)regs->edx);
1396 switch (regs->ecx) {
1397 case MSR_IA32_SYSENTER_CS:
1398 __vmread(GUEST_SYSENTER_CS, &regs->eax);
1399 regs->edx = 0;
1400 break;
1401 case MSR_IA32_SYSENTER_ESP:
1402 __vmread(GUEST_SYSENTER_ESP, &regs->eax);
1403 regs->edx = 0;
1404 break;
1405 case MSR_IA32_SYSENTER_EIP:
1406 __vmread(GUEST_SYSENTER_EIP, &regs->eax);
1407 regs->edx = 0;
1408 break;
1409 default:
1410 if(long_mode_do_msr_read(regs))
1411 return;
1412 rdmsr_user(regs->ecx, regs->eax, regs->edx);
1413 break;
1416 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
1417 "ecx=%lx, eax=%lx, edx=%lx",
1418 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1419 (unsigned long)regs->edx);
1422 static inline void vmx_do_msr_write(struct cpu_user_regs *regs)
1424 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write: ecx=%lx, eax=%lx, edx=%lx",
1425 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1426 (unsigned long)regs->edx);
1427 switch (regs->ecx) {
1428 case MSR_IA32_SYSENTER_CS:
1429 __vmwrite(GUEST_SYSENTER_CS, regs->eax);
1430 break;
1431 case MSR_IA32_SYSENTER_ESP:
1432 __vmwrite(GUEST_SYSENTER_ESP, regs->eax);
1433 break;
1434 case MSR_IA32_SYSENTER_EIP:
1435 __vmwrite(GUEST_SYSENTER_EIP, regs->eax);
1436 break;
1437 default:
1438 long_mode_do_msr_write(regs);
1439 break;
1442 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: "
1443 "ecx=%lx, eax=%lx, edx=%lx",
1444 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1445 (unsigned long)regs->edx);
1448 /*
1449 * Need to use this exit to reschedule
1450 */
1451 static inline void vmx_vmexit_do_hlt(void)
1453 #if VMX_DEBUG
1454 unsigned long eip;
1455 __vmread(GUEST_RIP, &eip);
1456 #endif
1457 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
1458 raise_softirq(SCHEDULE_SOFTIRQ);
1461 static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
1463 unsigned int vector;
1464 int error;
1466 asmlinkage void do_IRQ(struct cpu_user_regs *);
1467 void smp_apic_timer_interrupt(struct cpu_user_regs *);
1468 void timer_interrupt(int, void *, struct cpu_user_regs *);
1469 void smp_event_check_interrupt(void);
1470 void smp_invalidate_interrupt(void);
1471 void smp_call_function_interrupt(void);
1472 void smp_spurious_interrupt(struct cpu_user_regs *regs);
1473 void smp_error_interrupt(struct cpu_user_regs *regs);
1475 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1476 && !(vector & INTR_INFO_VALID_MASK))
1477 __vmx_bug(regs);
1479 vector &= 0xff;
1480 local_irq_disable();
1482 switch(vector) {
1483 case LOCAL_TIMER_VECTOR:
1484 smp_apic_timer_interrupt(regs);
1485 break;
1486 case EVENT_CHECK_VECTOR:
1487 smp_event_check_interrupt();
1488 break;
1489 case INVALIDATE_TLB_VECTOR:
1490 smp_invalidate_interrupt();
1491 break;
1492 case CALL_FUNCTION_VECTOR:
1493 smp_call_function_interrupt();
1494 break;
1495 case SPURIOUS_APIC_VECTOR:
1496 smp_spurious_interrupt(regs);
1497 break;
1498 case ERROR_APIC_VECTOR:
1499 smp_error_interrupt(regs);
1500 break;
1501 default:
1502 regs->entry_vector = vector;
1503 do_IRQ(regs);
1504 break;
1508 static inline void vmx_vmexit_do_mwait(void)
1510 #if VMX_DEBUG
1511 unsigned long eip;
1512 __vmread(GUEST_RIP, &eip);
1513 #endif
1514 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
1515 raise_softirq(SCHEDULE_SOFTIRQ);
1518 #define BUF_SIZ 256
1519 #define MAX_LINE 80
1520 char print_buf[BUF_SIZ];
1521 static int index;
1523 static void vmx_print_line(const char c, struct vcpu *d)
1526 if (index == MAX_LINE || c == '\n') {
1527 if (index == MAX_LINE) {
1528 print_buf[index++] = c;
1530 print_buf[index] = '\0';
1531 printk("(GUEST: %u) %s\n", d->domain->domain_id, (char *) &print_buf);
1532 index = 0;
1534 else
1535 print_buf[index++] = c;
1538 void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
1540 __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
1541 __vmread(GUEST_RSP, &ctxt->esp);
1542 __vmread(GUEST_RFLAGS, &ctxt->eflags);
1543 __vmread(GUEST_CS_SELECTOR, &ctxt->cs);
1544 __vmread(GUEST_RIP, &ctxt->eip);
1546 __vmread(GUEST_GS_SELECTOR, &ctxt->gs);
1547 __vmread(GUEST_FS_SELECTOR, &ctxt->fs);
1548 __vmread(GUEST_ES_SELECTOR, &ctxt->es);
1549 __vmread(GUEST_DS_SELECTOR, &ctxt->ds);
1552 #ifdef XEN_DEBUGGER
1553 void save_cpu_user_regs(struct cpu_user_regs *regs)
1555 __vmread(GUEST_SS_SELECTOR, &regs->xss);
1556 __vmread(GUEST_RSP, &regs->esp);
1557 __vmread(GUEST_RFLAGS, &regs->eflags);
1558 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
1559 __vmread(GUEST_RIP, &regs->eip);
1561 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
1562 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
1563 __vmread(GUEST_ES_SELECTOR, &regs->xes);
1564 __vmread(GUEST_DS_SELECTOR, &regs->xds);
1567 void restore_cpu_user_regs(struct cpu_user_regs *regs)
1569 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
1570 __vmwrite(GUEST_RSP, regs->esp);
1571 __vmwrite(GUEST_RFLAGS, regs->eflags);
1572 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
1573 __vmwrite(GUEST_RIP, regs->eip);
1575 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
1576 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
1577 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
1578 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
1580 #endif
1582 asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
1584 unsigned int exit_reason, idtv_info_field;
1585 unsigned long exit_qualification, eip, inst_len = 0;
1586 struct vcpu *v = current;
1587 int error;
1589 if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
1590 __vmx_bug(&regs);
1592 perfc_incra(vmexits, exit_reason);
1594 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
1595 if (idtv_info_field & INTR_INFO_VALID_MASK) {
1596 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
1598 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
1599 if (inst_len >= 1 && inst_len <= 15)
1600 __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
1602 if (idtv_info_field & 0x800) { /* valid error code */
1603 unsigned long error_code;
1604 __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
1605 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1608 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
1611 /* don't bother with H/W interrupts */
1612 if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
1613 exit_reason != EXIT_REASON_VMCALL &&
1614 exit_reason != EXIT_REASON_IO_INSTRUCTION)
1615 VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
1617 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
1618 printk("Failed vm entry\n");
1619 domain_crash_synchronous();
1620 return;
1623 __vmread(GUEST_RIP, &eip);
1624 TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
1625 TRACE_VMEXIT(0,exit_reason);
1627 switch (exit_reason) {
1628 case EXIT_REASON_EXCEPTION_NMI:
1630 /*
1631 * We don't set the software-interrupt exiting (INT n).
1632 * (1) We can get an exception (e.g. #PG) in the guest, or
1633 * (2) NMI
1634 */
1635 int error;
1636 unsigned int vector;
1637 unsigned long va;
1639 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1640 || !(vector & INTR_INFO_VALID_MASK))
1641 __vmx_bug(&regs);
1642 vector &= 0xff;
1644 TRACE_VMEXIT(1,vector);
1645 perfc_incra(cause_vector, vector);
1647 TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
1648 switch (vector) {
1649 #ifdef XEN_DEBUGGER
1650 case TRAP_debug:
1652 save_cpu_user_regs(&regs);
1653 pdb_handle_exception(1, &regs, 1);
1654 restore_cpu_user_regs(&regs);
1655 break;
1657 case TRAP_int3:
1659 save_cpu_user_regs(&regs);
1660 pdb_handle_exception(3, &regs, 1);
1661 restore_cpu_user_regs(&regs);
1662 break;
1664 #else
1665 case TRAP_debug:
1667 void store_cpu_user_regs(struct cpu_user_regs *regs);
1668 long do_sched_op(unsigned long op);
1671 store_cpu_user_regs(&regs);
1672 __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
1674 set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
1675 do_sched_op(SCHEDOP_yield);
1677 break;
1679 #endif
1680 case TRAP_no_device:
1682 vmx_do_no_device_fault();
1683 break;
1685 case TRAP_page_fault:
1687 __vmread(EXIT_QUALIFICATION, &va);
1688 __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
1690 TRACE_VMEXIT(3,regs.error_code);
1691 TRACE_VMEXIT(4,va);
1693 VMX_DBG_LOG(DBG_LEVEL_VMMU,
1694 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
1695 (unsigned long)regs.eax, (unsigned long)regs.ebx,
1696 (unsigned long)regs.ecx, (unsigned long)regs.edx,
1697 (unsigned long)regs.esi, (unsigned long)regs.edi);
1698 v->domain->arch.vmx_platform.mpci.inst_decoder_regs = &regs;
1700 if (!(error = vmx_do_page_fault(va, &regs))) {
1701 /*
1702 * Inject #PG using Interruption-Information Fields
1703 */
1704 vmx_inject_exception(v, TRAP_page_fault, regs.error_code);
1705 v->arch.arch_vmx.cpu_cr2 = va;
1706 TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
1708 break;
1710 case TRAP_nmi:
1711 do_nmi(&regs, 0);
1712 break;
1713 default:
1714 vmx_reflect_exception(v);
1715 break;
1717 break;
1719 case EXIT_REASON_EXTERNAL_INTERRUPT:
1720 vmx_vmexit_do_extint(&regs);
1721 break;
1722 case EXIT_REASON_PENDING_INTERRUPT:
1723 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
1724 MONITOR_CPU_BASED_EXEC_CONTROLS);
1725 break;
1726 case EXIT_REASON_TASK_SWITCH:
1727 __vmx_bug(&regs);
1728 break;
1729 case EXIT_REASON_CPUID:
1730 __get_instruction_length(inst_len);
1731 vmx_vmexit_do_cpuid(regs.eax, &regs);
1732 __update_guest_eip(inst_len);
1733 break;
1734 case EXIT_REASON_HLT:
1735 __get_instruction_length(inst_len);
1736 __update_guest_eip(inst_len);
1737 vmx_vmexit_do_hlt();
1738 break;
1739 case EXIT_REASON_INVLPG:
1741 unsigned long va;
1743 __vmread(EXIT_QUALIFICATION, &va);
1744 vmx_vmexit_do_invlpg(va);
1745 __get_instruction_length(inst_len);
1746 __update_guest_eip(inst_len);
1747 break;
1749 case EXIT_REASON_VMCALL:
1750 __get_instruction_length(inst_len);
1751 __vmread(GUEST_RIP, &eip);
1752 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1754 vmx_print_line(regs.eax, v); /* provides the current domain */
1755 __update_guest_eip(inst_len);
1756 break;
1757 case EXIT_REASON_CR_ACCESS:
1759 __vmread(GUEST_RIP, &eip);
1760 __get_instruction_length(inst_len);
1761 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1763 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
1764 eip, inst_len, exit_qualification);
1765 if (vmx_cr_access(exit_qualification, &regs))
1766 __update_guest_eip(inst_len);
1767 TRACE_VMEXIT(3,regs.error_code);
1768 TRACE_VMEXIT(4,exit_qualification);
1769 break;
1771 case EXIT_REASON_DR_ACCESS:
1772 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1773 vmx_dr_access(exit_qualification, &regs);
1774 __get_instruction_length(inst_len);
1775 __update_guest_eip(inst_len);
1776 break;
1777 case EXIT_REASON_IO_INSTRUCTION:
1778 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1779 __get_instruction_length(inst_len);
1780 vmx_io_instruction(&regs, exit_qualification, inst_len);
1781 TRACE_VMEXIT(4,exit_qualification);
1782 break;
1783 case EXIT_REASON_MSR_READ:
1784 __get_instruction_length(inst_len);
1785 vmx_do_msr_read(&regs);
1786 __update_guest_eip(inst_len);
1787 break;
1788 case EXIT_REASON_MSR_WRITE:
1789 __vmread(GUEST_RIP, &eip);
1790 vmx_do_msr_write(&regs);
1791 __get_instruction_length(inst_len);
1792 __update_guest_eip(inst_len);
1793 break;
1794 case EXIT_REASON_MWAIT_INSTRUCTION:
1795 __get_instruction_length(inst_len);
1796 __update_guest_eip(inst_len);
1797 vmx_vmexit_do_mwait();
1798 break;
1799 default:
1800 __vmx_bug(&regs); /* should not happen */
1804 asmlinkage void load_cr2(void)
1806 struct vcpu *d = current;
1808 local_irq_disable();
1809 #ifdef __i386__
1810 asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1811 #else
1812 asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1813 #endif
1816 #ifdef TRACE_BUFFER
1817 asmlinkage void trace_vmentry (void)
1819 TRACE_5D(TRC_VMENTRY,trace_values[current->processor][0],
1820 trace_values[current->processor][1],trace_values[current->processor][2],
1821 trace_values[current->processor][3],trace_values[current->processor][4]);
1822 TRACE_VMEXIT(0,9);
1823 TRACE_VMEXIT(1,9);
1824 TRACE_VMEXIT(2,9);
1825 TRACE_VMEXIT(3,9);
1826 TRACE_VMEXIT(4,9);
1827 return;
1829 asmlinkage void trace_vmexit (void)
1831 TRACE_3D(TRC_VMEXIT,0,0,0);
1832 return;
1834 #endif
1835 #endif /* CONFIG_VMX */
1837 /*
1838 * Local variables:
1839 * mode: C
1840 * c-set-style: "BSD"
1841 * c-basic-offset: 4
1842 * tab-width: 4
1843 * indent-tabs-mode: nil
1844 * End:
1845 */