debuggers.hg

view xen/arch/x86/vmx.c @ 4668:e4e76c16ec78

bitkeeper revision 1.1364 (426812d6FWlbLqlBBPVfuegkt0_fyg)

Now that the shadow code is squashing the global bit in PTEs,
it's OK to re-enable the PGE capability flag for vmx domains.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author maf46@burn.cl.cam.ac.uk
date Thu Apr 21 20:53:42 2005 +0000 (2005-04-21)
parents 9a768d11cc7b
children d769cf21930d
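The fix described above lives in the shadow pagetable code rather than in this file: when a guest L1 PTE is propagated into its shadow entry, the global bit is masked off, so no stale "global" mapping can survive a shadow flush even though the guest believes PGE is available. Below is a minimal standalone sketch of that idea; the helper name, macro, and placement are assumptions for illustration only, not this changeset's actual code.

    /* Illustrative sketch only -- not part of this changeset.
     * Strip the global bit from a raw x86 PTE value before it is
     * installed in a shadow pagetable, so a shadow flush/teardown
     * still removes the mapping even if the guest marked it global. */
    #define SKETCH_PAGE_GLOBAL 0x100UL   /* bit 8 of an x86 PTE */

    static inline unsigned long shadow_squash_global(unsigned long guest_pte)
    {
        return guest_pte & ~SKETCH_PAGE_GLOBAL;
    }

With the global bit squashed in every shadow PTE, the cpuid intercept no longer needs to hide X86_FEATURE_PGE from VMX domains, which is the re-enabling this changeset refers to.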
line source
1 /*
2 * vmx.c: handling VMX architecture-related VM exits
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/init.h>
22 #include <xen/lib.h>
23 #include <xen/trace.h>
24 #include <xen/sched.h>
25 #include <xen/softirq.h>
26 #include <asm/current.h>
27 #include <asm/io.h>
28 #include <asm/irq.h>
29 #include <asm/shadow.h>
30 #include <asm/regs.h>
31 #include <asm/cpufeature.h>
32 #include <asm/processor.h>
33 #include <asm/types.h>
34 #include <asm/msr.h>
35 #include <asm/spinlock.h>
36 #include <asm/vmx.h>
37 #include <asm/vmx_vmcs.h>
38 #include <asm/vmx_intercept.h>
39 #include <asm/shadow.h>
40 #include <public/io/ioreq.h>
42 #ifdef CONFIG_VMX
44 int vmcs_size;
45 unsigned int opt_vmx_debug_level = 0;
47 extern long evtchn_send(int lport);
48 extern long do_block(void);
49 void do_nmi(struct xen_regs *, unsigned long);
51 int start_vmx()
52 {
53 struct vmcs_struct *vmcs;
54 unsigned long ecx;
55 u64 phys_vmcs; /* debugging */
57 vmcs_size = VMCS_SIZE;
58 /*
59 * Xen does not fill x86_capability words except 0.
60 */
61 ecx = cpuid_ecx(1);
62 boot_cpu_data.x86_capability[4] = ecx;
64 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
65 return 0;
67 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
69 if (!(vmcs = alloc_vmcs())) {
70 printk("Failed to allocate VMCS\n");
71 return 0;
72 }
74 phys_vmcs = (u64) virt_to_phys(vmcs);
76 if (!(__vmxon(phys_vmcs))) {
77 printk("VMXON is done\n");
78 }
80 return 1;
81 }
83 void stop_vmx()
84 {
85 if (read_cr4() & X86_CR4_VMXE)
86 __vmxoff();
87 }
89 /*
90 * Not all cases receive a valid value in the VM-exit instruction length field.
91 */
92 #define __get_instruction_length(len) \
93 __vmread(INSTRUCTION_LEN, &(len)); \
94 if ((len) < 1 || (len) > 15) \
95 __vmx_bug(&regs);
97 static inline void __update_guest_eip(unsigned long inst_len)
98 {
99 unsigned long current_eip;
101 __vmread(GUEST_EIP, &current_eip);
102 __vmwrite(GUEST_EIP, current_eip + inst_len);
103 }
106 #include <asm/domain_page.h>
108 static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs)
109 {
110 struct exec_domain *ed = current;
111 unsigned long eip;
112 l1_pgentry_t gpte;
113 unsigned long gpa; /* FIXME: PAE */
114 int result;
116 #if VMX_DEBUG
117 {
118 __vmread(GUEST_EIP, &eip);
119 VMX_DBG_LOG(DBG_LEVEL_VMMU,
120 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
121 va, eip, regs->error_code);
122 }
123 #endif
125 /*
126 * If the guest has not enabled paging, then we are still emulating
127 * 1:1 page tables and should never have gotten here.
128 */
129 if ( !test_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state) )
130 {
131 printk("vmx_do_page_fault while running on 1:1 page table\n");
132 return 0;
133 }
135 gpte = gva_to_gpte(va);
136 if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
137 return 0;
138 gpa = l1e_get_phys(gpte) + (va & ~PAGE_MASK);
140 /* Use 1:1 page table to identify MMIO address space */
141 if (mmio_space(gpa))
142 handle_mmio(va, gpa);
144 result = shadow_fault(va, regs);
146 #if 0
147 if ( !result )
148 {
149 __vmread(GUEST_EIP, &eip);
150 printk("vmx pgfault to guest va=%p eip=%p\n", va, eip);
151 }
152 #endif
154 return result;
155 }
157 static void vmx_do_general_protection_fault(struct xen_regs *regs)
158 {
159 unsigned long eip, error_code;
160 unsigned long intr_fields;
162 __vmread(GUEST_EIP, &eip);
163 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
165 VMX_DBG_LOG(DBG_LEVEL_1,
166 "vmx_general_protection_fault: eip = %lx, erro_code = %lx",
167 eip, error_code);
169 VMX_DBG_LOG(DBG_LEVEL_1,
170 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
171 regs->eax, regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
173 /* Reflect it back into the guest */
174 intr_fields = (INTR_INFO_VALID_MASK |
175 INTR_TYPE_EXCEPTION |
176 INTR_INFO_DELIEVER_CODE_MASK |
177 TRAP_gp_fault);
178 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
179 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
180 }
182 static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs)
183 {
184 unsigned int eax, ebx, ecx, edx;
185 unsigned long eip;
187 __vmread(GUEST_EIP, &eip);
189 VMX_DBG_LOG(DBG_LEVEL_1,
190 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
191 " (esi) %lx, (edi) %lx",
192 regs->eax, regs->ebx, regs->ecx, regs->edx,
193 regs->esi, regs->edi);
195 cpuid(input, &eax, &ebx, &ecx, &edx);
197 if (input == 1) {
198 clear_bit(X86_FEATURE_PSE, &edx);
199 clear_bit(X86_FEATURE_PAE, &edx);
200 clear_bit(X86_FEATURE_PSE36, &edx);
201 }
203 regs->eax = (unsigned long) eax;
204 regs->ebx = (unsigned long) ebx;
205 regs->ecx = (unsigned long) ecx;
206 regs->edx = (unsigned long) edx;
208 VMX_DBG_LOG(DBG_LEVEL_1,
209 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
210 eip, input, eax, ebx, ecx, edx);
212 }
214 #define CASE_GET_REG_P(REG, reg) \
215 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
217 static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs)
218 {
219 unsigned int reg;
220 unsigned long *reg_p = 0;
221 struct exec_domain *ed = current;
222 unsigned long eip;
224 __vmread(GUEST_EIP, &eip);
226 reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
228 VMX_DBG_LOG(DBG_LEVEL_1,
229 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
230 eip, reg, exit_qualification);
232 switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
233 CASE_GET_REG_P(EAX, eax);
234 CASE_GET_REG_P(ECX, ecx);
235 CASE_GET_REG_P(EDX, edx);
236 CASE_GET_REG_P(EBX, ebx);
237 CASE_GET_REG_P(EBP, ebp);
238 CASE_GET_REG_P(ESI, esi);
239 CASE_GET_REG_P(EDI, edi);
240 case REG_ESP:
241 break;
242 default:
243 __vmx_bug(regs);
244 }
246 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
247 case TYPE_MOV_TO_DR:
248 /* don't need to check the range */
249 if (reg != REG_ESP)
250 ed->arch.debugreg[reg] = *reg_p;
251 else {
252 unsigned long value;
253 __vmread(GUEST_ESP, &value);
254 ed->arch.debugreg[reg] = value;
255 }
256 break;
257 case TYPE_MOV_FROM_DR:
258 if (reg != REG_ESP)
259 *reg_p = ed->arch.debugreg[reg];
260 else {
261 __vmwrite(GUEST_ESP, ed->arch.debugreg[reg]);
262 }
263 break;
264 }
265 }
267 /*
268 * Invalidate the TLB entry for va and the shadow page corresponding
269 * to the address va.
270 */
271 static void vmx_vmexit_do_invlpg(unsigned long va)
272 {
273 unsigned long eip;
274 struct exec_domain *ed = current;
276 __vmread(GUEST_EIP, &eip);
278 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%p, va=%p",
279 eip, va);
281 /*
282 * We do the safest thing first, then try to update the shadow by
283 * copying from the guest.
284 */
285 shadow_invlpg(ed, va);
286 }
288 static void vmx_io_instruction(struct xen_regs *regs,
289 unsigned long exit_qualification, unsigned long inst_len)
290 {
291 struct exec_domain *d = current;
292 vcpu_iodata_t *vio;
293 ioreq_t *p;
294 unsigned long addr;
295 unsigned long eip, cs, eflags;
296 int vm86;
298 __vmread(GUEST_EIP, &eip);
299 __vmread(GUEST_CS_SELECTOR, &cs);
300 __vmread(GUEST_EFLAGS, &eflags);
301 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
303 VMX_DBG_LOG(DBG_LEVEL_1,
304 "vmx_io_instruction: vm86 %d, eip=%p:%p, exit_qualification = %lx",
305 vm86, cs, eip, exit_qualification);
307 if (test_bit(6, &exit_qualification))
308 addr = (exit_qualification >> 16) & (0xffff);
309 else
310 addr = regs->edx & 0xffff;
312 if (addr == 0x80) {
313 __update_guest_eip(inst_len);
314 return;
315 }
317 vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
318 if (vio == 0) {
319 VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx", (unsigned long) vio);
320 domain_crash_synchronous();
321 }
322 p = &vio->vp_ioreq;
323 p->dir = test_bit(3, &exit_qualification);
325 p->pdata_valid = 0;
326 p->count = 1;
327 p->size = (exit_qualification & 7) + 1;
329 if (test_bit(4, &exit_qualification)) {
330 p->df = (eflags & X86_EFLAGS_DF) ? 1 : 0;
331 p->pdata_valid = 1;
333 if (vm86) {
334 unsigned long seg;
335 if (p->dir == IOREQ_WRITE) {
336 __vmread(GUEST_DS_SELECTOR, &seg);
337 p->u.pdata = (void *)
338 ((seg << 4) | (regs->esi & 0xFFFF));
339 } else {
340 __vmread(GUEST_ES_SELECTOR, &seg);
341 p->u.pdata = (void *)
342 ((seg << 4) | (regs->edi & 0xFFFF));
343 }
344 } else {
345 p->u.pdata = (void *) ((p->dir == IOREQ_WRITE) ?
346 regs->esi : regs->edi);
347 }
348 p->u.pdata = (void *) gva_to_gpa(p->u.data);
351 if (test_bit(5, &exit_qualification))
352 p->count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
353 if ((p->u.data & PAGE_MASK) !=
354 ((p->u.data + p->count * p->size - 1) & PAGE_MASK)) {
355 printk("stringio crosses page boundary!\n");
356 if (p->u.data & (p->size - 1)) {
357 printk("Not aligned I/O!\n");
358 domain_crash_synchronous();
359 }
360 p->count = (PAGE_SIZE - (p->u.data & ~PAGE_MASK)) / p->size;
361 } else {
362 __update_guest_eip(inst_len);
363 }
364 } else if (p->dir == IOREQ_WRITE) {
365 p->u.data = regs->eax;
366 __update_guest_eip(inst_len);
367 } else
368 __update_guest_eip(inst_len);
370 p->addr = addr;
371 p->port_mm = 0;
373 /* Check if the packet needs to be intercepted */
374 if (vmx_io_intercept(p)) {
375 /* no blocking & no evtchn notification */
376 return;
377 }
379 set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
380 p->state = STATE_IOREQ_READY;
381 evtchn_send(IOPACKET_PORT);
382 do_block();
383 }
385 enum { COPY_IN = 0, COPY_OUT };
387 static inline int
388 vmx_copy(void *buf, unsigned long laddr, int size, int dir)
389 {
390 unsigned char *addr;
391 unsigned long mfn;
393 if ((size + (laddr & (PAGE_SIZE - 1))) >= PAGE_SIZE) {
394 printf("vmx_copy exceeds page boundary\n");
395 return 0;
396 }
398 mfn = phys_to_machine_mapping(l1e_get_pfn(gva_to_gpte(laddr)));
399 addr = map_domain_mem((mfn << PAGE_SHIFT) | (laddr & ~PAGE_MASK));
401 if (dir == COPY_IN)
402 memcpy(buf, addr, size);
403 else
404 memcpy(addr, buf, size);
406 unmap_domain_mem(addr);
407 return 1;
408 }
410 int
411 vmx_world_save(struct exec_domain *d, struct vmx_assist_context *c)
412 {
412 {
413 unsigned long inst_len;
414 int error = 0;
416 error |= __vmread(INSTRUCTION_LEN, &inst_len);
417 error |= __vmread(GUEST_EIP, &c->eip);
418 c->eip += inst_len; /* skip transition instruction */
419 error |= __vmread(GUEST_ESP, &c->esp);
420 error |= __vmread(GUEST_EFLAGS, &c->eflags);
422 error |= __vmread(CR0_READ_SHADOW, &c->cr0);
423 c->cr3 = d->arch.arch_vmx.cpu_cr3;
424 error |= __vmread(CR4_READ_SHADOW, &c->cr4);
426 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
427 error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
429 error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
430 error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
432 error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
433 error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
434 error |= __vmread(GUEST_CS_BASE, &c->cs_base);
435 error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
437 error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
438 error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
439 error |= __vmread(GUEST_DS_BASE, &c->ds_base);
440 error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
442 error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
443 error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
444 error |= __vmread(GUEST_ES_BASE, &c->es_base);
445 error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
447 error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
448 error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
449 error |= __vmread(GUEST_SS_BASE, &c->ss_base);
450 error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
452 error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
453 error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
454 error |= __vmread(GUEST_FS_BASE, &c->fs_base);
455 error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
457 error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
458 error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
459 error |= __vmread(GUEST_GS_BASE, &c->gs_base);
460 error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
462 error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
463 error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
464 error |= __vmread(GUEST_TR_BASE, &c->tr_base);
465 error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
467 error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
468 error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
469 error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
470 error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
472 return !error;
473 }
475 int
476 vmx_world_restore(struct exec_domain *d, struct vmx_assist_context *c)
477 {
477 {
478 unsigned long mfn, old_cr4;
479 int error = 0;
481 error |= __vmwrite(GUEST_EIP, c->eip);
482 error |= __vmwrite(GUEST_ESP, c->esp);
483 error |= __vmwrite(GUEST_EFLAGS, c->eflags);
485 error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
487 if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
488 /*
489 * This is a simple TLB flush, implying the guest has
490 * removed some translation or changed page attributes.
491 * We simply invalidate the shadow.
492 */
493 mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
494 if ((mfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table)) {
495 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
496 domain_crash_synchronous();
497 return 0;
498 }
499 shadow_sync_all(d->domain);
500 } else {
501 /*
502 * If different, make a shadow. Check if the PDBR is valid
503 * first.
504 */
505 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %lx", c->cr3);
506 if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
507 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
508 domain_crash_synchronous();
509 return 0;
510 }
511 mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
512 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
513 update_pagetables(d);
514 /*
515 * arch.shadow_table should now hold the next CR3 for shadow
516 */
517 d->arch.arch_vmx.cpu_cr3 = c->cr3;
518 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", c->cr3);
519 __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
520 }
522 error |= __vmread(CR4_READ_SHADOW, &old_cr4);
523 error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE));
524 error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
526 error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
527 error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
529 error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
530 error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
532 error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
533 error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
534 error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
535 error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
537 error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
538 error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
539 error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
540 error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
542 error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
543 error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
544 error |= __vmwrite(GUEST_ES_BASE, c->es_base);
545 error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
547 error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
548 error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
549 error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
550 error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
552 error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
553 error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
554 error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
555 error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
557 error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
558 error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
559 error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
560 error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
562 error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
563 error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
564 error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
565 error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
567 error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
568 error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
569 error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
570 error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
572 return !error;
573 }
575 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
577 int
578 vmx_assist(struct exec_domain *d, int mode)
579 {
579 {
580 struct vmx_assist_context c;
581 unsigned long magic, cp;
583 /* make sure vmxassist exists (this is not an error) */
584 if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), COPY_IN))
585 return 0;
586 if (magic != VMXASSIST_MAGIC)
587 return 0;
589 switch (mode) {
590 /*
591 * Transfer control to vmxassist.
592 * Store the current context in VMXASSIST_OLD_CONTEXT and load
593 * the new VMXASSIST_NEW_CONTEXT context. This context was created
594 * by vmxassist and will transfer control to it.
595 */
596 case VMX_ASSIST_INVOKE:
597 /* save the old context */
598 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), COPY_IN))
599 goto error;
600 if (cp != 0) {
601 if (!vmx_world_save(d, &c))
602 goto error;
603 if (!vmx_copy(&c, cp, sizeof(c), COPY_OUT))
604 goto error;
605 }
607 /* restore the new context, this should activate vmxassist */
608 if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), COPY_IN))
609 goto error;
610 if (cp != 0) {
611 if (!vmx_copy(&c, cp, sizeof(c), COPY_IN))
612 goto error;
613 if (!vmx_world_restore(d, &c))
614 goto error;
615 return 1;
616 }
617 break;
619 /*
620 * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
621 * above.
622 */
623 case VMX_ASSIST_RESTORE:
624 /* save the old context */
625 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), COPY_IN))
626 goto error;
627 if (cp != 0) {
628 if (!vmx_copy(&c, cp, sizeof(c), COPY_IN))
629 goto error;
630 if (!vmx_world_restore(d, &c))
631 goto error;
632 return 1;
633 }
634 break;
635 }
637 error:
638 printf("Failed to transfer to vmxassist\n");
639 domain_crash_synchronous();
640 return 0;
641 }
643 #define CASE_GET_REG(REG, reg) \
644 case REG_ ## REG: value = regs->reg; break
646 /*
647 * Write to control registers
648 */
649 static int mov_to_cr(int gp, int cr, struct xen_regs *regs)
650 {
651 unsigned long value;
652 unsigned long old_cr;
653 unsigned long eip;
654 struct exec_domain *d = current;
656 switch (gp) {
657 CASE_GET_REG(EAX, eax);
658 CASE_GET_REG(ECX, ecx);
659 CASE_GET_REG(EDX, edx);
660 CASE_GET_REG(EBX, ebx);
661 CASE_GET_REG(EBP, ebp);
662 CASE_GET_REG(ESI, esi);
663 CASE_GET_REG(EDI, edi);
664 case REG_ESP:
665 __vmread(GUEST_ESP, &value);
666 break;
667 default:
668 printk("invalid gp: %d\n", gp);
669 __vmx_bug(regs);
670 }
672 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
673 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
675 switch(cr) {
676 case 0:
677 {
678 unsigned long old_base_mfn, mfn;
680 /*
681 * CR0:
682 * We don't want to lose PE and PG.
683 */
684 __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
685 __vmwrite(CR0_READ_SHADOW, value);
687 if (value & (X86_CR0_PE | X86_CR0_PG) &&
688 !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
689 /*
690 * Enable paging
691 */
692 set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state);
693 /*
694 * The guest CR3 must be pointing to the guest physical.
695 */
696 if ( !VALID_MFN(mfn = phys_to_machine_mapping(
697 d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
698 !get_page(pfn_to_page(mfn), d->domain) )
699 {
700 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
701 d->arch.arch_vmx.cpu_cr3);
702 domain_crash_synchronous(); /* need to take a clean path */
703 }
704 old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
705 if ( old_base_mfn )
706 put_page(pfn_to_page(old_base_mfn));
708 /*
709 * Now arch.guest_table points to machine physical.
710 */
711 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
712 update_pagetables(d);
714 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
715 (unsigned long) (mfn << PAGE_SHIFT));
717 __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
718 /*
719 * arch->shadow_table should hold the next CR3 for shadow
720 */
721 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
722 d->arch.arch_vmx.cpu_cr3, mfn);
723 } else {
724 if ((value & X86_CR0_PE) == 0) {
725 __vmread(GUEST_EIP, &eip);
726 VMX_DBG_LOG(DBG_LEVEL_1,
727 "Disabling CR0.PE at %%eip 0x%lx", eip);
728 if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
729 set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
730 &d->arch.arch_vmx.cpu_state);
731 __vmread(GUEST_EIP, &eip);
732 VMX_DBG_LOG(DBG_LEVEL_1,
733 "Transfering control to vmxassist %%eip 0x%lx", eip);
734 return 0; /* do not update eip! */
735 }
736 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
737 &d->arch.arch_vmx.cpu_state)) {
738 __vmread(GUEST_EIP, &eip);
739 VMX_DBG_LOG(DBG_LEVEL_1,
740 "Enabling CR0.PE at %%eip 0x%lx", eip);
741 if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
742 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
743 &d->arch.arch_vmx.cpu_state);
744 __vmread(GUEST_EIP, &eip);
745 VMX_DBG_LOG(DBG_LEVEL_1,
746 "Restoring to %%eip 0x%lx", eip);
747 return 0; /* do not update eip! */
748 }
749 }
750 }
751 break;
752 }
753 case 3:
754 {
755 unsigned long old_base_mfn, mfn;
757 /*
758 * If paging is not enabled yet, simply copy the value to CR3.
759 */
760 if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
761 d->arch.arch_vmx.cpu_cr3 = value;
762 break;
763 }
765 /*
766 * We make a new one if the shadow does not exist.
767 */
768 if (value == d->arch.arch_vmx.cpu_cr3) {
769 /*
770 * This is a simple TLB flush, implying the guest has
771 * removed some translation or changed page attributes.
772 * We simply invalidate the shadow.
773 */
774 mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
775 if ((mfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table))
776 __vmx_bug(regs);
777 shadow_sync_all(d->domain);
778 } else {
779 /*
780 * If different, make a shadow. Check if the PDBR is valid
781 * first.
782 */
783 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
784 if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
785 !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
786 !get_page(pfn_to_page(mfn), d->domain) )
787 {
788 VMX_DBG_LOG(DBG_LEVEL_VMMU,
789 "Invalid CR3 value=%lx", value);
790 domain_crash_synchronous(); /* need to take a clean path */
791 }
792 old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
793 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
794 if ( old_base_mfn )
795 put_page(pfn_to_page(old_base_mfn));
796 update_pagetables(d);
797 /*
798 * arch.shadow_table should now hold the next CR3 for shadow
799 */
800 d->arch.arch_vmx.cpu_cr3 = value;
801 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
802 value);
803 __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
804 }
805 break;
806 }
807 case 4:
808 /* CR4 */
809 if (value & X86_CR4_PAE)
810 __vmx_bug(regs); /* not implemented */
811 __vmread(CR4_READ_SHADOW, &old_cr);
813 __vmwrite(GUEST_CR4, (value | X86_CR4_VMXE));
814 __vmwrite(CR4_READ_SHADOW, value);
816 /*
817 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
818 * all TLB entries except global entries.
819 */
820 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
821 vmx_shadow_clear_state(d->domain);
822 shadow_sync_all(d->domain);
823 }
824 break;
825 default:
826 printk("invalid cr: %d\n", gp);
827 __vmx_bug(regs);
828 }
830 return 1;
831 }
833 #define CASE_SET_REG(REG, reg) \
834 case REG_ ## REG: \
835 regs->reg = value; \
836 break
838 /*
839 * Read from control registers. CR0 and CR4 are read from the shadow.
840 */
841 static void mov_from_cr(int cr, int gp, struct xen_regs *regs)
842 {
843 unsigned long value;
844 struct exec_domain *d = current;
846 if (cr != 3)
847 __vmx_bug(regs);
849 value = (unsigned long) d->arch.arch_vmx.cpu_cr3;
850 ASSERT(value);
852 switch (gp) {
853 CASE_SET_REG(EAX, eax);
854 CASE_SET_REG(ECX, ecx);
855 CASE_SET_REG(EDX, edx);
856 CASE_SET_REG(EBX, ebx);
857 CASE_SET_REG(EBP, ebp);
858 CASE_SET_REG(ESI, esi);
859 CASE_SET_REG(EDI, edi);
860 case REG_ESP:
861 __vmwrite(GUEST_ESP, value);
862 regs->esp = value;
863 break;
864 default:
865 printk("invalid gp: %d\n", gp);
866 __vmx_bug(regs);
867 }
869 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
870 }
872 static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs)
873 {
874 unsigned int gp, cr;
875 unsigned long value;
877 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
878 case TYPE_MOV_TO_CR:
879 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
880 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
881 return mov_to_cr(gp, cr, regs);
882 case TYPE_MOV_FROM_CR:
883 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
884 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
885 mov_from_cr(cr, gp, regs);
886 break;
887 case TYPE_CLTS:
888 __vmread(GUEST_CR0, &value);
889 value &= ~X86_CR0_TS; /* clear TS */
890 __vmwrite(GUEST_CR0, value);
892 __vmread(CR0_READ_SHADOW, &value);
893 value &= ~X86_CR0_TS; /* clear TS */
894 __vmwrite(CR0_READ_SHADOW, value);
895 break;
896 default:
897 __vmx_bug(regs);
898 break;
899 }
900 return 1;
901 }
903 static inline void vmx_do_msr_read(struct xen_regs *regs)
904 {
905 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
906 regs->ecx, regs->eax, regs->edx);
908 rdmsr(regs->ecx, regs->eax, regs->edx);
910 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
911 "ecx=%lx, eax=%lx, edx=%lx",
912 regs->ecx, regs->eax, regs->edx);
913 }
915 /*
916 * Need to use this exit to reschedule
917 */
918 static inline void vmx_vmexit_do_hlt(void)
919 {
920 #if VMX_DEBUG
921 unsigned long eip;
922 __vmread(GUEST_EIP, &eip);
923 #endif
924 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%p", eip);
925 raise_softirq(SCHEDULE_SOFTIRQ);
926 }
928 static inline void vmx_vmexit_do_mwait(void)
929 {
930 #if VMX_DEBUG
931 unsigned long eip;
932 __vmread(GUEST_EIP, &eip);
933 #endif
934 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%p", eip);
935 raise_softirq(SCHEDULE_SOFTIRQ);
936 }
938 #define BUF_SIZ 256
939 #define MAX_LINE 80
940 char print_buf[BUF_SIZ];
941 static int index;
943 static void vmx_print_line(const char c, struct exec_domain *d)
944 {
944 {
946 if (index == MAX_LINE || c == '\n') {
947 if (index == MAX_LINE) {
948 print_buf[index++] = c;
949 }
950 print_buf[index] = '\0';
951 printk("(GUEST: %u) %s\n", d->domain->id, (char *) &print_buf);
952 index = 0;
953 }
954 else
955 print_buf[index++] = c;
956 }
958 void save_vmx_execution_context(execution_context_t *ctxt)
959 {
960 __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
961 __vmread(GUEST_ESP, &ctxt->esp);
962 __vmread(GUEST_EFLAGS, &ctxt->eflags);
963 __vmread(GUEST_CS_SELECTOR, &ctxt->cs);
964 __vmread(GUEST_EIP, &ctxt->eip);
966 __vmread(GUEST_GS_SELECTOR, &ctxt->gs);
967 __vmread(GUEST_FS_SELECTOR, &ctxt->fs);
968 __vmread(GUEST_ES_SELECTOR, &ctxt->es);
969 __vmread(GUEST_DS_SELECTOR, &ctxt->ds);
970 }
972 #ifdef XEN_DEBUGGER
973 void save_xen_regs(struct xen_regs *regs)
974 {
975 __vmread(GUEST_SS_SELECTOR, &regs->xss);
976 __vmread(GUEST_ESP, &regs->esp);
977 __vmread(GUEST_EFLAGS, &regs->eflags);
978 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
979 __vmread(GUEST_EIP, &regs->eip);
981 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
982 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
983 __vmread(GUEST_ES_SELECTOR, &regs->xes);
984 __vmread(GUEST_DS_SELECTOR, &regs->xds);
985 }
987 void restore_xen_regs(struct xen_regs *regs)
988 {
989 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
990 __vmwrite(GUEST_ESP, regs->esp);
991 __vmwrite(GUEST_EFLAGS, regs->eflags);
992 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
993 __vmwrite(GUEST_EIP, regs->eip);
995 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
996 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
997 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
998 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
999 }
1000 #endif
1002 asmlinkage void vmx_vmexit_handler(struct xen_regs regs)
1003 {
1004 unsigned int exit_reason, idtv_info_field;
1005 unsigned long exit_qualification, eip, inst_len = 0;
1006 struct exec_domain *ed = current;
1007 int error;
1009 if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
1010 __vmx_bug(&regs);
1012 perfc_incra(vmexits, exit_reason);
1014 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
1015 if (idtv_info_field & INTR_INFO_VALID_MASK) {
1016 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
1017 if ((idtv_info_field & 0xff) == 14) {
1018 unsigned long error_code;
1020 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
1021 printk("#PG error code: %lx\n", error_code);
1023 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x",
1024 idtv_info_field);
1027 /* don't bother with H/W interrupts */
1028 if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
1029 exit_reason != EXIT_REASON_VMCALL &&
1030 exit_reason != EXIT_REASON_IO_INSTRUCTION)
1031 VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
1033 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
1034 domain_crash_synchronous();
1035 return;
1036 }
1038 __vmread(GUEST_EIP, &eip);
1039 TRACE_3D(TRC_VMX_VMEXIT, ed->domain->id, eip, exit_reason);
1041 switch (exit_reason) {
1042 case EXIT_REASON_EXCEPTION_NMI:
1043 {
1044 /*
1045 * We don't enable software-interrupt exiting (INT n), so an exit
1046 * here means either (1) an exception (e.g. #PG) raised in the guest,
1047 * or (2) an NMI.
1048 */
1049 int error;
1050 unsigned int vector;
1051 unsigned long va;
1053 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1054 && !(vector & INTR_INFO_VALID_MASK))
1055 __vmx_bug(&regs);
1056 vector &= 0xff;
1058 perfc_incra(cause_vector, vector);
1060 TRACE_3D(TRC_VMX_VECTOR, ed->domain->id, eip, vector);
1061 switch (vector) {
1062 #ifdef XEN_DEBUGGER
1063 case TRAP_debug:
1064 {
1065 save_xen_regs(&regs);
1066 pdb_handle_exception(1, &regs, 1);
1067 restore_xen_regs(&regs);
1068 break;
1069 }
1070 case TRAP_int3:
1071 {
1072 save_xen_regs(&regs);
1073 pdb_handle_exception(3, &regs, 1);
1074 restore_xen_regs(&regs);
1075 break;
1076 }
1077 #endif
1078 case TRAP_gp_fault:
1079 {
1080 vmx_do_general_protection_fault(&regs);
1081 break;
1082 }
1083 case TRAP_page_fault:
1084 {
1085 __vmread(EXIT_QUALIFICATION, &va);
1086 __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
1087 VMX_DBG_LOG(DBG_LEVEL_VMMU,
1088 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
1089 regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi,
1090 regs.edi);
1091 ed->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
1093 if (!(error = vmx_do_page_fault(va, &regs))) {
1094 /*
1095 * Inject #PG using Interruption-Information Fields
1096 */
1097 unsigned long intr_fields;
1099 intr_fields = (INTR_INFO_VALID_MASK |
1100 INTR_TYPE_EXCEPTION |
1101 INTR_INFO_DELIEVER_CODE_MASK |
1102 TRAP_page_fault);
1103 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
1104 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, regs.error_code);
1105 ed->arch.arch_vmx.cpu_cr2 = va;
1106 TRACE_3D(TRC_VMX_INT, ed->domain->id, TRAP_page_fault, va);
1107 }
1108 break;
1109 }
1110 case TRAP_nmi:
1111 do_nmi(&regs, 0);
1112 break;
1113 default:
1114 printk("unexpected VMexit for exception vector 0x%x\n", vector);
1115 //__vmx_bug(&regs);
1116 break;
1117 }
1118 break;
1119 }
1120 case EXIT_REASON_EXTERNAL_INTERRUPT:
1121 {
1122 extern int vector_irq[];
1123 extern asmlinkage void do_IRQ(struct xen_regs *);
1124 extern void smp_apic_timer_interrupt(struct xen_regs *);
1125 extern void timer_interrupt(int, void *, struct xen_regs *);
1126 unsigned int vector;
1128 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1129 && !(vector & INTR_INFO_VALID_MASK))
1130 __vmx_bug(&regs);
1132 vector &= 0xff;
1133 local_irq_disable();
1135 if (vector == LOCAL_TIMER_VECTOR) {
1136 smp_apic_timer_interrupt(&regs);
1137 } else {
1138 regs.entry_vector = (vector == FIRST_DEVICE_VECTOR?
1139 0 : vector_irq[vector]);
1140 do_IRQ(&regs);
1142 break;
1144 case EXIT_REASON_PENDING_INTERRUPT:
1145 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
1146 MONITOR_CPU_BASED_EXEC_CONTROLS);
1147 vmx_intr_assist(ed);
1148 break;
1149 case EXIT_REASON_TASK_SWITCH:
1150 __vmx_bug(&regs);
1151 break;
1152 case EXIT_REASON_CPUID:
1153 __get_instruction_length(inst_len);
1154 vmx_vmexit_do_cpuid(regs.eax, &regs);
1155 __update_guest_eip(inst_len);
1156 break;
1157 case EXIT_REASON_HLT:
1158 __get_instruction_length(inst_len);
1159 __update_guest_eip(inst_len);
1160 vmx_vmexit_do_hlt();
1161 break;
1162 case EXIT_REASON_INVLPG:
1163 {
1164 unsigned long va;
1166 __vmread(EXIT_QUALIFICATION, &va);
1167 vmx_vmexit_do_invlpg(va);
1168 __get_instruction_length(inst_len);
1169 __update_guest_eip(inst_len);
1170 break;
1171 }
1172 case EXIT_REASON_VMCALL:
1173 __get_instruction_length(inst_len);
1174 __vmread(GUEST_EIP, &eip);
1175 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1177 vmx_print_line(regs.eax, ed); /* provides the current domain */
1178 __update_guest_eip(inst_len);
1179 break;
1180 case EXIT_REASON_CR_ACCESS:
1181 {
1182 __vmread(GUEST_EIP, &eip);
1183 __get_instruction_length(inst_len);
1184 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1186 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
1187 eip, inst_len, exit_qualification);
1188 if (vmx_cr_access(exit_qualification, &regs))
1189 __update_guest_eip(inst_len);
1190 break;
1191 }
1192 case EXIT_REASON_DR_ACCESS:
1193 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1194 vmx_dr_access(exit_qualification, &regs);
1195 __get_instruction_length(inst_len);
1196 __update_guest_eip(inst_len);
1197 break;
1198 case EXIT_REASON_IO_INSTRUCTION:
1199 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1200 __get_instruction_length(inst_len);
1201 vmx_io_instruction(&regs, exit_qualification, inst_len);
1202 break;
1203 case EXIT_REASON_MSR_READ:
1204 __get_instruction_length(inst_len);
1205 vmx_do_msr_read(&regs);
1206 __update_guest_eip(inst_len);
1207 break;
1208 case EXIT_REASON_MSR_WRITE:
1209 __vmread(GUEST_EIP, &eip);
1210 VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%p, eax=%p, edx=%p",
1211 eip, regs.eax, regs.edx);
1212 /* just ignore this point */
1213 __get_instruction_length(inst_len);
1214 __update_guest_eip(inst_len);
1215 break;
1216 case EXIT_REASON_MWAIT_INSTRUCTION:
1217 __get_instruction_length(inst_len);
1218 __update_guest_eip(inst_len);
1219 vmx_vmexit_do_mwait();
1220 break;
1221 default:
1222 __vmx_bug(&regs); /* should not happen */
1225 vmx_intr_assist(ed);
1226 return;
1229 asmlinkage void load_cr2(void)
1231 struct exec_domain *d = current;
1233 local_irq_disable();
1234 #ifdef __i386__
1235 asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1236 #else
1237 asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1238 #endif
1239 }
1242 #endif /* CONFIG_VMX */
1244 /*
1245 * Local variables:
1246 * mode: C
1247 * c-set-style: "BSD"
1248 * c-basic-offset: 4
1249 * tab-width: 4
1250 * indent-tabs-mode: nil
1251 * End:
1252 */