xen/arch/x86/vmx.c @ 4662:9a768d11cc7b (debuggers.hg)

bitkeeper revision 1.1358 (4267e561Ml7gO0DQYGp9EYRUYPBDHA)

Merge burn.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into burn.cl.cam.ac.uk:/local/scratch-1/maf46/xen-unstable.bk

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author   maf46@burn.cl.cam.ac.uk
date     Thu Apr 21 17:39:45 2005 +0000 (2005-04-21)
parents  717d7dbd06ea 8e987582b901
children e4e76c16ec78
1 /*
2 * vmx.c: handling VMX architecture-related VM exits
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/init.h>
22 #include <xen/lib.h>
23 #include <xen/trace.h>
24 #include <xen/sched.h>
25 #include <xen/softirq.h>
26 #include <asm/current.h>
27 #include <asm/io.h>
28 #include <asm/irq.h>
29 #include <asm/shadow.h>
30 #include <asm/regs.h>
31 #include <asm/cpufeature.h>
32 #include <asm/processor.h>
33 #include <asm/types.h>
34 #include <asm/msr.h>
35 #include <asm/spinlock.h>
36 #include <asm/vmx.h>
37 #include <asm/vmx_vmcs.h>
38 #include <asm/vmx_intercept.h>
39 #include <asm/shadow.h>
40 #include <public/io/ioreq.h>
42 #ifdef CONFIG_VMX
44 int vmcs_size;
45 unsigned int opt_vmx_debug_level = 0;
47 extern long evtchn_send(int lport);
48 extern long do_block(void);
49 void do_nmi(struct xen_regs *, unsigned long);
51 int start_vmx()
52 {
53 struct vmcs_struct *vmcs;
54 unsigned long ecx;
55 u64 phys_vmcs; /* debugging */
57 vmcs_size = VMCS_SIZE;
58 /*
59 * Xen does not fill x86_capability words except 0.
60 */
61 ecx = cpuid_ecx(1);
62 boot_cpu_data.x86_capability[4] = ecx;
64 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
65 return 0;
67 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
69 if (!(vmcs = alloc_vmcs())) {
70 printk("Failed to allocate VMCS\n");
71 return 0;
72 }
74 phys_vmcs = (u64) virt_to_phys(vmcs);
76 if (!(__vmxon(phys_vmcs))) {
77 printk("VMXON is done\n");
78 }
80 return 1;
81 }
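start_vmx() above probes CPUID leaf 1 for the VMX feature bit, sets CR4.VMXE, allocates a VMCS region and enters VMX root operation with VMXON. The inverted-looking test around __vmxon() suggests that it returns 0 on success, so the "VMXON is done" message is the success path; that reading is inferred from this call site, not from the definition of __vmxon(), which is not in this file.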
83 void stop_vmx()
84 {
85 if (read_cr4() & X86_CR4_VMXE)
86 __vmxoff();
87 }
89 /*
90 * Not all exit cases receive a valid value in the VM-exit instruction length field.
91 */
92 #define __get_instruction_length(len) \
93 __vmread(INSTRUCTION_LEN, &(len)); \
94 if ((len) < 1 || (len) > 15) \
95 __vmx_bug(&regs);
97 static void inline __update_guest_eip(unsigned long inst_len)
98 {
99 unsigned long current_eip;
101 __vmread(GUEST_EIP, &current_eip);
102 __vmwrite(GUEST_EIP, current_eip + inst_len);
103 }
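Together, __get_instruction_length() and __update_guest_eip() form the idiom that most exit handlers below use to step the guest past the instruction that caused the VM exit. A minimal sketch of the pattern, as it appears in the CPUID and HLT cases of vmx_vmexit_handler() later in this file (the macro expands a reference to a local `regs`, so it only works inside a handler that declares one):

    unsigned long inst_len = 0;

    __get_instruction_length(inst_len);   /* read INSTRUCTION_LEN; __vmx_bug() unless 1..15 */
    /* ... emulate the exiting instruction on the guest's behalf ... */
    __update_guest_eip(inst_len);         /* GUEST_EIP += inst_len */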
106 #include <asm/domain_page.h>
108 static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs)
109 {
110 struct exec_domain *ed = current;
111 unsigned long eip;
112 l1_pgentry_t gpte;
113 unsigned long gpa; /* FIXME: PAE */
114 int result;
116 #if VMX_DEBUG
117 {
118 __vmread(GUEST_EIP, &eip);
119 VMX_DBG_LOG(DBG_LEVEL_VMMU,
120 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
121 va, eip, regs->error_code);
122 }
123 #endif
125 /*
126 * If the guest has not yet enabled paging, we are still running on the
127 * 1:1 page tables and should never have gotten here.
128 */
129 if ( !test_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state) )
130 {
131 printk("vmx_do_page_fault while running on 1:1 page table\n");
132 return 0;
133 }
135 gpte = gva_to_gpte(va);
136 if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
137 return 0;
138 gpa = l1e_get_phys(gpte) + (va & ~PAGE_MASK);
140 /* Use 1:1 page table to identify MMIO address space */
141 if (mmio_space(gpa))
142 handle_mmio(va, gpa);
144 result = shadow_fault(va, regs);
146 #if 0
147 if ( !result )
148 {
149 __vmread(GUEST_EIP, &eip);
150 printk("vmx pgfault to guest va=%p eip=%p\n", va, eip);
151 }
152 #endif
154 return result;
155 }
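In short: the faulting linear address is pushed through the guest page tables (gva_to_gpte()); if the resulting guest physical address lies in MMIO space it is handed to handle_mmio(), and shadow_fault() is then asked to repair the shadow page table. A zero return value tells the #PF case in vmx_vmexit_handler() to reflect the fault back into the guest.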
157 static void vmx_do_general_protection_fault(struct xen_regs *regs)
158 {
159 unsigned long eip, error_code;
160 unsigned long intr_fields;
162 __vmread(GUEST_EIP, &eip);
163 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
165 VMX_DBG_LOG(DBG_LEVEL_1,
166 "vmx_general_protection_fault: eip = %lx, erro_code = %lx",
167 eip, error_code);
169 VMX_DBG_LOG(DBG_LEVEL_1,
170 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
171 regs->eax, regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
173 /* Reflect it back into the guest */
174 intr_fields = (INTR_INFO_VALID_MASK |
175 INTR_TYPE_EXCEPTION |
176 INTR_INFO_DELIEVER_CODE_MASK |
177 TRAP_gp_fault);
178 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
179 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
180 }
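The same three steps, compose the interruption-information word, write it to VM_ENTRY_INTR_INFO_FIELD, then supply the error code, reappear in the page-fault path of vmx_vmexit_handler(). A hypothetical helper that factors the pattern out might look like the sketch below; no such function exists in this file, and the constants are simply the ones already used above.

    /* Sketch only: reflect an exception with an error code into the guest. */
    static inline void vmx_inject_exception(unsigned int trap, unsigned long error_code)
    {
        unsigned long intr_fields;

        intr_fields = (INTR_INFO_VALID_MASK |
                       INTR_TYPE_EXCEPTION |
                       INTR_INFO_DELIEVER_CODE_MASK |
                       trap);
        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
    }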
182 static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs)
183 {
184 unsigned int eax, ebx, ecx, edx;
185 unsigned long eip;
187 __vmread(GUEST_EIP, &eip);
189 VMX_DBG_LOG(DBG_LEVEL_1,
190 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
191 " (esi) %lx, (edi) %lx",
192 regs->eax, regs->ebx, regs->ecx, regs->edx,
193 regs->esi, regs->edi);
195 cpuid(input, &eax, &ebx, &ecx, &edx);
197 if (input == 1) {
198 clear_bit(X86_FEATURE_PGE, &edx); /* temporarily disabled */
199 clear_bit(X86_FEATURE_PSE, &edx);
200 clear_bit(X86_FEATURE_PAE, &edx);
201 clear_bit(X86_FEATURE_PSE36, &edx);
202 }
204 regs->eax = (unsigned long) eax;
205 regs->ebx = (unsigned long) ebx;
206 regs->ecx = (unsigned long) ecx;
207 regs->edx = (unsigned long) edx;
209 VMX_DBG_LOG(DBG_LEVEL_1,
210 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
211 eip, input, eax, ebx, ecx, edx);
213 }
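The bits cleared for leaf 1 (PGE, PSE, PAE, PSE36) hide large-page and PAE paging features from the guest, presumably because the shadow page-table code here cannot virtualize them yet; the "temporarily disabled" comment applies to the whole group.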
215 #define CASE_GET_REG_P(REG, reg) \
216 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
218 static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs)
219 {
220 unsigned int reg;
221 unsigned long *reg_p = 0;
222 struct exec_domain *ed = current;
223 unsigned long eip;
225 __vmread(GUEST_EIP, &eip);
227 reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
229 VMX_DBG_LOG(DBG_LEVEL_1,
230 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
231 eip, reg, exit_qualification);
233 switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
234 CASE_GET_REG_P(EAX, eax);
235 CASE_GET_REG_P(ECX, ecx);
236 CASE_GET_REG_P(EDX, edx);
237 CASE_GET_REG_P(EBX, ebx);
238 CASE_GET_REG_P(EBP, ebp);
239 CASE_GET_REG_P(ESI, esi);
240 CASE_GET_REG_P(EDI, edi);
241 case REG_ESP:
242 break;
243 default:
244 __vmx_bug(regs);
245 }
247 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
248 case TYPE_MOV_TO_DR:
249 /* don't need to check the range */
250 if (reg != REG_ESP)
251 ed->arch.debugreg[reg] = *reg_p;
252 else {
253 unsigned long value;
254 __vmread(GUEST_ESP, &value);
255 ed->arch.debugreg[reg] = value;
256 }
257 break;
258 case TYPE_MOV_FROM_DR:
259 if (reg != REG_ESP)
260 *reg_p = ed->arch.debugreg[reg];
261 else {
262 __vmwrite(GUEST_ESP, ed->arch.debugreg[reg]);
263 }
264 break;
265 }
266 }
268 /*
269 * Invalidate the TLB for va. Invalidate the shadow page corresponding
270 * to the address va.
271 */
272 static void vmx_vmexit_do_invlpg(unsigned long va)
273 {
274 unsigned long eip;
275 struct exec_domain *ed = current;
277 __vmread(GUEST_EIP, &eip);
279 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%p, va=%p",
280 eip, va);
282 /*
283 * We do the safest thing first, then try to update the shadow
284 * copy from the guest.
285 */
286 shadow_invlpg(ed, va);
287 }
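vmx_io_instruction() below decodes the VM-exit qualification for I/O instructions. In the Intel-defined layout, which the bit tests in the code follow, bits 2:0 hold the access size minus one, bit 3 the direction (set for IN), bit 4 is set for string instructions (INS/OUTS), bit 5 for a REP prefix, and bit 6 selects an immediate port operand, in which case the port number sits in bits 31:16; otherwise the port comes from %dx (regs->edx).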
289 static void vmx_io_instruction(struct xen_regs *regs,
290 unsigned long exit_qualification, unsigned long inst_len)
291 {
292 struct exec_domain *d = current;
293 vcpu_iodata_t *vio;
294 ioreq_t *p;
295 unsigned long addr;
296 unsigned long eip, cs, eflags;
297 int vm86;
299 __vmread(GUEST_EIP, &eip);
300 __vmread(GUEST_CS_SELECTOR, &cs);
301 __vmread(GUEST_EFLAGS, &eflags);
302 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
304 VMX_DBG_LOG(DBG_LEVEL_1,
305 "vmx_io_instruction: vm86 %d, eip=%p:%p, exit_qualification = %lx",
306 vm86, cs, eip, exit_qualification);
308 if (test_bit(6, &exit_qualification))
309 addr = (exit_qualification >> 16) & (0xffff);
310 else
311 addr = regs->edx & 0xffff;
313 if (addr == 0x80) {
314 __update_guest_eip(inst_len);
315 return;
316 }
318 vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
319 if (vio == 0) {
320 VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx", (unsigned long) vio);
321 domain_crash_synchronous();
322 }
323 p = &vio->vp_ioreq;
324 p->dir = test_bit(3, &exit_qualification);
326 p->pdata_valid = 0;
327 p->count = 1;
328 p->size = (exit_qualification & 7) + 1;
330 if (test_bit(4, &exit_qualification)) {
331 p->df = (eflags & X86_EFLAGS_DF) ? 1 : 0;
332 p->pdata_valid = 1;
334 if (vm86) {
335 unsigned long seg;
336 if (p->dir == IOREQ_WRITE) {
337 __vmread(GUEST_DS_SELECTOR, &seg);
338 p->u.pdata = (void *)
339 ((seg << 4) | (regs->esi & 0xFFFF));
340 } else {
341 __vmread(GUEST_ES_SELECTOR, &seg);
342 p->u.pdata = (void *)
343 ((seg << 4) | (regs->edi & 0xFFFF));
344 }
345 } else {
346 p->u.pdata = (void *) ((p->dir == IOREQ_WRITE) ?
347 regs->esi : regs->edi);
348 }
349 p->u.pdata = (void *) gva_to_gpa(p->u.data);
352 if (test_bit(5, &exit_qualification))
353 p->count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
354 if ((p->u.data & PAGE_MASK) !=
355 ((p->u.data + p->count * p->size - 1) & PAGE_MASK)) {
356 printk("stringio crosses page boundary!\n");
357 if (p->u.data & (p->size - 1)) {
358 printk("Not aligned I/O!\n");
359 domain_crash_synchronous();
360 }
361 p->count = (PAGE_SIZE - (p->u.data & ~PAGE_MASK)) / p->size;
362 } else {
363 __update_guest_eip(inst_len);
364 }
365 } else if (p->dir == IOREQ_WRITE) {
366 p->u.data = regs->eax;
367 __update_guest_eip(inst_len);
368 } else
369 __update_guest_eip(inst_len);
371 p->addr = addr;
372 p->port_mm = 0;
374 /* Check if the packet needs to be intercepted */
375 if (vmx_io_intercept(p)) {
376 /* no blocking & no evtchn notification */
377 return;
378 }
380 set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
381 p->state = STATE_IOREQ_READY;
382 evtchn_send(IOPACKET_PORT);
383 do_block();
384 }
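If vmx_io_intercept() does not claim the request, the filled-in ioreq in the shared page is marked STATE_IOREQ_READY, an event is sent on IOPACKET_PORT, and the domain blocks until the request has been serviced. The code assumes an external device-emulation process is listening on the other end of that event channel; that is inferred from the shared-page and event-channel plumbing here, not stated in this file.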
386 enum { COPY_IN = 0, COPY_OUT };
388 static inline int
389 vmx_copy(void *buf, unsigned long laddr, int size, int dir)
390 {
391 unsigned char *addr;
392 unsigned long mfn;
394 if ((size + (laddr & (PAGE_SIZE - 1))) >= PAGE_SIZE) {
395 printf("vmx_copy exceeds page boundary\n");
396 return 0;
397 }
399 mfn = phys_to_machine_mapping(l1e_get_pfn(gva_to_gpte(laddr)));
400 addr = map_domain_mem((mfn << PAGE_SHIFT) | (laddr & ~PAGE_MASK));
402 if (dir == COPY_IN)
403 memcpy(buf, addr, size);
404 else
405 memcpy(addr, buf, size);
407 unmap_domain_mem(addr);
408 return 1;
409 }
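vmx_copy() refuses transfers that would cross a page boundary, so callers keep each copy small and page-contained. A sketch of the intended usage, mirroring what vmx_assist() below does (not original code):

    unsigned long cp;                 /* guest address of a saved context */
    struct vmx_assist_context c;

    /* read the pointer stored at VMXASSIST_OLD_CONTEXT, then the structure it names */
    if (vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), COPY_IN) && cp != 0)
        (void) vmx_copy(&c, cp, sizeof(c), COPY_IN);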
411 int
412 vmx_world_save(struct exec_domain *d, struct vmx_assist_context *c)
413 {
414 unsigned long inst_len;
415 int error = 0;
417 error |= __vmread(INSTRUCTION_LEN, &inst_len);
418 error |= __vmread(GUEST_EIP, &c->eip);
419 c->eip += inst_len; /* skip transition instruction */
420 error |= __vmread(GUEST_ESP, &c->esp);
421 error |= __vmread(GUEST_EFLAGS, &c->eflags);
423 error |= __vmread(CR0_READ_SHADOW, &c->cr0);
424 c->cr3 = d->arch.arch_vmx.cpu_cr3;
425 error |= __vmread(CR4_READ_SHADOW, &c->cr4);
427 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
428 error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
430 error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
431 error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
433 error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
434 error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
435 error |= __vmread(GUEST_CS_BASE, &c->cs_base);
436 error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
438 error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
439 error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
440 error |= __vmread(GUEST_DS_BASE, &c->ds_base);
441 error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
443 error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
444 error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
445 error |= __vmread(GUEST_ES_BASE, &c->es_base);
446 error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
448 error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
449 error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
450 error |= __vmread(GUEST_SS_BASE, &c->ss_base);
451 error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
453 error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
454 error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
455 error |= __vmread(GUEST_FS_BASE, &c->fs_base);
456 error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
458 error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
459 error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
460 error |= __vmread(GUEST_GS_BASE, &c->gs_base);
461 error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
463 error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
464 error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
465 error |= __vmread(GUEST_TR_BASE, &c->tr_base);
466 error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
468 error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
469 error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
470 error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
471 error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
473 return !error;
474 }
476 int
477 vmx_world_restore(struct exec_domain *d, struct vmx_assist_context *c)
478 {
479 unsigned long mfn, old_cr4;
480 int error = 0;
482 error |= __vmwrite(GUEST_EIP, c->eip);
483 error |= __vmwrite(GUEST_ESP, c->esp);
484 error |= __vmwrite(GUEST_EFLAGS, c->eflags);
486 error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
488 if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
489 /*
490 * This is a simple TLB flush, implying the guest has
491 * removed some translation or changed page attributes.
492 * We simply invalidate the shadow.
493 */
494 mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
495 if ((mfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table)) {
496 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
497 domain_crash_synchronous();
498 return 0;
499 }
500 shadow_sync_all(d->domain);
501 } else {
502 /*
503 * If different, make a shadow. Check if the PDBR is valid
504 * first.
505 */
506 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %lx", c->cr3);
507 if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
508 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
509 domain_crash_synchronous();
510 return 0;
511 }
512 mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
513 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
514 update_pagetables(d);
515 /*
516 * arch.shadow_table should now hold the next CR3 for shadow
517 */
518 d->arch.arch_vmx.cpu_cr3 = c->cr3;
519 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", c->cr3);
520 __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
521 }
523 error |= __vmread(CR4_READ_SHADOW, &old_cr4);
524 error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE));
525 error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
527 error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
528 error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
530 error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
531 error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
533 error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
534 error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
535 error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
536 error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
538 error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
539 error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
540 error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
541 error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
543 error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
544 error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
545 error |= __vmwrite(GUEST_ES_BASE, c->es_base);
546 error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
548 error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
549 error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
550 error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
551 error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
553 error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
554 error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
555 error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
556 error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
558 error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
559 error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
560 error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
561 error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
563 error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
564 error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
565 error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
566 error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
568 error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
569 error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
570 error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
571 error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
573 return !error;
574 }
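vmx_world_save() and vmx_world_restore() snapshot and reload essentially the whole guest execution state held in the VMCS: EIP/ESP/EFLAGS, the control registers, the descriptor-table registers and every segment register. They exist to support the vmxassist hand-off below, which swaps the guest between its own context and one prepared by vmxassist inside the guest; vmxassist is Xen's helper for running the guest's real-mode code, a characterization that comes from the surrounding Xen code base rather than from anything stated in this file.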
576 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
578 int
579 vmx_assist(struct exec_domain *d, int mode)
580 {
581 struct vmx_assist_context c;
582 unsigned long magic, cp;
584 /* make sure vmxassist exists (this is not an error) */
585 if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), COPY_IN))
586 return 0;
587 if (magic != VMXASSIST_MAGIC)
588 return 0;
590 switch (mode) {
591 /*
592 * Transfer control to vmxassist.
593 * Store the current context in VMXASSIST_OLD_CONTEXT and load
594 * the new VMXASSIST_NEW_CONTEXT context. This context was created
595 * by vmxassist and will transfer control to it.
596 */
597 case VMX_ASSIST_INVOKE:
598 /* save the old context */
599 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), COPY_IN))
600 goto error;
601 if (cp != 0) {
602 if (!vmx_world_save(d, &c))
603 goto error;
604 if (!vmx_copy(&c, cp, sizeof(c), COPY_OUT))
605 goto error;
606 }
608 /* restore the new context, this should activate vmxassist */
609 if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), COPY_IN))
610 goto error;
611 if (cp != 0) {
612 if (!vmx_copy(&c, cp, sizeof(c), COPY_IN))
613 goto error;
614 if (!vmx_world_restore(d, &c))
615 goto error;
616 return 1;
617 }
618 break;
620 /*
621 * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
622 * above.
623 */
624 case VMX_ASSIST_RESTORE:
625 /* fetch the old context saved by VMX_ASSIST_INVOKE */
626 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), COPY_IN))
627 goto error;
628 if (cp != 0) {
629 if (!vmx_copy(&c, cp, sizeof(c), COPY_IN))
630 goto error;
631 if (!vmx_world_restore(d, &c))
632 goto error;
633 return 1;
634 }
635 break;
636 }
638 error:
639 printf("Failed to transfer to vmxassist\n");
640 domain_crash_synchronous();
641 return 0;
642 }
644 #define CASE_GET_REG(REG, reg) \
645 case REG_ ## REG: value = regs->reg; break
647 /*
648 * Write to control registers
649 */
650 static int mov_to_cr(int gp, int cr, struct xen_regs *regs)
651 {
652 unsigned long value;
653 unsigned long old_cr;
654 unsigned long eip;
655 struct exec_domain *d = current;
657 switch (gp) {
658 CASE_GET_REG(EAX, eax);
659 CASE_GET_REG(ECX, ecx);
660 CASE_GET_REG(EDX, edx);
661 CASE_GET_REG(EBX, ebx);
662 CASE_GET_REG(EBP, ebp);
663 CASE_GET_REG(ESI, esi);
664 CASE_GET_REG(EDI, edi);
665 case REG_ESP:
666 __vmread(GUEST_ESP, &value);
667 break;
668 default:
669 printk("invalid gp: %d\n", gp);
670 __vmx_bug(regs);
671 }
673 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
674 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
676 switch(cr) {
677 case 0:
678 {
679 unsigned long old_base_mfn, mfn;
681 /*
682 * CR0:
683 * We don't want to lose PE and PG.
684 */
685 __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
686 __vmwrite(CR0_READ_SHADOW, value);
688 if (value & (X86_CR0_PE | X86_CR0_PG) &&
689 !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
690 /*
691 * Enable paging
692 */
693 set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state);
694 /*
695 * The guest CR3 must be pointing to the guest physical.
696 */
697 if ( !VALID_MFN(mfn = phys_to_machine_mapping(
698 d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
699 !get_page(pfn_to_page(mfn), d->domain) )
700 {
701 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
702 d->arch.arch_vmx.cpu_cr3);
703 domain_crash_synchronous(); /* need to take a clean path */
704 }
705 old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
706 if ( old_base_mfn )
707 put_page(pfn_to_page(old_base_mfn));
709 /*
710 * Now arch.guest_table points to machine physical.
711 */
712 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
713 update_pagetables(d);
715 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
716 (unsigned long) (mfn << PAGE_SHIFT));
718 __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
719 /*
720 * arch->shadow_table should hold the next CR3 for shadow
721 */
722 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
723 d->arch.arch_vmx.cpu_cr3, mfn);
724 } else {
725 if ((value & X86_CR0_PE) == 0) {
726 __vmread(GUEST_EIP, &eip);
727 VMX_DBG_LOG(DBG_LEVEL_1,
728 "Disabling CR0.PE at %%eip 0x%lx", eip);
729 if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
730 set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
731 &d->arch.arch_vmx.cpu_state);
732 __vmread(GUEST_EIP, &eip);
733 VMX_DBG_LOG(DBG_LEVEL_1,
734 "Transfering control to vmxassist %%eip 0x%lx", eip);
735 return 0; /* do not update eip! */
736 }
737 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
738 &d->arch.arch_vmx.cpu_state)) {
739 __vmread(GUEST_EIP, &eip);
740 VMX_DBG_LOG(DBG_LEVEL_1,
741 "Enabling CR0.PE at %%eip 0x%lx", eip);
742 if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
743 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
744 &d->arch.arch_vmx.cpu_state);
745 __vmread(GUEST_EIP, &eip);
746 VMX_DBG_LOG(DBG_LEVEL_1,
747 "Restoring to %%eip 0x%lx", eip);
748 return 0; /* do not update eip! */
749 }
750 }
751 }
752 break;
753 }
754 case 3:
755 {
756 unsigned long old_base_mfn, mfn;
758 /*
759 * If paging is not enabled yet, simply copy the value to CR3.
760 */
761 if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
762 d->arch.arch_vmx.cpu_cr3 = value;
763 break;
764 }
766 /*
767 * We make a new one if the shadow does not exist.
768 */
769 if (value == d->arch.arch_vmx.cpu_cr3) {
770 /*
771 * This is a simple TLB flush, implying the guest has
772 * removed some translation or changed page attributes.
773 * We simply invalidate the shadow.
774 */
775 mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
776 if ((mfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table))
777 __vmx_bug(regs);
778 shadow_sync_all(d->domain);
779 } else {
780 /*
781 * If different, make a shadow. Check if the PDBR is valid
782 * first.
783 */
784 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
785 if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
786 !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
787 !get_page(pfn_to_page(mfn), d->domain) )
788 {
789 VMX_DBG_LOG(DBG_LEVEL_VMMU,
790 "Invalid CR3 value=%lx", value);
791 domain_crash_synchronous(); /* need to take a clean path */
792 }
793 old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
794 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
795 if ( old_base_mfn )
796 put_page(pfn_to_page(old_base_mfn));
797 update_pagetables(d);
798 /*
799 * arch.shadow_table should now hold the next CR3 for shadow
800 */
801 d->arch.arch_vmx.cpu_cr3 = value;
802 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
803 value);
804 __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
805 }
806 break;
807 }
808 case 4:
809 /* CR4 */
810 if (value & X86_CR4_PAE)
811 __vmx_bug(regs); /* not implemented */
812 __vmread(CR4_READ_SHADOW, &old_cr);
814 __vmwrite(GUEST_CR4, (value | X86_CR4_VMXE));
815 __vmwrite(CR4_READ_SHADOW, value);
817 /*
818 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
819 * all TLB entries except global entries.
820 */
821 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
822 vmx_shadow_clear_state(d->domain);
823 shadow_sync_all(d->domain);
824 }
825 break;
826 default:
827 printk("invalid cr: %d\n", cr);
828 __vmx_bug(regs);
829 }
831 return 1;
832 }
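Summarizing the cases above: a CR0 write that turns paging on points the domain at shadow page tables built from its guest CR3, clearing CR0.PE hands control to vmxassist (and setting PE again restores the saved world); a CR3 write either flushes the shadow (same value) or installs a new shadow root (new value); and a CR4 write that toggles PSE, PGE or PAE forces the shadow state to be rebuilt, with PAE itself still unimplemented here.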
834 #define CASE_SET_REG(REG, reg) \
835 case REG_ ## REG: \
836 regs->reg = value; \
837 break
839 /*
840 * Read from control registers. CR0 and CR4 are read from the shadow.
841 */
842 static void mov_from_cr(int cr, int gp, struct xen_regs *regs)
843 {
844 unsigned long value;
845 struct exec_domain *d = current;
847 if (cr != 3)
848 __vmx_bug(regs);
850 value = (unsigned long) d->arch.arch_vmx.cpu_cr3;
851 ASSERT(value);
853 switch (gp) {
854 CASE_SET_REG(EAX, eax);
855 CASE_SET_REG(ECX, ecx);
856 CASE_SET_REG(EDX, edx);
857 CASE_SET_REG(EBX, ebx);
858 CASE_SET_REG(EBP, ebp);
859 CASE_SET_REG(ESI, esi);
860 CASE_SET_REG(EDI, edi);
861 case REG_ESP:
862 __vmwrite(GUEST_ESP, value);
863 regs->esp = value;
864 break;
865 default:
866 printk("invalid gp: %d\n", gp);
867 __vmx_bug(regs);
868 }
870 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
871 }
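vmx_cr_access() below relies on the CONTROL_REG_ACCESS_* masks to pick the pieces out of the exit qualification. In the Intel-defined layout those pieces are the control-register number in bits 3:0, the access type in bits 5:4 (MOV to CR, MOV from CR, CLTS or LMSW), and the general-purpose register in bits 11:8, which is how one qualification word yields both `cr` and `gp` below.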
873 static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs)
874 {
875 unsigned int gp, cr;
876 unsigned long value;
878 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
879 case TYPE_MOV_TO_CR:
880 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
881 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
882 return mov_to_cr(gp, cr, regs);
883 case TYPE_MOV_FROM_CR:
884 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
885 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
886 mov_from_cr(cr, gp, regs);
887 break;
888 case TYPE_CLTS:
889 __vmread(GUEST_CR0, &value);
890 value &= ~X86_CR0_TS; /* clear TS */
891 __vmwrite(GUEST_CR0, value);
893 __vmread(CR0_READ_SHADOW, &value);
894 value &= ~X86_CR0_TS; /* clear TS */
895 __vmwrite(CR0_READ_SHADOW, value);
896 break;
897 default:
898 __vmx_bug(regs);
899 break;
900 }
901 return 1;
902 }
904 static inline void vmx_do_msr_read(struct xen_regs *regs)
905 {
906 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
907 regs->ecx, regs->eax, regs->edx);
909 rdmsr(regs->ecx, regs->eax, regs->edx);
911 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
912 "ecx=%lx, eax=%lx, edx=%lx",
913 regs->ecx, regs->eax, regs->edx);
914 }
916 /*
917 * Need to use this exit to reschedule
918 */
919 static inline void vmx_vmexit_do_hlt(void)
920 {
921 #if VMX_DEBUG
922 unsigned long eip;
923 __vmread(GUEST_EIP, &eip);
924 #endif
925 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%p", eip);
926 raise_softirq(SCHEDULE_SOFTIRQ);
927 }
929 static inline void vmx_vmexit_do_mwait(void)
930 {
931 #if VMX_DEBUG
932 unsigned long eip;
933 __vmread(GUEST_EIP, &eip);
934 #endif
935 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%p", eip);
936 raise_softirq(SCHEDULE_SOFTIRQ);
937 }
939 #define BUF_SIZ 256
940 #define MAX_LINE 80
941 char print_buf[BUF_SIZ];
942 static int index;
944 static void vmx_print_line(const char c, struct exec_domain *d)
945 {
947 if (index == MAX_LINE || c == '\n') {
948 if (index == MAX_LINE) {
949 print_buf[index++] = c;
950 }
951 print_buf[index] = '\0';
952 printk("(GUEST: %u) %s\n", d->domain->id, (char *) &print_buf);
953 index = 0;
954 }
955 else
956 print_buf[index++] = c;
957 }
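vmx_print_line() gives the guest a crude console: one character arrives per call (via the VMCALL exit below, which passes regs.eax), characters accumulate in print_buf until a newline or MAX_LINE, and the finished line is printed tagged with the guest's domain id. Since `index` and `print_buf` are file-scope, output from multiple virtual CPUs would interleave, a limitation implied by the shared buffer rather than documented here.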
959 void save_vmx_execution_context(execution_context_t *ctxt)
960 {
961 __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
962 __vmread(GUEST_ESP, &ctxt->esp);
963 __vmread(GUEST_EFLAGS, &ctxt->eflags);
964 __vmread(GUEST_CS_SELECTOR, &ctxt->cs);
965 __vmread(GUEST_EIP, &ctxt->eip);
967 __vmread(GUEST_GS_SELECTOR, &ctxt->gs);
968 __vmread(GUEST_FS_SELECTOR, &ctxt->fs);
969 __vmread(GUEST_ES_SELECTOR, &ctxt->es);
970 __vmread(GUEST_DS_SELECTOR, &ctxt->ds);
971 }
973 #ifdef XEN_DEBUGGER
974 void save_xen_regs(struct xen_regs *regs)
975 {
976 __vmread(GUEST_SS_SELECTOR, &regs->xss);
977 __vmread(GUEST_ESP, &regs->esp);
978 __vmread(GUEST_EFLAGS, &regs->eflags);
979 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
980 __vmread(GUEST_EIP, &regs->eip);
982 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
983 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
984 __vmread(GUEST_ES_SELECTOR, &regs->xes);
985 __vmread(GUEST_DS_SELECTOR, &regs->xds);
986 }
988 void restore_xen_regs(struct xen_regs *regs)
989 {
990 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
991 __vmwrite(GUEST_ESP, regs->esp);
992 __vmwrite(GUEST_EFLAGS, regs->eflags);
993 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
994 __vmwrite(GUEST_EIP, regs->eip);
996 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
997 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
998 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
999 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
1000 }
1001 #endif
1003 asmlinkage void vmx_vmexit_handler(struct xen_regs regs)
1004 {
1005 unsigned int exit_reason, idtv_info_field;
1006 unsigned long exit_qualification, eip, inst_len = 0;
1007 struct exec_domain *ed = current;
1008 int error;
1010 if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
1011 __vmx_bug(&regs);
1013 perfc_incra(vmexits, exit_reason);
1015 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
1016 if (idtv_info_field & INTR_INFO_VALID_MASK) {
1017 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
1018 if ((idtv_info_field & 0xff) == 14) {
1019 unsigned long error_code;
1021 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
1022 printk("#PG error code: %lx\n", error_code);
1023 }
1024 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x",
1025 idtv_info_field);
1026 }
1028 /* don't bother with H/W interrupts */
1029 if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
1030 exit_reason != EXIT_REASON_VMCALL &&
1031 exit_reason != EXIT_REASON_IO_INSTRUCTION)
1032 VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
1034 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
1035 domain_crash_synchronous();
1036 return;
1037 }
1039 __vmread(GUEST_EIP, &eip);
1040 TRACE_3D(TRC_VMX_VMEXIT, ed->domain->id, eip, exit_reason);
1042 switch (exit_reason) {
1043 case EXIT_REASON_EXCEPTION_NMI:
1044 {
1045 /*
1046 * We don't enable software-interrupt exiting (INT n), so an exit
1047 * here means either (1) an exception (e.g. #PF) raised in the
1048 * guest, or (2) an NMI.
1049 */
1050 int error;
1051 unsigned int vector;
1052 unsigned long va;
1054 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1055 && !(vector & INTR_INFO_VALID_MASK))
1056 __vmx_bug(&regs);
1057 vector &= 0xff;
1059 perfc_incra(cause_vector, vector);
1061 TRACE_3D(TRC_VMX_VECTOR, ed->domain->id, eip, vector);
1062 switch (vector) {
1063 #ifdef XEN_DEBUGGER
1064 case TRAP_debug:
1065 {
1066 save_xen_regs(&regs);
1067 pdb_handle_exception(1, &regs, 1);
1068 restore_xen_regs(&regs);
1069 break;
1070 }
1071 case TRAP_int3:
1072 {
1073 save_xen_regs(&regs);
1074 pdb_handle_exception(3, &regs, 1);
1075 restore_xen_regs(&regs);
1076 break;
1077 }
1078 #endif
1079 case TRAP_gp_fault:
1080 {
1081 vmx_do_general_protection_fault(&regs);
1082 break;
1083 }
1084 case TRAP_page_fault:
1085 {
1086 __vmread(EXIT_QUALIFICATION, &va);
1087 __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
1088 VMX_DBG_LOG(DBG_LEVEL_VMMU,
1089 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
1090 regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi,
1091 regs.edi);
1092 ed->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
1094 if (!(error = vmx_do_page_fault(va, &regs))) {
1095 /*
1096 * Inject #PG using Interruption-Information Fields
1097 */
1098 unsigned long intr_fields;
1100 intr_fields = (INTR_INFO_VALID_MASK |
1101 INTR_TYPE_EXCEPTION |
1102 INTR_INFO_DELIEVER_CODE_MASK |
1103 TRAP_page_fault);
1104 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
1105 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, regs.error_code);
1106 ed->arch.arch_vmx.cpu_cr2 = va;
1107 TRACE_3D(TRC_VMX_INT, ed->domain->id, TRAP_page_fault, va);
1108 }
1109 break;
1110 }
1111 case TRAP_nmi:
1112 do_nmi(&regs, 0);
1113 break;
1114 default:
1115 printk("unexpected VMexit for exception vector 0x%x\n", vector);
1116 //__vmx_bug(&regs);
1117 break;
1118 }
1119 break;
1120 }
1121 case EXIT_REASON_EXTERNAL_INTERRUPT:
1122 {
1123 extern int vector_irq[];
1124 extern asmlinkage void do_IRQ(struct xen_regs *);
1125 extern void smp_apic_timer_interrupt(struct xen_regs *);
1126 extern void timer_interrupt(int, void *, struct xen_regs *);
1127 unsigned int vector;
1129 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1130 && !(vector & INTR_INFO_VALID_MASK))
1131 __vmx_bug(&regs);
1133 vector &= 0xff;
1134 local_irq_disable();
1136 if (vector == LOCAL_TIMER_VECTOR) {
1137 smp_apic_timer_interrupt(&regs);
1138 } else {
1139 regs.entry_vector = (vector == FIRST_DEVICE_VECTOR?
1140 0 : vector_irq[vector]);
1141 do_IRQ(&regs);
1142 }
1143 break;
1144 }
1145 case EXIT_REASON_PENDING_INTERRUPT:
1146 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
1147 MONITOR_CPU_BASED_EXEC_CONTROLS);
1148 vmx_intr_assist(ed);
1149 break;
1150 case EXIT_REASON_TASK_SWITCH:
1151 __vmx_bug(&regs);
1152 break;
1153 case EXIT_REASON_CPUID:
1154 __get_instruction_length(inst_len);
1155 vmx_vmexit_do_cpuid(regs.eax, &regs);
1156 __update_guest_eip(inst_len);
1157 break;
1158 case EXIT_REASON_HLT:
1159 __get_instruction_length(inst_len);
1160 __update_guest_eip(inst_len);
1161 vmx_vmexit_do_hlt();
1162 break;
1163 case EXIT_REASON_INVLPG:
1164 {
1165 unsigned long va;
1167 __vmread(EXIT_QUALIFICATION, &va);
1168 vmx_vmexit_do_invlpg(va);
1169 __get_instruction_length(inst_len);
1170 __update_guest_eip(inst_len);
1171 break;
1172 }
1173 case EXIT_REASON_VMCALL:
1174 __get_instruction_length(inst_len);
1175 __vmread(GUEST_EIP, &eip);
1176 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1178 vmx_print_line(regs.eax, ed); /* provides the current domain */
1179 __update_guest_eip(inst_len);
1180 break;
1181 case EXIT_REASON_CR_ACCESS:
1182 {
1183 __vmread(GUEST_EIP, &eip);
1184 __get_instruction_length(inst_len);
1185 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1187 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
1188 eip, inst_len, exit_qualification);
1189 if (vmx_cr_access(exit_qualification, &regs))
1190 __update_guest_eip(inst_len);
1191 break;
1192 }
1193 case EXIT_REASON_DR_ACCESS:
1194 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1195 vmx_dr_access(exit_qualification, &regs);
1196 __get_instruction_length(inst_len);
1197 __update_guest_eip(inst_len);
1198 break;
1199 case EXIT_REASON_IO_INSTRUCTION:
1200 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1201 __get_instruction_length(inst_len);
1202 vmx_io_instruction(&regs, exit_qualification, inst_len);
1203 break;
1204 case EXIT_REASON_MSR_READ:
1205 __get_instruction_length(inst_len);
1206 vmx_do_msr_read(&regs);
1207 __update_guest_eip(inst_len);
1208 break;
1209 case EXIT_REASON_MSR_WRITE:
1210 __vmread(GUEST_EIP, &eip);
1211 VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%p, eax=%p, edx=%p",
1212 eip, regs.eax, regs.edx);
1213 /* just ignore the MSR write for now */
1214 __get_instruction_length(inst_len);
1215 __update_guest_eip(inst_len);
1216 break;
1217 case EXIT_REASON_MWAIT_INSTRUCTION:
1218 __get_instruction_length(inst_len);
1219 __update_guest_eip(inst_len);
1220 vmx_vmexit_do_mwait();
1221 break;
1222 default:
1223 __vmx_bug(&regs); /* should not happen */
1224 }
1226 vmx_intr_assist(ed);
1227 return;
1228 }
1230 asmlinkage void load_cr2(void)
1231 {
1232 struct exec_domain *d = current;
1234 local_irq_disable();
1235 #ifdef __i386__
1236 asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1237 #else
1238 asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1239 #endif
1240 }
1243 #endif /* CONFIG_VMX */
1245 /*
1246 * Local variables:
1247 * mode: C
1248 * c-set-style: "BSD"
1249 * c-basic-offset: 4
1250 * tab-width: 4
1251 * indent-tabs-mode: nil
1252 * End:
1253 */