
annotate xen/arch/x86/vmx.c @ 3635:ed902e5c4b49

bitkeeper revision 1.1159.212.62 (41fff40aESe4aWS82z_rLHeonXpxuQ)

More x86/64 stuff.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 01 21:26:34 2005 +0000 (2005-02-01)
parents d9cdcc864e90
children bbe8541361dd d93748c50893
rev   line source
iap10@3328 1 /*
iap10@3328 2 * vmx.c: handling VMX architecture-related VM exits
iap10@3328 3 * Copyright (c) 2004, Intel Corporation.
iap10@3328 4 *
iap10@3328 5 * This program is free software; you can redistribute it and/or modify it
iap10@3328 6 * under the terms and conditions of the GNU General Public License,
iap10@3328 7 * version 2, as published by the Free Software Foundation.
iap10@3328 8 *
iap10@3328 9 * This program is distributed in the hope it will be useful, but WITHOUT
iap10@3328 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
iap10@3328 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
iap10@3328 12 * more details.
iap10@3328 13 *
iap10@3328 14 * You should have received a copy of the GNU General Public License along with
iap10@3328 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
iap10@3328 16 * Place - Suite 330, Boston, MA 02111-1307 USA.
iap10@3328 17 *
iap10@3328 18 */
iap10@3328 19
iap10@3328 20 #include <xen/config.h>
iap10@3328 21 #include <xen/init.h>
iap10@3328 22 #include <xen/lib.h>
iap10@3328 23 #include <xen/sched.h>
iap10@3328 24 #include <asm/current.h>
iap10@3328 25 #include <asm/io.h>
iap10@3328 26 #include <asm/irq.h>
iap10@3328 27 #include <asm/shadow.h>
iap10@3328 28 #include <asm/regs.h>
iap10@3328 29 #include <asm/cpufeature.h>
iap10@3328 30 #include <asm/processor.h>
iap10@3328 31 #include <asm/types.h>
iap10@3328 32 #include <asm/msr.h>
iap10@3328 33 #include <asm/spinlock.h>
iap10@3328 34 #include <asm/vmx.h>
iap10@3328 35 #include <asm/vmx_vmcs.h>
iap10@3328 36 #include <public/io/ioreq.h>
iap10@3328 37
iap10@3328 38 int vmcs_size;
iap10@3328 39 unsigned int opt_vmx_debug_level;
iap10@3328 40
kaf24@3394 41 extern long evtchn_send(int lport);
kaf24@3394 42 extern long do_block(void);
kaf24@3394 43
iap10@3604 44 #define VECTOR_DB 1
iap10@3604 45 #define VECTOR_BP 3
iap10@3604 46 #define VECTOR_GP 13
iap10@3604 47 #define VECTOR_PG 14
iap10@3604 48
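/*
 * Bring up VMX on this CPU: cache the CPUID.1:ECX feature word, set
 * CR4.VMXE, and execute VMXON on a freshly allocated VMCS region.
 * Returns 1 on success, 0 if VMX is unsupported or allocation fails.
 */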
iap10@3328 49 int start_vmx()
iap10@3328 50 {
iap10@3328 51 struct vmcs_struct *vmcs;
iap10@3328 52 unsigned long ecx;
iap10@3328 53 u64 phys_vmcs; /* debugging */
iap10@3328 54
iap10@3328 55 vmcs_size = VMCS_SIZE;
iap10@3328 56 /*
iap10@3328 57 * Xen does not fill x86_capability words except 0.
iap10@3328 58 */
iap10@3328 59 ecx = cpuid_ecx(1);
iap10@3328 60 boot_cpu_data.x86_capability[4] = ecx;
iap10@3328 61
iap10@3328 62 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
iap10@3328 63 return 0;
iap10@3328 64
iap10@3328 65 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
iap10@3328 66
iap10@3328 67 if (!(vmcs = alloc_vmcs())) {
iap10@3328 68 printk("Failed to allocate VMCS\n");
iap10@3328 69 return 0;
iap10@3328 70 }
iap10@3328 71
iap10@3328 72 phys_vmcs = (u64) virt_to_phys(vmcs);
iap10@3328 73
iap10@3328 74 if (!(__vmxon(phys_vmcs))) {
iap10@3328 75 printk("VMXON is done\n");
iap10@3328 76 }
iap10@3328 77
iap10@3328 78 return 1;
iap10@3328 79 }
iap10@3328 80
iap10@3328 81 void stop_vmx()
iap10@3328 82 {
iap10@3426 83 if (read_cr4() & X86_CR4_VMXE)
iap10@3328 84 __vmxoff();
iap10@3328 85 }
iap10@3328 86
iap10@3328 87 /*
iap10@3328 88 * Not all cases receive a valid value in the VM-exit instruction-length field.
iap10@3328 89 */
iap10@3328 90 #define __get_instruction_length(len) \
iap10@3328 91 __vmread(INSTRUCTION_LEN, &(len)); \
iap10@3328 92 if ((len) < 1 || (len) > 15) \
iap10@3328 93 __vmx_bug(&regs);
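/*
 * N.B. __get_instruction_length() expands to more than one statement and
 * names `regs' directly, so it is only usable as a full statement in a
 * scope (vmx_vmexit_handler) where `regs' is defined.
 */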
iap10@3328 94
iap10@3328 95 static inline void __update_guest_eip(unsigned long inst_len)
iap10@3328 96 {
iap10@3328 97 unsigned long current_eip;
iap10@3328 98
iap10@3328 99 __vmread(GUEST_EIP, &current_eip);
iap10@3328 100 __vmwrite(GUEST_EIP, current_eip + inst_len);
iap10@3328 101 }
iap10@3328 102
iap10@3328 103
iap10@3328 104 #include <asm/domain_page.h>
iap10@3328 105
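/*
 * Resolve a guest page fault: prime the guest PDE cache so that
 * linear_pg_table[] can reach the guest PTE, hand the access to the
 * device model if it falls in MMIO space, and otherwise let the shadow
 * fault handler build the mapping.  Returns non-zero if the fault was
 * handled, 0 if #PG should be reflected back into the guest.
 */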
iap10@3328 106 static int vmx_do_page_fault(unsigned long va, unsigned long error_code)
iap10@3328 107 {
iap10@3328 108 unsigned long eip, pfn;
iap10@3328 109 unsigned int index;
iap10@3607 110 unsigned long gpde = 0, gpte, gpa;
iap10@3328 111 int result;
iap10@3328 112 struct exec_domain *ed = current;
iap10@3328 113 struct mm_struct *m = &ed->mm;
iap10@3328 114
iap10@3328 115 #if VMX_DEBUG
iap10@3328 116 {
iap10@3328 117 __vmread(GUEST_EIP, &eip);
iap10@3328 118 VMX_DBG_LOG(DBG_LEVEL_VMMU,
iap10@3328 119 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx\n",
iap10@3328 120 va, eip, error_code);
iap10@3328 121 }
iap10@3328 122 #endif
iap10@3328 123 /*
iap10@3328 124 * Set up guest page directory cache to make linear_pt_table[] work.
iap10@3328 125 */
iap10@3328 126 __guest_get_pl2e(m, va, &gpde);
iap10@3328 127 if (!(gpde & _PAGE_PRESENT))
iap10@3328 128 return 0;
iap10@3328 129
iap10@3328 130 index = (va >> L2_PAGETABLE_SHIFT);
iap10@3328 131 if (!l2_pgentry_val(m->guest_pl2e_cache[index])) {
iap10@3328 132 pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
iap10@3328 133
iap10@3328 134 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n",
iap10@3328 135 pagetable_val(m->pagetable));
iap10@3328 136
iap10@3328 137 m->guest_pl2e_cache[index] =
iap10@3328 138 mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
iap10@3328 139 }
iap10@3607 140
iap10@3607 141 if (unlikely(__get_user(gpte, (unsigned long *)
iap10@3607 142 &linear_pg_table[va >> PAGE_SHIFT])))
iap10@3607 143 return 0;
iap10@3607 144
iap10@3607 145 gpa = (gpte & PAGE_MASK) | (va & (PAGE_SIZE - 1));
iap10@3607 146
iap10@3607 147 if (mmio_space(gpa))
iap10@3607 148 handle_mmio(va, gpte, gpa);
iap10@3328 149
iap10@3328 150 if ((result = shadow_fault(va, error_code)))
iap10@3328 151 return result;
iap10@3328 152
iap10@3328 153 return 0; /* failed to resolve, i.e. raise #PG */
iap10@3328 154 }
iap10@3328 155
iap10@3328 156 static void vmx_do_general_protection_fault(struct xen_regs *regs)
iap10@3328 157 {
iap10@3328 158 unsigned long eip, error_code;
iap10@3604 159 unsigned long intr_fields;
iap10@3328 160
iap10@3328 161 __vmread(GUEST_EIP, &eip);
iap10@3328 162 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
iap10@3328 163
iap10@3604 164 VMX_DBG_LOG(DBG_LEVEL_1,
iap10@3328 165 "vmx_general_protection_fault: eip = %lx, error_code = %lx\n",
iap10@3328 166 eip, error_code);
iap10@3328 167
iap10@3604 168 VMX_DBG_LOG(DBG_LEVEL_1,
kaf24@3635 169 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n",
iap10@3328 170 regs->eax, regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
iap10@3328 171
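/*
 * VM-entry interruption-information format (Intel SDM): bits 7:0 hold
 * the vector, bits 10:8 the type, bit 11 (INTR_INFO_DELIEVER_CODE_MASK)
 * requests error-code delivery, and bit 31 marks the field valid.
 */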
iap10@3604 172 /* Reflect it back into the guest */
iap10@3604 173 intr_fields = (INTR_INFO_VALID_MASK |
iap10@3604 174 INTR_TYPE_EXCEPTION |
iap10@3604 175 INTR_INFO_DELIEVER_CODE_MASK |
iap10@3604 176 VECTOR_GP);
iap10@3604 177 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
iap10@3604 178 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
iap10@3328 179 }
iap10@3328 180
iap10@3328 181 static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs)
iap10@3328 182 {
iap10@3328 183 int eax, ebx, ecx, edx;
iap10@3328 184 unsigned long eip;
iap10@3328 185
iap10@3328 186 __vmread(GUEST_EIP, &eip);
iap10@3328 187
iap10@3328 188 VMX_DBG_LOG(DBG_LEVEL_1,
kaf24@3635 189 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
kaf24@3635 190 " (esi) %lx, (edi) %lx\n",
kaf24@3635 191 regs->eax, regs->ebx, regs->ecx, regs->edx,
kaf24@3635 192 regs->esi, regs->edi);
iap10@3328 193
iap10@3328 194 cpuid(input, &eax, &ebx, &ecx, &edx);
iap10@3328 195
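/*
 * Hide the leaf-1 paging features the shadow code cannot virtualize yet:
 * 4MB pages (PSE), PAE and PSE36.  This matches mov_to_cr() below, which
 * treats a guest setting CR4.PAE as unimplemented.
 */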
iap10@3328 196 if (input == 1) {
iap10@3328 197 clear_bit(X86_FEATURE_PSE, &edx);
iap10@3328 198 clear_bit(X86_FEATURE_PAE, &edx);
iap10@3328 199 clear_bit(X86_FEATURE_PSE36, &edx);
iap10@3328 200 }
iap10@3328 201
iap10@3328 202 regs->eax = (unsigned long) eax;
iap10@3328 203 regs->ebx = (unsigned long) ebx;
iap10@3328 204 regs->ecx = (unsigned long) ecx;
iap10@3328 205 regs->edx = (unsigned long) edx;
iap10@3328 206
iap10@3328 207 VMX_DBG_LOG(DBG_LEVEL_1,
iap10@3328 208 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out: eax=%x, ebx=%x, ecx=%x, edx=%x\n",
iap10@3328 209 eip, input, eax, ebx, ecx, edx);
iap10@3328 210
iap10@3328 211 }
iap10@3328 212
iap10@3328 213 #define CASE_GET_REG_P(REG, reg) \
iap10@3328 214 case REG_ ## REG: reg_p = &(regs->reg); break
iap10@3328 215
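/*
 * MOV-DR exit qualification (Intel SDM): bits 2:0 give the debug-register
 * number, bit 4 the direction (0 = MOV to DR, 1 = MOV from DR), and bits
 * 11:8 the general-purpose register operand.
 */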
iap10@3328 216 static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs)
iap10@3328 217 {
iap10@3328 218 unsigned int reg;
kaf24@3635 219 unsigned long *reg_p = 0;
iap10@3328 220 struct exec_domain *ed = current;
kaf24@3635 221 unsigned long eip;
iap10@3328 222
iap10@3328 223 __vmread(GUEST_EIP, &eip);
iap10@3328 224
iap10@3328 225 reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
iap10@3328 226
iap10@3328 227 VMX_DBG_LOG(DBG_LEVEL_1,
kaf24@3635 228 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx\n",
iap10@3328 229 eip, reg, exit_qualification);
iap10@3328 230
iap10@3328 231 switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
iap10@3328 232 CASE_GET_REG_P(EAX, eax);
iap10@3328 233 CASE_GET_REG_P(ECX, ecx);
iap10@3328 234 CASE_GET_REG_P(EDX, edx);
iap10@3328 235 CASE_GET_REG_P(EBX, ebx);
iap10@3328 236 CASE_GET_REG_P(EBP, ebp);
iap10@3328 237 CASE_GET_REG_P(ESI, esi);
iap10@3328 238 CASE_GET_REG_P(EDI, edi);
iap10@3328 239 case REG_ESP:
iap10@3328 240 break;
iap10@3328 241 default:
iap10@3328 242 __vmx_bug(regs);
iap10@3328 243 }
iap10@3328 244
iap10@3328 245 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
iap10@3328 246 case TYPE_MOV_TO_DR:
iap10@3328 247 /* don't need to check the range */
iap10@3328 248 if (reg != REG_ESP)
iap10@3328 249 ed->thread.debugreg[reg] = *reg_p;
iap10@3328 250 else {
iap10@3328 251 unsigned long value;
iap10@3328 252 __vmread(GUEST_ESP, &value);
iap10@3328 253 ed->thread.debugreg[reg] = value;
iap10@3328 254 }
iap10@3328 255 break;
iap10@3328 256 case TYPE_MOV_FROM_DR:
iap10@3328 257 if (reg != REG_ESP)
iap10@3328 258 *reg_p = ed->thread.debugreg[reg];
iap10@3328 259 else {
iap10@3328 260 __vmwrite(GUEST_ESP, ed->thread.debugreg[reg]);
iap10@3328 261 }
iap10@3328 262 break;
iap10@3328 263 }
iap10@3328 264 }
iap10@3328 265
iap10@3328 266 /*
iap10@3328 267 * Invalidate the TLB entry for va, and invalidate the shadow page
iap10@3328 268 * corresponding to the address va.
iap10@3328 269 */
iap10@3328 270 static void vmx_vmexit_do_invlpg(unsigned long va)
iap10@3328 271 {
iap10@3328 272 unsigned long eip;
iap10@3328 273 struct exec_domain *d = current;
iap10@3328 274 unsigned int index;
iap10@3328 275
iap10@3328 276 __vmread(GUEST_EIP, &eip);
iap10@3328 277
iap10@3328 278 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg:eip=%08lx, va=%08lx\n",
iap10@3328 279 eip, va);
iap10@3328 280
iap10@3328 281 /*
iap10@3328 282 * We do the safest thing first, then try to update the shadow
iap10@3328 283 * by copying from the guest.
iap10@3328 284 */
iap10@3328 285 vmx_shadow_invlpg(&d->mm, va);
iap10@3328 286 index = (va >> L2_PAGETABLE_SHIFT);
iap10@3328 287 d->mm.guest_pl2e_cache[index] = mk_l2_pgentry(0); /* invalidate pgd cache */
iap10@3328 288 }
iap10@3328 289
iap10@3328 290 static inline void guest_pl2e_cache_invalidate(struct mm_struct *m)
iap10@3328 291 {
iap10@3328 292 /*
iap10@3328 293 * Need to optimize this
iap10@3328 294 */
iap10@3328 295 memset(m->guest_pl2e_cache, 0, PAGE_SIZE);
iap10@3328 296 }
iap10@3328 297
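/*
 * Translate a guest virtual address to a guest physical address: fetch
 * and cache the guest PDE so that linear_pg_table[] resolves, then read
 * the guest PTE through it.  Unlike vmx_do_page_fault(), the PDE is used
 * here without a _PAGE_PRESENT check.
 */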
iap10@3607 298 inline unsigned long gva_to_gpa(unsigned long gva)
iap10@3328 299 {
iap10@3328 300 unsigned long gpde, gpte, pfn, index;
iap10@3328 301 struct exec_domain *d = current;
iap10@3328 302 struct mm_struct *m = &d->mm;
iap10@3328 303
iap10@3328 304 __guest_get_pl2e(m, gva, &gpde);
iap10@3328 305 index = (gva >> L2_PAGETABLE_SHIFT);
iap10@3328 306
iap10@3328 307 pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
iap10@3328 308
iap10@3328 309 m->guest_pl2e_cache[index] =
iap10@3328 310 mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
iap10@3328 311
iap10@3328 312 if ( unlikely(__get_user(gpte, (unsigned long *)
iap10@3328 313 &linear_pg_table[gva >> PAGE_SHIFT])) )
iap10@3328 314 {
iap10@3328 315 printk("gva_to_gpa EXIT: read gpte faulted\n");
iap10@3328 316 return 0;
iap10@3328 317 }
iap10@3328 318
iap10@3328 319 if ( !(gpte & _PAGE_PRESENT) )
iap10@3328 320 {
iap10@3328 321 printk("gva_to_gpa - EXIT: gpte not present (%lx)\n", gpte);
iap10@3328 322 return 0;
iap10@3328 323 }
iap10@3328 324
iap10@3328 325 return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK);
iap10@3328 326 }
iap10@3328 327
iap10@3328 328 static void vmx_io_instruction(struct xen_regs *regs,
iap10@3328 329 unsigned long exit_qualification, unsigned long inst_len)
iap10@3328 330 {
iap10@3328 331 struct exec_domain *d = current;
iap10@3328 332 vcpu_iodata_t *vio;
iap10@3328 333 ioreq_t *p;
iap10@3328 334 unsigned long addr;
iap10@3328 335 unsigned long eip;
iap10@3328 336
iap10@3328 337 __vmread(GUEST_EIP, &eip);
iap10@3328 338
iap10@3328 339 VMX_DBG_LOG(DBG_LEVEL_1,
iap10@3328 340 "vmx_io_instruction: eip=%08lx, exit_qualification = %lx\n",
iap10@3328 341 eip, exit_qualification);
iap10@3328 342
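/*
 * I/O-instruction exit qualification (Intel SDM): bits 2:0 encode the
 * access size minus one, bit 3 the direction (1 = IN), bit 4 a string
 * instruction, bit 5 a REP prefix, and bit 6 whether the port was an
 * immediate operand; the port is read from bits 31:16 in that case and
 * from %dx otherwise.
 */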
iap10@3328 343 if (test_bit(6, &exit_qualification))
iap10@3328 344 addr = (exit_qualification >> 16) & (0xffff);
iap10@3328 345 else
iap10@3328 346 addr = regs->edx & 0xffff;
iap10@3328 347
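/* Writes to port 0x80 are conventionally used only as an I/O delay;
   complete them without involving the device model. */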
iap10@3328 348 if (addr == 0x80) {
iap10@3328 349 __update_guest_eip(inst_len);
iap10@3328 350 return;
iap10@3328 351 }
iap10@3328 352
iap10@3328 353 vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
iap10@3328 354 if (vio == 0) {
iap10@3328 355 VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx\n", (unsigned long) vio);
iap10@3328 356 domain_crash();
iap10@3328 357 }
iap10@3328 358 p = &vio->vp_ioreq;
iap10@3328 359 p->dir = test_bit(3, &exit_qualification);
iap10@3328 360 set_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags);
iap10@3328 361
iap10@3328 362 p->pdata_valid = 0;
iap10@3328 363 p->count = 1;
iap10@3328 364 p->size = (exit_qualification & 7) + 1;
iap10@3328 365
iap10@3328 366 if (test_bit(4, &exit_qualification)) {
iap10@3606 367 unsigned long eflags;
iap10@3606 368
iap10@3606 369 __vmread(GUEST_EFLAGS, &eflags);
iap10@3606 370 p->df = (eflags & X86_EFLAGS_DF) ? 1 : 0;
iap10@3328 371 p->pdata_valid = 1;
iap10@3328 372 p->u.pdata = (void *) ((p->dir == IOREQ_WRITE) ?
iap10@3328 373 regs->esi
iap10@3328 374 : regs->edi);
iap10@3328 375 p->u.pdata = (void *) gva_to_gpa(p->u.data);
iap10@3328 376 if (test_bit(5, &exit_qualification))
iap10@3328 377 p->count = regs->ecx;
iap10@3328 378 if ((p->u.data & PAGE_MASK) !=
iap10@3328 379 ((p->u.data + p->count * p->size - 1) & PAGE_MASK)) {
iap10@3328 380 printk("stringio crosses page boundary!\n");
iap10@3328 381 if (p->u.data & (p->size - 1)) {
iap10@3328 382 printk("Not aligned I/O!\n");
iap10@3328 383 domain_crash();
iap10@3328 384 }
iap10@3328 385 p->count = (PAGE_SIZE - (p->u.data & ~PAGE_MASK)) / p->size;
iap10@3328 386 } else {
iap10@3328 387 __update_guest_eip(inst_len);
iap10@3328 388 }
iap10@3328 389 } else if (p->dir == IOREQ_WRITE) {
iap10@3328 390 p->u.data = regs->eax;
iap10@3328 391 __update_guest_eip(inst_len);
iap10@3328 392 } else
iap10@3328 393 __update_guest_eip(inst_len);
iap10@3328 394
iap10@3328 395 p->addr = addr;
iap10@3328 396 p->port_mm = 0;
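/*
 * Mark the request ready, notify the device model over the event
 * channel, and block this VCPU until the emulation completes.
 */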
iap10@3328 397 p->state = STATE_IOREQ_READY;
iap10@3328 398 evtchn_send(IOPACKET_PORT);
iap10@3328 399 do_block();
iap10@3328 400 }
iap10@3328 401
iap10@3328 402 #define CASE_GET_REG(REG, reg) \
iap10@3328 403 case REG_ ## REG: value = regs->reg; break
iap10@3328 404
iap10@3328 405 /*
iap10@3328 406 * Write to control registers
iap10@3328 407 */
iap10@3328 408 static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
iap10@3328 409 {
iap10@3328 410 unsigned long value;
iap10@3328 411 unsigned long old_cr;
iap10@3328 412 struct exec_domain *d = current;
iap10@3328 413
iap10@3328 414 switch (gp) {
iap10@3328 415 CASE_GET_REG(EAX, eax);
iap10@3328 416 CASE_GET_REG(ECX, ecx);
iap10@3328 417 CASE_GET_REG(EDX, edx);
iap10@3328 418 CASE_GET_REG(EBX, ebx);
iap10@3328 419 CASE_GET_REG(EBP, ebp);
iap10@3328 420 CASE_GET_REG(ESI, esi);
iap10@3328 421 CASE_GET_REG(EDI, edi);
iap10@3328 422 case REG_ESP:
iap10@3328 423 __vmread(GUEST_ESP, &value);
iap10@3328 424 break;
iap10@3328 425 default:
iap10@3328 426 printk("invalid gp: %d\n", gp);
iap10@3328 427 __vmx_bug(regs);
iap10@3328 428 }
iap10@3328 429
iap10@3328 430 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx\n", cr, value);
iap10@3328 431 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx\n", (unsigned long) current);
iap10@3328 432
iap10@3328 433 switch(cr) {
iap10@3328 434 case 0:
iap10@3328 435 {
iap10@3328 436 unsigned long old_base_pfn = 0, pfn;
iap10@3328 437
iap10@3328 438 /*
iap10@3328 439 * CR0:
iap10@3328 440 * We don't want to lose PE and PG.
iap10@3328 441 */
iap10@3328 442 __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
iap10@3328 443 __vmwrite(CR0_READ_SHADOW, value);
iap10@3328 444
iap10@3328 445 if (value & (X86_CR0_PE | X86_CR0_PG) &&
iap10@3328 446 !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state)) {
iap10@3328 447 /*
iap10@3328 448 * Enable paging
iap10@3328 449 */
iap10@3328 450 set_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state);
iap10@3328 451 /*
iap10@3328 452 * The guest CR3 must be pointing to the guest physical.
iap10@3328 453 */
iap10@3328 454 if (!(pfn = phys_to_machine_mapping[
iap10@3328 455 d->thread.arch_vmx.cpu_cr3 >> PAGE_SHIFT]))
iap10@3328 456 {
iap10@3328 457 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx\n",
iap10@3328 458 d->thread.arch_vmx.cpu_cr3);
iap10@3328 459 domain_crash(); /* need to take a clean path */
iap10@3328 460 }
iap10@3328 461 old_base_pfn = pagetable_val(d->mm.pagetable) >> PAGE_SHIFT;
iap10@3328 462 /*
iap10@3328 463 * Now mm.pagetable points to machine physical.
iap10@3328 464 */
iap10@3328 465 d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
iap10@3328 466
iap10@3328 467 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New mm.pagetable = %lx\n",
iap10@3328 468 (unsigned long) (pfn << PAGE_SHIFT));
iap10@3328 469
iap10@3328 470 shadow_lock(&d->mm);
iap10@3328 471 shadow_mode_enable(d->domain, SHM_full_32);
iap10@3328 472 shadow_unlock(&d->mm);
iap10@3328 473
iap10@3328 474 __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table));
iap10@3328 475 /*
iap10@3328 476 * mm->shadow_table should hold the next CR3 for shadow
iap10@3328 477 */
iap10@3328 478 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, pfn = %lx\n",
iap10@3328 479 d->thread.arch_vmx.cpu_cr3, pfn);
iap10@3328 480 put_page_and_type(&frame_table[old_base_pfn]);
iap10@3328 481
iap10@3328 482 }
iap10@3328 483 break;
iap10@3328 484 }
iap10@3328 485 case 3:
iap10@3328 486 {
iap10@3328 487 unsigned long pfn;
iap10@3328 488
iap10@3328 489 /*
iap10@3328 490 * If paging is not enabled yet, simply copy the value to CR3.
iap10@3328 491 */
iap10@3328 492 if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state)) {
iap10@3328 493 d->thread.arch_vmx.cpu_cr3 = value;
iap10@3328 494 return;
iap10@3328 495 }
iap10@3328 496
iap10@3328 497 guest_pl2e_cache_invalidate(&d->mm);
iap10@3328 498 /*
iap10@3328 499 * We make a new one if the shadow does not exist.
iap10@3328 500 */
iap10@3328 501 if (value == d->thread.arch_vmx.cpu_cr3) {
iap10@3328 502 /*
iap10@3328 503 * This is simple TLB flush, implying the guest has
iap10@3328 504 * removed some translation or changed page attributes.
iap10@3328 505 * We simply invalidate the shadow.
iap10@3328 506 */
iap10@3328 507 pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
iap10@3328 508 if ((pfn << PAGE_SHIFT) != pagetable_val(d->mm.pagetable))
iap10@3328 509 __vmx_bug(regs);
iap10@3328 510 vmx_shadow_clear_state(&d->mm);
iap10@3328 511 shadow_invalidate(&d->mm);
iap10@3328 512 } else {
iap10@3328 513 /*
iap10@3328 514 * If different, make a shadow. Check if the PDBR is valid
iap10@3328 515 * first.
iap10@3328 516 */
iap10@3328 517 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx\n", value);
iap10@3328 518 if ((value >> PAGE_SHIFT) > d->domain->max_pages)
iap10@3328 519 {
iap10@3328 520 VMX_DBG_LOG(DBG_LEVEL_VMMU,
iap10@3328 521 "Invalid CR3 value=%lx\n", value);
iap10@3328 522 domain_crash(); /* need to take a clean path */
iap10@3328 523 }
iap10@3328 524 pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
iap10@3328 525 vmx_shadow_clear_state(&d->mm);
iap10@3328 526 d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
iap10@3328 527 shadow_mk_pagetable(&d->mm);
iap10@3328 528 /*
iap10@3328 529 * mm->shadow_table should hold the next CR3 for shadow
iap10@3328 530 */
iap10@3328 531 d->thread.arch_vmx.cpu_cr3 = value;
iap10@3328 532 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx\n",
iap10@3328 533 value);
iap10@3328 534 __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table));
iap10@3328 535 }
iap10@3328 536 break;
iap10@3328 537 }
iap10@3328 538 case 4:
iap10@3328 539 /* CR4 */
iap10@3328 540 if (value & X86_CR4_PAE)
iap10@3328 541 __vmx_bug(regs); /* not implemented */
iap10@3328 542 __vmread(CR4_READ_SHADOW, &old_cr);
iap10@3328 543
iap10@3328 544 __vmwrite(GUEST_CR4, (value | X86_CR4_VMXE));
iap10@3328 545 __vmwrite(CR4_READ_SHADOW, value);
iap10@3328 546
iap10@3328 547 /*
iap10@3328 548 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
iap10@3328 549 * all TLB entries except global entries.
iap10@3328 550 */
iap10@3328 551 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
iap10@3328 552 vmx_shadow_clear_state(&d->mm);
iap10@3328 553 shadow_invalidate(&d->mm);
iap10@3328 554 guest_pl2e_cache_invalidate(&d->mm);
iap10@3328 555 }
iap10@3328 556 break;
iap10@3328 557 default:
iap10@3328 558 printk("invalid cr: %d\n", cr);
iap10@3328 559 __vmx_bug(regs);
iap10@3328 560 }
iap10@3328 561 }
iap10@3328 562
iap10@3328 563 #define CASE_SET_REG(REG, reg) \
iap10@3328 564 case REG_ ## REG: \
iap10@3328 565 regs->reg = value; \
iap10@3328 566 break
iap10@3328 567
iap10@3328 568 /*
iap10@3328 569 * Read from control registers. CR0 and CR4 are read from the shadow.
iap10@3328 570 */
iap10@3328 571 static void mov_from_cr(int cr, int gp, struct xen_regs *regs)
iap10@3328 572 {
iap10@3328 573 unsigned long value;
iap10@3328 574 struct exec_domain *d = current;
iap10@3328 575
iap10@3328 576 if (cr != 3)
iap10@3328 577 __vmx_bug(regs);
iap10@3328 578
iap10@3328 579 value = (unsigned long) d->thread.arch_vmx.cpu_cr3;
iap10@3328 580 ASSERT(value);
iap10@3328 581
iap10@3328 582 switch (gp) {
iap10@3328 583 CASE_SET_REG(EAX, eax);
iap10@3328 584 CASE_SET_REG(ECX, ecx);
iap10@3328 585 CASE_SET_REG(EDX, edx);
iap10@3328 586 CASE_SET_REG(EBX, ebx);
iap10@3328 587 CASE_SET_REG(EBP, ebp);
iap10@3328 588 CASE_SET_REG(ESI, esi);
iap10@3328 589 CASE_SET_REG(EDI, edi);
iap10@3328 590 case REG_ESP:
iap10@3328 591 __vmwrite(GUEST_ESP, value);
iap10@3328 592 regs->esp = value;
iap10@3328 593 break;
iap10@3328 594 default:
iap10@3328 595 printk("invalid gp: %d\n", gp);
iap10@3328 596 __vmx_bug(regs);
iap10@3328 597 }
iap10@3328 598
iap10@3328 599 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx\n", cr, value);
iap10@3328 600 }
iap10@3328 601
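/*
 * CR-access exit qualification (Intel SDM): bits 3:0 give the control
 * register number, bits 5:4 the access type (0 = MOV to CR, 1 = MOV from
 * CR, 2 = CLTS, 3 = LMSW), and bits 11:8 the general-purpose register.
 */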
iap10@3328 602 static void vmx_cr_access (unsigned long exit_qualification, struct xen_regs *regs)
iap10@3328 603 {
iap10@3328 604 unsigned int gp, cr;
iap10@3328 605 unsigned long value;
iap10@3328 606
iap10@3328 607 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
iap10@3328 608 case TYPE_MOV_TO_CR:
iap10@3328 609 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
iap10@3328 610 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
iap10@3328 611 mov_to_cr(gp, cr, regs);
iap10@3328 612 break;
iap10@3328 613 case TYPE_MOV_FROM_CR:
iap10@3328 614 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
iap10@3328 615 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
iap10@3328 616 mov_from_cr(cr, gp, regs);
iap10@3328 617 break;
iap10@3328 618 case TYPE_CLTS:
iap10@3328 619 __vmread(GUEST_CR0, &value);
iap10@3328 620 value &= ~X86_CR0_TS; /* clear TS */
iap10@3328 621 __vmwrite(GUEST_CR0, value);
iap10@3328 622
iap10@3328 623 __vmread(CR0_READ_SHADOW, &value);
iap10@3328 624 value &= ~X86_CR0_TS; /* clear TS */
iap10@3328 625 __vmwrite(CR0_READ_SHADOW, value);
iap10@3328 626 break;
iap10@3328 627 default:
iap10@3328 628 __vmx_bug(regs);
iap10@3328 629 break;
iap10@3328 630 }
iap10@3328 631 }
iap10@3328 632
iap10@3328 633 static inline void vmx_do_msr_read(struct xen_regs *regs)
iap10@3328 634 {
kaf24@3635 635 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
iap10@3328 636 regs->ecx, regs->eax, regs->edx);
iap10@3328 637
iap10@3328 638 rdmsr(regs->ecx, regs->eax, regs->edx);
iap10@3328 639
kaf24@3635 640 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
kaf24@3635 641 "ecx=%lx, eax=%lx, edx=%lx",
kaf24@3635 642 regs->ecx, regs->eax, regs->edx);
iap10@3328 643 }
iap10@3328 644
iap10@3328 645 /*
iap10@3328 646 * Need to use this exit to reschedule
iap10@3328 647 */
iap10@3328 648 static inline void vmx_vmexit_do_hlt()
iap10@3328 649 {
iap10@3328 650 #if VMX_DEBUG
iap10@3328 651 unsigned long eip;
iap10@3328 652 __vmread(GUEST_EIP, &eip);
iap10@3328 653 #endif
iap10@3328 654 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%08lx\n", eip);
iap10@3328 655 __enter_scheduler();
iap10@3328 656 }
iap10@3328 657
iap10@3328 658 static inline void vmx_vmexit_do_mwait()
iap10@3328 659 {
iap10@3328 660 #if VMX_DEBUG
iap10@3328 661 unsigned long eip;
iap10@3328 662 __vmread(GUEST_EIP, &eip);
iap10@3328 663 #endif
iap10@3328 664 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%08lx\n", eip);
iap10@3328 665 __enter_scheduler();
iap10@3328 666 }
iap10@3328 667
iap10@3328 668 #define BUF_SIZ 256
iap10@3328 669 #define MAX_LINE 80
iap10@3328 670 char print_buf[BUF_SIZ];
iap10@3328 671 static int index;
iap10@3328 672
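/*
 * Buffer single characters passed in from the guest via VMCALL (see
 * EXIT_REASON_VMCALL below), flushing one console line per domain when a
 * newline arrives or the buffer fills.
 */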
iap10@3328 673 static void vmx_print_line(const char c, struct exec_domain *d)
iap10@3328 674 {
iap10@3328 675
iap10@3328 676 if (index == MAX_LINE || c == '\n') {
iap10@3328 677 if (index == MAX_LINE) {
iap10@3328 678 print_buf[index++] = c;
iap10@3328 679 }
iap10@3328 680 print_buf[index] = '\0';
iap10@3328 681 printk("(GUEST: %u) %s\n", d->domain->id, (char *) &print_buf);
iap10@3328 682 index = 0;
iap10@3328 683 }
iap10@3328 684 else
iap10@3328 685 print_buf[index++] = c;
iap10@3328 686 }
iap10@3328 687
iap10@3328 688 #ifdef XEN_DEBUGGER
iap10@3328 689 void save_xen_regs(struct xen_regs *regs)
iap10@3328 690 {
iap10@3328 691 __vmread(GUEST_SS_SELECTOR, &regs->xss);
iap10@3328 692 __vmread(GUEST_ESP, &regs->esp);
iap10@3328 693 __vmread(GUEST_EFLAGS, &regs->eflags);
iap10@3328 694 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
iap10@3328 695 __vmread(GUEST_EIP, &regs->eip);
iap10@3328 696
iap10@3328 697 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
iap10@3328 698 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
iap10@3328 699 __vmread(GUEST_ES_SELECTOR, &regs->xes);
iap10@3328 700 __vmread(GUEST_DS_SELECTOR, &regs->xds);
iap10@3328 701 }
iap10@3328 702
iap10@3328 703 void restore_xen_regs(struct xen_regs *regs)
iap10@3328 704 {
iap10@3328 705 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
iap10@3328 706 __vmwrite(GUEST_ESP, regs->esp);
iap10@3328 707 __vmwrite(GUEST_EFLAGS, regs->eflags);
iap10@3328 708 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
iap10@3328 709 __vmwrite(GUEST_EIP, regs->eip);
iap10@3328 710
iap10@3328 711 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
iap10@3328 712 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
iap10@3328 713 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
iap10@3328 714 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
iap10@3328 715 }
iap10@3328 716 #endif
iap10@3328 717
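/*
 * Common VM-exit handler: read the exit reason, re-inject any event that
 * was being delivered through the IDT when the exit occurred, and then
 * dispatch on the exit reason.
 */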
iap10@3328 718 asmlinkage void vmx_vmexit_handler(struct xen_regs regs)
iap10@3328 719 {
iap10@3328 720 unsigned int exit_reason, idtv_info_field;
iap10@3328 721 unsigned long exit_qualification, eip, inst_len = 0;
iap10@3328 722 struct exec_domain *d = current;
iap10@3328 723 int error;
iap10@3328 724
iap10@3328 725 if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
iap10@3328 726 __vmx_bug(&regs);
iap10@3328 727
iap10@3328 728 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
iap10@3328 729 if (idtv_info_field & INTR_INFO_VALID_MASK) {
iap10@3328 730 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
iap10@3328 731 if ((idtv_info_field & 0xff) == 14) {
iap10@3328 732 unsigned long error_code;
iap10@3328 733
iap10@3328 734 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
iap10@3328 735 printk("#PG error code: %lx\n", error_code);
iap10@3328 736 }
iap10@3328 737 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x\n",
iap10@3328 738 idtv_info_field);
iap10@3328 739 }
iap10@3328 740
iap10@3328 741 /* don't bother logging H/W interrupts */
iap10@3328 742 if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
iap10@3328 743 exit_reason != EXIT_REASON_VMCALL &&
iap10@3328 744 exit_reason != EXIT_REASON_IO_INSTRUCTION)
iap10@3328 745 VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x\n", exit_reason);
iap10@3328 746
iap10@3328 747 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
iap10@3328 748 __vmread(EXIT_QUALIFICATION, &exit_qualification);
iap10@3328 749 __vmread(GUEST_EIP, &eip);
iap10@3328 750 domain_crash();
iap10@3328 751 return;
iap10@3328 752 }
iap10@3328 753
iap10@3328 754 switch (exit_reason) {
iap10@3328 755 case EXIT_REASON_EXCEPTION_NMI:
iap10@3328 756 {
iap10@3328 757 /*
iap10@3328 758 * We don't set the software-interrupt exiting (INT n).
iap10@3328 759 * (1) We can get an exception (e.g. #PG) in the guest, or
iap10@3328 760 * (2) NMI
iap10@3328 761 */
iap10@3328 762 int error;
iap10@3328 763 unsigned int vector;
iap10@3328 764 unsigned long va;
iap10@3328 765 unsigned long error_code;
iap10@3328 766
iap10@3328 767 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
iap10@3328 768 || !(vector & INTR_INFO_VALID_MASK))
iap10@3328 769 __vmx_bug(&regs);
iap10@3328 770 vector &= 0xff;
iap10@3328 771
iap10@3328 772 switch (vector) {
iap10@3328 773 #ifdef XEN_DEBUGGER
iap10@3328 774 case VECTOR_DB:
iap10@3328 775 {
iap10@3328 776 save_xen_regs(&regs);
iap10@3328 777 pdb_handle_exception(1, &regs, 1);
iap10@3328 778 restore_xen_regs(&regs);
iap10@3328 779 break;
iap10@3328 780 }
iap10@3328 781 case VECTOR_BP:
iap10@3328 782 {
iap10@3328 783 save_xen_regs(&regs);
iap10@3328 784 pdb_handle_exception(3, &regs, 1);
iap10@3328 785 restore_xen_regs(&regs);
iap10@3328 786 break;
iap10@3328 787 }
iap10@3328 788 #endif
iap10@3328 789 case VECTOR_GP:
iap10@3328 790 {
iap10@3328 791 vmx_do_general_protection_fault(&regs);
iap10@3328 792 break;
iap10@3328 793 }
iap10@3328 794 case VECTOR_PG:
iap10@3328 795 {
iap10@3328 796 __vmread(EXIT_QUALIFICATION, &va);
iap10@3328 797 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
iap10@3328 798 VMX_DBG_LOG(DBG_LEVEL_VMMU,
kaf24@3635 799 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n",
kaf24@3635 800 regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi,
kaf24@3635 801 regs.edi);
iap10@3607 802 d->thread.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
iap10@3328 803
iap10@3328 804 if (!(error = vmx_do_page_fault(va, error_code))) {
iap10@3328 805 /*
iap10@3328 806 * Inject #PG using Interruption-Information Fields
iap10@3328 807 */
iap10@3328 808 unsigned long intr_fields;
iap10@3328 809
iap10@3328 810 intr_fields = (INTR_INFO_VALID_MASK |
iap10@3328 811 INTR_TYPE_EXCEPTION |
iap10@3328 812 INTR_INFO_DELIEVER_CODE_MASK |
iap10@3328 813 VECTOR_PG);
iap10@3328 814 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
iap10@3328 815 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
iap10@3328 816 d->thread.arch_vmx.cpu_cr2 = va;
iap10@3328 817 }
iap10@3328 818 break;
iap10@3328 819 }
iap10@3328 820 default:
iap10@3328 821 __vmx_bug(&regs);
iap10@3328 822 break;
iap10@3328 823 }
iap10@3328 824 break;
iap10@3328 825 }
iap10@3328 826 case EXIT_REASON_EXTERNAL_INTERRUPT:
iap10@3328 827 {
iap10@3328 828 extern int vector_irq[];
iap10@3328 829 extern asmlinkage void do_IRQ(struct xen_regs);
iap10@3328 830 extern void smp_apic_timer_interrupt(struct xen_regs *);
iap10@3328 831 extern void timer_interrupt(int, void *, struct xen_regs *);
iap10@3328 832 unsigned int vector;
iap10@3328 833
iap10@3328 834 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
iap10@3328 835 || !(vector & INTR_INFO_VALID_MASK))
iap10@3328 836 __vmx_bug(&regs);
iap10@3328 837
iap10@3328 838 vector &= 0xff;
iap10@3328 839 local_irq_disable();
iap10@3328 840
iap10@3328 841 if (vector == LOCAL_TIMER_VECTOR) {
iap10@3328 842 smp_apic_timer_interrupt(&regs);
iap10@3328 843 } else {
iap10@3328 844 regs.entry_vector = (vector == FIRST_DEVICE_VECTOR?
iap10@3328 845 0 : vector_irq[vector]);
iap10@3328 846 do_IRQ(regs);
iap10@3328 847 }
iap10@3328 848 break;
iap10@3328 849 }
iap10@3328 850 case EXIT_REASON_PENDING_INTERRUPT:
iap10@3328 851 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
iap10@3328 852 MONITOR_CPU_BASED_EXEC_CONTROLS);
iap10@3328 853 vmx_intr_assist(d);
iap10@3328 854 break;
iap10@3328 855 case EXIT_REASON_TASK_SWITCH:
iap10@3328 856 __vmx_bug(&regs);
iap10@3328 857 break;
iap10@3328 858 case EXIT_REASON_CPUID:
iap10@3328 859 __get_instruction_length(inst_len);
iap10@3328 860 vmx_vmexit_do_cpuid(regs.eax, &regs);
iap10@3328 861 __update_guest_eip(inst_len);
iap10@3328 862 break;
iap10@3328 863 case EXIT_REASON_HLT:
iap10@3328 864 __get_instruction_length(inst_len);
iap10@3328 865 __update_guest_eip(inst_len);
iap10@3328 866 vmx_vmexit_do_hlt();
iap10@3328 867 break;
iap10@3328 868 case EXIT_REASON_INVLPG:
iap10@3328 869 {
iap10@3328 870 unsigned long va;
iap10@3328 871
iap10@3328 872 __vmread(EXIT_QUALIFICATION, &va);
iap10@3328 873 vmx_vmexit_do_invlpg(va);
iap10@3328 874 __get_instruction_length(inst_len);
iap10@3328 875 __update_guest_eip(inst_len);
iap10@3328 876 break;
iap10@3328 877 }
iap10@3328 878 case EXIT_REASON_VMCALL:
iap10@3328 879 __get_instruction_length(inst_len);
iap10@3328 880 __vmread(GUEST_EIP, &eip);
iap10@3328 881 __vmread(EXIT_QUALIFICATION, &exit_qualification);
iap10@3328 882
iap10@3328 883 vmx_print_line(regs.eax, d); /* provides the current domain */
iap10@3328 884 __update_guest_eip(inst_len);
iap10@3328 885 break;
iap10@3328 886 case EXIT_REASON_CR_ACCESS:
iap10@3328 887 {
iap10@3328 888 __vmread(GUEST_EIP, &eip);
iap10@3328 889 __get_instruction_length(inst_len);
iap10@3328 890 __vmread(EXIT_QUALIFICATION, &exit_qualification);
iap10@3328 891
iap10@3328 892 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len = %lx, exit_qualification = %lx\n",
iap10@3328 893 eip, inst_len, exit_qualification);
iap10@3328 894 vmx_cr_access(exit_qualification, &regs);
iap10@3328 895 __update_guest_eip(inst_len);
iap10@3328 896 break;
iap10@3328 897 }
iap10@3328 898 case EXIT_REASON_DR_ACCESS:
iap10@3328 899 __vmread(EXIT_QUALIFICATION, &exit_qualification);
iap10@3328 900 vmx_dr_access(exit_qualification, &regs);
iap10@3328 901 __get_instruction_length(inst_len);
iap10@3328 902 __update_guest_eip(inst_len);
iap10@3328 903 break;
iap10@3328 904 case EXIT_REASON_IO_INSTRUCTION:
iap10@3328 905 __vmread(EXIT_QUALIFICATION, &exit_qualification);
iap10@3328 906 __get_instruction_length(inst_len);
iap10@3328 907 vmx_io_instruction(&regs, exit_qualification, inst_len);
iap10@3328 908 break;
iap10@3328 909 case EXIT_REASON_MSR_READ:
iap10@3328 910 __get_instruction_length(inst_len);
iap10@3328 911 vmx_do_msr_read(&regs);
iap10@3328 912 __update_guest_eip(inst_len);
iap10@3328 913 break;
iap10@3328 914 case EXIT_REASON_MSR_WRITE:
iap10@3328 915 __vmread(GUEST_EIP, &eip);
kaf24@3635 916 VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%08lx, eax=%08lx, edx=%08lx",
iap10@3328 917 eip, regs.eax, regs.edx);
iap10@3328 918 /* just ignore this MSR write for now */
iap10@3328 919 __get_instruction_length(inst_len);
iap10@3328 920 __update_guest_eip(inst_len);
iap10@3328 921 break;
iap10@3328 922 case EXIT_REASON_MWAIT_INSTRUCTION:
iap10@3328 923 __get_instruction_length(inst_len);
iap10@3328 924 __update_guest_eip(inst_len);
iap10@3328 925 vmx_vmexit_do_mwait();
iap10@3328 926 break;
iap10@3328 927 default:
iap10@3328 928 __vmx_bug(&regs); /* should not happen */
iap10@3328 929 }
iap10@3328 930 return;
iap10@3328 931 }
iap10@3328 932
iap10@3328 933 asmlinkage void load_cr2(void)
iap10@3328 934 {
iap10@3328 935 struct exec_domain *d = current;
iap10@3328 936
iap10@3328 937 local_irq_disable();
iap10@3328 938 asm volatile("movl %0,%%cr2": :"r" (d->thread.arch_vmx.cpu_cr2));
iap10@3328 939 }