xen-vtx-unstable

annotate xen/arch/x86/vmx_vmcs.c @ 6099:d1034eae9708

Improved error reporting on vmlaunch/vmresume failure.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Aug 11 21:01:10 2005 +0000 (2005-08-11)
parents f294acb25858
children bf98996dded2
rev   line source
iap10@3290 1 /*
iap10@3290 2 * vmx_vmcs.c: VMCS management
iap10@3290 3 * Copyright (c) 2004, Intel Corporation.
iap10@3290 4 *
iap10@3290 5 * This program is free software; you can redistribute it and/or modify it
iap10@3290 6 * under the terms and conditions of the GNU General Public License,
iap10@3290 7 * version 2, as published by the Free Software Foundation.
iap10@3290 8 *
iap10@3290 9 * This program is distributed in the hope it will be useful, but WITHOUT
iap10@3290 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
iap10@3290 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
iap10@3290 12 * more details.
iap10@3290 13 *
iap10@3290 14 * You should have received a copy of the GNU General Public License along with
iap10@3290 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
iap10@3290 16 * Place - Suite 330, Boston, MA 02111-1307 USA.
iap10@3290 17 *
iap10@3290 18 */
iap10@3290 19
iap10@3290 20 #include <xen/config.h>
iap10@3290 21 #include <xen/init.h>
iap10@3290 22 #include <xen/mm.h>
iap10@3290 23 #include <xen/lib.h>
iap10@3290 24 #include <xen/errno.h>
kaf24@5356 25 #include <xen/domain_page.h>
cl349@5291 26 #include <asm/current.h>
iap10@3290 27 #include <asm/cpufeature.h>
iap10@3290 28 #include <asm/processor.h>
iap10@3290 29 #include <asm/msr.h>
iap10@3290 30 #include <asm/vmx.h>
kaf24@5722 31 #include <asm/flushtlb.h>
iap10@3290 32 #include <xen/event.h>
iap10@3290 33 #include <xen/kernel.h>
iap10@3290 34 #include <public/io/ioreq.h>
kaf24@5722 35 #if CONFIG_PAGING_LEVELS >= 4
kaf24@5722 36 #include <asm/shadow_64.h>
kaf24@5722 37 #endif
mafetter@3717 38 #ifdef CONFIG_VMX
mafetter@3717 39
iap10@3290 40 struct vmcs_struct *alloc_vmcs(void)
iap10@3290 41 {
iap10@3290 42 struct vmcs_struct *vmcs;
kaf24@5059 43 u32 vmx_msr_low, vmx_msr_high;
iap10@3290 44
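/* IA32_VMX_BASIC: the low 32 bits hold the VMCS revision identifier; bits
 * 44:32 (masked with 0x1fff below) report the VMCS region size in bytes. */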
kaf24@5059 45 rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
kaf24@5059 46 vmcs_size = vmx_msr_high & 0x1fff;
kaf24@5398 47 vmcs = alloc_xenheap_pages(get_order(vmcs_size));
kaf24@5398 48 memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
iap10@3290 49
kaf24@5059 50 vmcs->vmcs_revision_id = vmx_msr_low;
iap10@3290 51 return vmcs;
iap10@3290 52 }
iap10@3290 53
iap10@3290 54 void free_vmcs(struct vmcs_struct *vmcs)
iap10@3290 55 {
iap10@3290 56 int order;
iap10@3290 57
kaf24@5775 58 order = get_order(vmcs_size);
kaf24@5398 59 free_xenheap_pages(vmcs, order);
iap10@3290 60 }
iap10@3290 61
kaf24@5836 62 static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
iap10@3290 63 {
iap10@3290 64 int error = 0;
kaf24@5836 65 void *io_bitmap_a;
kaf24@5836 66 void *io_bitmap_b;
kaf24@5775 67
iap10@3290 68 error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
iap10@3290 69 MONITOR_PIN_BASED_EXEC_CONTROLS);
iap10@3290 70
iap10@3290 71 error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
iap10@3290 72 MONITOR_CPU_BASED_EXEC_CONTROLS);
kaf24@5775 73
iap10@3290 74 error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
kaf24@5775 75
iap10@3290 76 error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
iap10@3290 77
kaf24@5836 78 /* the I/O bitmaps are architecturally fixed at 4KB each, hence 0x1000 rather than PAGE_SIZE */
kaf24@5836 79 io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000));
kaf24@5836 80 io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000));
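/* A set bit in an I/O bitmap forces a VM exit on access to the corresponding
 * port: bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff.
 * Intercept everything except the POST debug port cleared below. */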
kaf24@5836 81 memset(io_bitmap_a, 0xff, 0x1000);
kaf24@5836 82 /* don't intercept accesses to the debug port */
kaf24@5836 83 clear_bit(PC_DEBUG_PORT, io_bitmap_a);
kaf24@5836 84 memset(io_bitmap_b, 0xff, 0x1000);
kaf24@5836 85
kaf24@5836 86 error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_phys(io_bitmap_a));
kaf24@5836 87 error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_phys(io_bitmap_b));
kaf24@5836 88
kaf24@5836 89 arch_vmx->io_bitmap_a = io_bitmap_a;
kaf24@5836 90 arch_vmx->io_bitmap_b = io_bitmap_b;
kaf24@5836 91
iap10@3290 92 return error;
iap10@3290 93 }
iap10@3290 94
iap10@3290 95 #define GUEST_SEGMENT_LIMIT 0xffffffff
iap10@3290 96 #define HOST_SEGMENT_LIMIT 0xffffffff
iap10@3290 97
iap10@3290 98 struct host_execution_env {
iap10@3290 99 /* selectors */
iap10@3290 100 unsigned short ldtr_selector;
iap10@3290 101 unsigned short tr_selector;
iap10@3290 102 unsigned short ds_selector;
iap10@3290 103 unsigned short cs_selector;
iap10@3290 104 /* limits */
iap10@3290 105 unsigned short gdtr_limit;
iap10@3290 106 unsigned short ldtr_limit;
iap10@3290 107 unsigned short idtr_limit;
iap10@3290 108 unsigned short tr_limit;
iap10@3290 109 /* base */
iap10@3290 110 unsigned long gdtr_base;
iap10@3290 111 unsigned long ldtr_base;
iap10@3290 112 unsigned long idtr_base;
iap10@3290 113 unsigned long tr_base;
iap10@3290 114 unsigned long ds_base;
iap10@3290 115 unsigned long cs_base;
kaf24@5658 116 #ifdef __x86_64__
kaf24@5658 117 unsigned long fs_base;
kaf24@5658 118 unsigned long gs_base;
kaf24@5658 119 #endif
kaf24@5658 120
iap10@3290 121 /* control registers */
iap10@3290 122 unsigned long cr3;
iap10@3290 123 unsigned long cr0;
iap10@3290 124 unsigned long cr4;
iap10@3290 125 unsigned long dr7;
iap10@3290 126 };
iap10@3290 127
iap10@3290 128 #define round_pgdown(_p) ((_p)&PAGE_MASK) /* copied from domain.c */
iap10@3290 129
kaf24@5289 130 int vmx_setup_platform(struct vcpu *d, struct cpu_user_regs *regs)
iap10@3290 131 {
iap10@3290 132 int i;
iap10@3290 133 unsigned int n;
iap10@3290 134 unsigned long *p, mpfn, offset, addr;
iap10@3290 135 struct e820entry *e820p;
iap10@3290 136 unsigned long gpfn = 0;
iap10@3290 137
kaf24@5727 138 local_flush_tlb_pge();
kaf24@4683 139 regs->ebx = 0; /* Linux expects ebx to be 0 for boot proc */
iap10@3290 140
kaf24@4683 141 n = regs->ecx;
iap10@3290 142 if (n > 32) {
maf46@3855 143 VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n);
iap10@3290 144 return -1;
iap10@3290 145 }
iap10@3290 146
kaf24@4683 147 addr = regs->edi;
iap10@3290 148 offset = (addr & ~PAGE_MASK);
iap10@3290 149 addr = round_pgdown(addr);
kaf24@5356 150
iap10@3707 151 mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
kaf24@5356 152 p = map_domain_page(mpfn);
iap10@3290 153
iap10@3290 154 e820p = (struct e820entry *) ((unsigned long) p + offset);
iap10@3290 155
maf46@3880 156 #ifndef NDEBUG
maf46@3880 157 print_e820_memory_map(e820p, n);
maf46@3880 158 #endif
maf46@3880 159
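/* Scan the builder-supplied e820 map for the E820_SHARED_PAGE entry; that
 * frame is mapped below and used as the shared I/O-request page for
 * communicating with the device model. */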
kaf24@5356 160 for ( i = 0; i < n; i++ )
kaf24@5356 161 {
kaf24@5356 162 if ( e820p[i].type == E820_SHARED_PAGE )
kaf24@5356 163 {
iap10@3290 164 gpfn = (e820p[i].addr >> PAGE_SHIFT);
iap10@3290 165 break;
iap10@3290 166 }
iap10@3290 167 }
iap10@3290 168
kaf24@5356 169 if ( gpfn == 0 )
kaf24@5356 170 {
kaf24@5356 171 unmap_domain_page(p);
iap10@3290 172 return -1;
iap10@3290 173 }
iap10@3290 174
kaf24@5356 175 unmap_domain_page(p);
iap10@3748 176
iap10@3748 177 /* Initialise shared page */
kaf24@5356 178 mpfn = phys_to_machine_mapping(gpfn);
kaf24@5356 179 p = map_domain_page(mpfn);
arun@5608 180 d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
arun@5608 181
arun@5615 182 VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d->domain));
arun@5615 183
arun@5608 184 clear_bit(iopacket_port(d->domain),
arun@5608 185 &d->domain->shared_info->evtchn_mask[0]);
iap10@3290 186
iap10@3290 187 return 0;
iap10@3290 188 }
iap10@3290 189
kaf24@5289 190 void vmx_do_launch(struct vcpu *v)
iap10@3290 191 {
iap10@3290 192 /* Update CR3, GDT, LDT, TR */
iap10@3290 193 unsigned int tr, cpu, error = 0;
iap10@3290 194 struct host_execution_env host_env;
iap10@3290 195 struct Xgt_desc_struct desc;
kaf24@4633 196 unsigned long pfn = 0;
iap10@3290 197 struct pfn_info *page;
kaf24@4923 198 struct cpu_user_regs *regs = guest_cpu_user_regs();
iap10@3290 199
cl349@4856 200 vmx_stts();
cl349@4856 201
kaf24@4633 202 cpu = smp_processor_id();
iap10@3290 203
iap10@3290 204 page = (struct pfn_info *) alloc_domheap_page(NULL);
iap10@3290 205 pfn = (unsigned long) (page - frame_table);
iap10@3290 206
kaf24@5289 207 vmx_setup_platform(v, regs);
iap10@3290 208
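/* Record the host IDT, GDT and TR in the VMCS host-state area so that
 * VM exits resume with the hypervisor's own descriptor tables and TSS. */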
kaf24@5820 209 __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
kaf24@5820 210 host_env.idtr_limit = desc.size;
kaf24@5820 211 host_env.idtr_base = desc.address;
kaf24@5820 212 error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
kaf24@5820 213
arun@4585 214 __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
iap10@3290 215 host_env.gdtr_limit = desc.size;
iap10@3290 216 host_env.gdtr_base = desc.address;
iap10@3290 217 error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
iap10@3290 218
iap10@3290 219 error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
iap10@3290 220 error |= __vmwrite(GUEST_LDTR_BASE, 0);
iap10@3290 221 error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
iap10@3290 222
arun@4585 223 __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
iap10@3290 224 host_env.tr_selector = tr;
iap10@3290 225 host_env.tr_limit = sizeof(struct tss_struct);
iap10@3290 226 host_env.tr_base = (unsigned long) &init_tss[cpu];
iap10@3290 227
iap10@3290 228 error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
iap10@3290 229 error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
iap10@3290 230 error |= __vmwrite(GUEST_TR_BASE, 0);
iap10@3290 231 error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
iap10@3290 232
kaf24@5289 233 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.guest_table));
kaf24@5289 234 __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
kaf24@5414 235 __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
iap10@3290 236
kaf24@5289 237 v->arch.schedule_tail = arch_vmx_do_resume;
iap10@3290 238 }
iap10@3290 239
iap10@3290 240 /*
iap10@3290 241 * Initially set the same environment as host.
iap10@3290 242 */
iap10@3290 243 static inline int
kaf24@4683 244 construct_init_vmcs_guest(struct cpu_user_regs *regs,
kaf24@4683 245 struct vcpu_guest_context *ctxt,
iap10@3290 246 struct host_execution_env *host_env)
iap10@3290 247 {
iap10@3290 248 int error = 0;
iap10@3290 249 union vmcs_arbytes arbytes;
iap10@3290 250 unsigned long dr7;
iap10@3290 251 unsigned long eflags, shadow_cr;
iap10@3290 252
iap10@3290 253 /* MSR */
iap10@3290 254 error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
iap10@3290 255 error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
iap10@3290 256
iap10@3290 257 error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
iap10@3290 258 error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
iap10@3290 259 error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
iap10@3290 260 /* interrupt */
iap10@3290 261 error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
iap10@3290 262 /* mask */
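/* With every bit set in the CR0/CR4 guest/host masks, guest writes to these
 * registers cause VM exits and guest reads return the read-shadow values
 * established below. */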
kaf24@5658 263 error |= __vmwrite(CR0_GUEST_HOST_MASK, -1UL);
kaf24@5658 264 error |= __vmwrite(CR4_GUEST_HOST_MASK, -1UL);
iap10@3290 265
iap10@3290 266 error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
iap10@3290 267 error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
iap10@3290 268
iap10@3290 269 /* TSC */
iap10@3290 270 error |= __vmwrite(TSC_OFFSET, 0);
iap10@3290 271 error |= __vmwrite(CR3_TARGET_COUNT, 0);
iap10@3290 272
iap10@3290 273 /* Guest Selectors */
kaf24@4683 274 error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs);
kaf24@4683 275 error |= __vmwrite(GUEST_ES_SELECTOR, regs->es);
kaf24@4683 276 error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss);
kaf24@4683 277 error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds);
kaf24@4683 278 error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs);
kaf24@4683 279 error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs);
iap10@3290 280
iap10@3290 281 /* Guest segment Limits */
iap10@3290 282 error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 283 error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 284 error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 285 error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 286 error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 287 error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 288
iap10@3290 289 error |= __vmwrite(GUEST_IDTR_LIMIT, host_env->idtr_limit);
iap10@3290 290
iap10@3290 291 /* AR bytes */
iap10@3290 292 arbytes.bytes = 0;
iap10@3290 293 arbytes.fields.seg_type = 0x3; /* type = 3 */
iap10@3290 294 arbytes.fields.s = 1; /* code or data, i.e. not system */
iap10@3290 295 arbytes.fields.dpl = 0; /* DPL = 0 */
iap10@3290 296 arbytes.fields.p = 1; /* segment present */
iap10@3290 297 arbytes.fields.default_ops_size = 1; /* 32-bit */
iap10@3290 298 arbytes.fields.g = 1;
iap10@3290 299 arbytes.fields.null_bit = 0; /* not null */
iap10@3290 300
iap10@3290 301 error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
iap10@3290 302 error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
iap10@3290 303 error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
iap10@3290 304 error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
iap10@3290 305 error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
iap10@3290 306
iap10@3290 307 arbytes.fields.seg_type = 0xb; /* type = 0xb */
iap10@3290 308 error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
iap10@3290 309
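/* The initial guest GDTR is passed in edx (base) and eax (limit), presumably
 * arranged by the domain builder; both registers are cleared once consumed. */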
kaf24@4683 310 error |= __vmwrite(GUEST_GDTR_BASE, regs->edx);
kaf24@4683 311 regs->edx = 0;
kaf24@4683 312 error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax);
kaf24@4683 313 regs->eax = 0;
iap10@3290 314
iap10@3290 315 arbytes.fields.s = 0; /* not code or data segment */
iap10@3290 316 arbytes.fields.seg_type = 0x2; /* LDT */
iap10@3290 317 arbytes.fields.default_ops_size = 0; /* 16-bit */
iap10@3290 318 arbytes.fields.g = 0;
iap10@3290 319 error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
iap10@3290 320
iap10@3290 321 arbytes.fields.seg_type = 0xb; /* 32-bit TSS (busy) */
iap10@3290 322 error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
iap10@3290 323
iap10@3290 324 error |= __vmwrite(GUEST_CR0, host_env->cr0); /* same CR0 */
iap10@3290 325
iap10@3290 326 /* Initially PG is not set in the CR0 read shadow */
iap10@3290 327 shadow_cr = host_env->cr0;
arun@5186 328 shadow_cr &= ~X86_CR0_PG;
iap10@3290 329 error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
kaf24@3755 330 /* CR3 is set in vmx_final_setup_guest */
kaf24@5658 331 #ifdef __x86_64__
kaf24@5727 332 error |= __vmwrite(GUEST_CR4, host_env->cr4 & ~X86_CR4_PSE);
kaf24@5658 333 #else
iap10@3290 334 error |= __vmwrite(GUEST_CR4, host_env->cr4);
kaf24@5658 335 #endif
iap10@3290 336 shadow_cr = host_env->cr4;
kaf24@5658 337
kaf24@5658 338 #ifdef __x86_64__
kaf24@5658 339 shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
kaf24@5658 340 #else
iap10@3290 341 shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
kaf24@5658 342 #endif
iap10@3290 343 error |= __vmwrite(CR4_READ_SHADOW, shadow_cr);
iap10@3290 344
iap10@3290 345 error |= __vmwrite(GUEST_ES_BASE, host_env->ds_base);
iap10@3290 346 error |= __vmwrite(GUEST_CS_BASE, host_env->cs_base);
iap10@3290 347 error |= __vmwrite(GUEST_SS_BASE, host_env->ds_base);
iap10@3290 348 error |= __vmwrite(GUEST_DS_BASE, host_env->ds_base);
iap10@3290 349 error |= __vmwrite(GUEST_FS_BASE, host_env->ds_base);
iap10@3290 350 error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
iap10@3290 351 error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);
iap10@3290 352
kaf24@5414 353 error |= __vmwrite(GUEST_RSP, regs->esp);
kaf24@5414 354 error |= __vmwrite(GUEST_RIP, regs->eip);
iap10@3290 355
kaf24@4683 356 eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
iap10@3290 357 eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */
iap10@3290 358
kaf24@5414 359 error |= __vmwrite(GUEST_RFLAGS, eflags);
iap10@3290 360
iap10@3290 361 error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
iap10@3290 362 __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
iap10@3290 363 error |= __vmwrite(GUEST_DR7, dr7);
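/* The VMCS link pointer must be set to all ones; any other value fails the
 * VM-entry checks. */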
kaf24@5414 364 error |= __vmwrite(VMCS_LINK_POINTER, 0xffffffff);
kaf24@5414 365 error |= __vmwrite(VMCS_LINK_POINTER_HIGH, 0xffffffff);
iap10@3290 366
iap10@3290 367 return error;
iap10@3290 368 }
iap10@3290 369
iap10@3290 370 static inline int construct_vmcs_host(struct host_execution_env *host_env)
iap10@3290 371 {
iap10@3290 372 int error = 0;
iap10@3290 373 unsigned long crn;
iap10@3290 374
iap10@3290 375 /* Host Selectors */
iap10@3290 376 host_env->ds_selector = __HYPERVISOR_DS;
iap10@3290 377 error |= __vmwrite(HOST_ES_SELECTOR, host_env->ds_selector);
iap10@3290 378 error |= __vmwrite(HOST_SS_SELECTOR, host_env->ds_selector);
iap10@3290 379 error |= __vmwrite(HOST_DS_SELECTOR, host_env->ds_selector);
kaf24@5658 380 #if defined (__i386__)
iap10@3290 381 error |= __vmwrite(HOST_FS_SELECTOR, host_env->ds_selector);
iap10@3290 382 error |= __vmwrite(HOST_GS_SELECTOR, host_env->ds_selector);
kaf24@5658 383 error |= __vmwrite(HOST_FS_BASE, host_env->ds_base);
kaf24@5658 384 error |= __vmwrite(HOST_GS_BASE, host_env->ds_base);
iap10@3290 385
kaf24@5658 386 #else
kaf24@5658 387 rdmsrl(MSR_FS_BASE, host_env->fs_base);
kaf24@5658 388 rdmsrl(MSR_GS_BASE, host_env->gs_base);
kaf24@5658 389 error |= __vmwrite(HOST_FS_BASE, host_env->fs_base);
kaf24@5658 390 error |= __vmwrite(HOST_GS_BASE, host_env->gs_base);
kaf24@5658 391
kaf24@5658 392 #endif
iap10@3290 393 host_env->cs_selector = __HYPERVISOR_CS;
iap10@3290 394 error |= __vmwrite(HOST_CS_SELECTOR, host_env->cs_selector);
iap10@3290 395
iap10@3290 396 host_env->ds_base = 0;
iap10@3290 397 host_env->cs_base = 0;
iap10@3290 398
kaf24@5193 399 __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
iap10@3290 400 host_env->cr0 = crn;
iap10@3290 401 error |= __vmwrite(HOST_CR0, crn); /* same CR0 */
iap10@3290 402
iap10@3290 403 /* CR3 is set in vmx_final_setup_hostos */
kaf24@5193 404 __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
iap10@3290 405 host_env->cr4 = crn;
iap10@3290 406 error |= __vmwrite(HOST_CR4, crn);
kaf24@5820 407
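/* Every VM exit resumes the hypervisor at HOST_RIP (vmx_asm_vmexit_handler),
 * with the stack taken from HOST_RSP as set up in vmx_do_launch. */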
kaf24@5414 408 error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
kaf24@5658 409 #ifdef __x86_64__
kaf24@5658 410 /* TBD: support cr8 for 64-bit guest */
kaf24@5658 411 __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
kaf24@5658 412 __vmwrite(TPR_THRESHOLD, 0);
kaf24@5658 413 __vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);
kaf24@5658 414 #endif
iap10@3290 415
iap10@3290 416 return error;
iap10@3290 417 }
iap10@3290 418
iap10@3290 419 /*
iap10@3290 420 * Need to extend to support full virtualization.
iap10@3290 421 * The variable use_host_env indicates whether the new VMCS should use
iap10@3290 422 * the same setup as the host (xenolinux).
iap10@3290 423 */
iap10@3290 424
iap10@3290 425 int construct_vmcs(struct arch_vmx_struct *arch_vmx,
kaf24@4683 426 struct cpu_user_regs *regs,
kaf24@4683 427 struct vcpu_guest_context *ctxt,
iap10@3290 428 int use_host_env)
iap10@3290 429 {
iap10@3290 430 int error;
iap10@3290 431 u64 vmcs_phys_ptr;
iap10@3290 432
iap10@3290 433 struct host_execution_env host_env;
iap10@3290 434
iap10@3290 435 if (use_host_env != VMCS_USE_HOST_ENV)
iap10@3290 436 return -EINVAL;
iap10@3290 437
iap10@3290 438 memset(&host_env, 0, sizeof(struct host_execution_env));
iap10@3290 439
iap10@3290 440 vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
iap10@3290 441
iap10@3290 442 if ((error = __vmpclear (vmcs_phys_ptr))) {
iap10@3290 443 printk("construct_vmcs: VMCLEAR failed\n");
iap10@3290 444 return -EINVAL;
iap10@3290 445 }
iap10@3290 446 if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
iap10@3290 447 printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
iap10@3290 448 (unsigned long) vmcs_phys_ptr);
iap10@3290 449 return -EINVAL;
iap10@3290 450 }
kaf24@5836 451 if ((error = construct_vmcs_controls(arch_vmx))) {
iap10@3290 452 printk("construct_vmcs: construct_vmcs_controls failed\n");
iap10@3290 453 return -EINVAL;
iap10@3290 454 }
iap10@3290 455 /* host selectors */
iap10@3290 456 if ((error = construct_vmcs_host(&host_env))) {
iap10@3290 457 printk("construct_vmcs: construct_vmcs_host failed\n");
iap10@3290 458 return -EINVAL;
iap10@3290 459 }
iap10@3290 460 /* guest selectors */
kaf24@4683 461 if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) {
iap10@3290 462 printk("construct_vmcs: construct_init_vmcs_guest failed\n");
iap10@3290 463 return -EINVAL;
iap10@3290 464 }
iap10@3290 465
iap10@3290 466 if ((error |= __vmwrite(EXCEPTION_BITMAP,
iap10@3290 467 MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
iap10@3290 468 printk("construct_vmcs: setting Exception bitmap failed\n");
iap10@3290 469 return -EINVAL;
iap10@3290 470 }
iap10@3290 471
kaf24@5821 472 if (regs->eflags & EF_TF)
kaf24@5821 473 __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
kaf24@5821 474 else
kaf24@5821 475 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
kaf24@5821 476
kaf24@5821 477 return 0;
kaf24@5821 478 }
kaf24@5821 479
kaf24@5821 480 /*
kaf24@5821 481 * modify guest eflags and exception bitmap for gdb
kaf24@5821 482 */
kaf24@5821 483 int modify_vmcs(struct arch_vmx_struct *arch_vmx,
kaf24@5821 484 struct cpu_user_regs *regs)
kaf24@5821 485 {
kaf24@5821 486 int error;
kaf24@5821 487 u64 vmcs_phys_ptr, old, old_phys_ptr;
kaf24@5821 488 vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
kaf24@5821 489
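/* Save the pointer to whatever VMCS is currently loaded, switch to this
 * VCPU's VMCS to apply the debugger-supplied register state, then switch
 * back at the end. */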
kaf24@5821 490 old_phys_ptr = virt_to_phys(&old);
kaf24@5821 491 __vmptrst(old_phys_ptr);
kaf24@5821 492 if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
kaf24@5821 493 printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
kaf24@5821 494 (unsigned long) vmcs_phys_ptr);
kaf24@5821 495 return -EINVAL;
kaf24@5821 496 }
kaf24@5821 497 load_cpu_user_regs(regs);
kaf24@5821 498
kaf24@5821 499 __vmptrld(old_phys_ptr);
kaf24@5821 500
iap10@3290 501 return 0;
iap10@3290 502 }
iap10@3290 503
iap10@3290 504 int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
iap10@3290 505 {
iap10@3290 506 int error;
iap10@3290 507
iap10@3290 508 if ((error = __vmptrld(phys_ptr))) {
iap10@3290 509 clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 510 return error;
iap10@3290 511 }
iap10@3290 512 set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 513 return 0;
iap10@3290 514 }
iap10@3290 515
iap10@3290 516 int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
iap10@3290 517 {
iap10@3290 518 /* take the current VMCS */
iap10@3290 519 __vmptrst(phys_ptr);
iap10@3290 520 clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 521 return 0;
iap10@3290 522 }
iap10@3290 523
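/* Handlers for a failed VMLAUNCH/VMRESUME. When a current VMCS exists (the
 * "VMfailValid" case), the VM-instruction error field holds a numeric code
 * identifying the cause of the failure. */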
iap10@3290 524 void vm_launch_fail(unsigned long eflags)
iap10@3290 525 {
kaf24@6099 526 unsigned long error;
kaf24@6099 527 __vmread(VM_INSTRUCTION_ERROR, &error);
kaf24@6099 528 printk("<vm_launch_fail> error code %lx\n", error);
arun@5382 529 __vmx_bug(guest_cpu_user_regs());
iap10@3290 530 }
iap10@3290 531
iap10@3290 532 void vm_resume_fail(unsigned long eflags)
iap10@3290 533 {
kaf24@6099 534 unsigned long error;
kaf24@6099 535 __vmread(VM_INSTRUCTION_ERROR, &error);
kaf24@6099 536 printk("<vm_resume_fail> error code %lx\n", error);
arun@5382 537 __vmx_bug(guest_cpu_user_regs());
iap10@3290 538 }
iap10@3290 539
mafetter@3717 540 #endif /* CONFIG_VMX */
kaf24@3914 541
kaf24@3914 542 /*
kaf24@3914 543 * Local variables:
kaf24@3914 544 * mode: C
kaf24@3914 545 * c-set-style: "BSD"
kaf24@3914 546 * c-basic-offset: 4
kaf24@3914 547 * tab-width: 4
kaf24@3914 548 * indent-tabs-mode: nil
kaf24@3988 549 * End:
kaf24@3914 550 */