xen-vtx-unstable

annotate xen/arch/x86/vmx_vmcs.c @ 5722:f261f14b9781

We have now extended the patch to support x86_64 domU as well. Shadow
mode survived with the domU running lmbench, ltp, kernbench, etc., while
a script enabled shadow LOGDIRTY mode, issued CLEAN, and then disabled
shadow mode again at 5-second intervals in an infinite loop; thanks to
Ian for providing the Python script. We tested x86 domU and VMX domains
as well. The big files are all new and are used for 64-bit only. Please
apply.
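
For reference, a minimal dom0-side sketch of that 5-second LOGDIRTY/CLEAN/OFF
loop is given below. It is an approximation only, not the Python script
mentioned above: it assumes libxc's xc_interface_open()/xc_shadow_control()
calls and the DOM0_SHADOW_CONTROL_OP_* constants of this era, and the header
name should be checked against tools/libxc in this tree.

    /* Hedged sketch of the shadow log-dirty toggle test (assumptions noted
     * above; error handling omitted for brevity). */
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <xenctrl.h>   /* assumption: may be <xc.h> in this tree */

    int main(int argc, char *argv[])
    {
        int xc_handle = xc_interface_open();
        uint32_t domid = (uint32_t)atoi(argv[1]);

        for (;;) {
            xc_shadow_control(xc_handle, domid,
                              DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
                              NULL, 0, NULL);      /* enable log-dirty mode */
            sleep(5);
            xc_shadow_control(xc_handle, domid,
                              DOM0_SHADOW_CONTROL_OP_CLEAN,
                              NULL, 0, NULL);      /* clean the dirty bitmap */
            sleep(5);
            xc_shadow_control(xc_handle, domid,
                              DOM0_SHADOW_CONTROL_OP_OFF,
                              NULL, 0, NULL);      /* disable shadow mode */
            sleep(5);
        }
        return 0;
    }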

We also verified that shadow_64.c and shadow_public.c can be built for
x86 and work fine there. We can provide a small patch that enables this
(once the code is in ;-).

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Chengyuan Li <chengyuan.li@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jul 11 09:14:11 2005 +0000 (2005-07-11)
parents ff5d7ccd8d69
children ba925b4aef28 56a63f9f378f
rev   line source
iap10@3290 1 /*
iap10@3290 2 * vmx_vmcs.c: VMCS management
iap10@3290 3 * Copyright (c) 2004, Intel Corporation.
iap10@3290 4 *
iap10@3290 5 * This program is free software; you can redistribute it and/or modify it
iap10@3290 6 * under the terms and conditions of the GNU General Public License,
iap10@3290 7 * version 2, as published by the Free Software Foundation.
iap10@3290 8 *
iap10@3290 9 * This program is distributed in the hope it will be useful, but WITHOUT
iap10@3290 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
iap10@3290 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
iap10@3290 12 * more details.
iap10@3290 13 *
iap10@3290 14 * You should have received a copy of the GNU General Public License along with
iap10@3290 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
iap10@3290 16 * Place - Suite 330, Boston, MA 02111-1307 USA.
iap10@3290 17 *
iap10@3290 18 */
iap10@3290 19
iap10@3290 20 #include <xen/config.h>
iap10@3290 21 #include <xen/init.h>
iap10@3290 22 #include <xen/mm.h>
iap10@3290 23 #include <xen/lib.h>
iap10@3290 24 #include <xen/errno.h>
kaf24@5356 25 #include <xen/domain_page.h>
cl349@5291 26 #include <asm/current.h>
iap10@3290 27 #include <asm/cpufeature.h>
iap10@3290 28 #include <asm/processor.h>
iap10@3290 29 #include <asm/msr.h>
iap10@3290 30 #include <asm/vmx.h>
kaf24@5722 31 #include <asm/flushtlb.h>
iap10@3290 32 #include <xen/event.h>
iap10@3290 33 #include <xen/kernel.h>
iap10@3290 34 #include <public/io/ioreq.h>
kaf24@5722 35 #if CONFIG_PAGING_LEVELS >= 4
kaf24@5722 36 #include <asm/shadow_64.h>
kaf24@5722 37 #endif
mafetter@3717 38 #ifdef CONFIG_VMX
mafetter@3717 39
iap10@3290 40 struct vmcs_struct *alloc_vmcs(void)
iap10@3290 41 {
iap10@3290 42 struct vmcs_struct *vmcs;
kaf24@5059 43 u32 vmx_msr_low, vmx_msr_high;
iap10@3290 44
kaf24@5059 45 rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
kaf24@5059 46 vmcs_size = vmx_msr_high & 0x1fff;
kaf24@5398 47 vmcs = alloc_xenheap_pages(get_order(vmcs_size));
kaf24@5398 48 memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
iap10@3290 49
kaf24@5059 50 vmcs->vmcs_revision_id = vmx_msr_low;
iap10@3290 51 return vmcs;
iap10@3290 52 }
iap10@3290 53
iap10@3290 54 void free_vmcs(struct vmcs_struct *vmcs)
iap10@3290 55 {
iap10@3290 56 int order;
iap10@3290 57
iap10@3290 58 order = get_order(vmcs_size); /* must match the allocation in alloc_vmcs() */
kaf24@5398 59 free_xenheap_pages(vmcs, order);
iap10@3290 60 }
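
As a usage note, the intended lifecycle of a VMCS region, pieced together from
the functions in this file (construct_vmcs() below is what actually performs
the VMCLEAR/VMPTRLD steps), is roughly the following sketch; error handling is
elided and it is not a drop-in snippet.

    /* Sketch only, assembled from functions defined in this file. */
    static int vmcs_lifecycle_sketch(struct arch_vmx_struct *arch_vmx)
    {
        struct vmcs_struct *vmcs = alloc_vmcs();  /* sized from IA32_VMX_BASIC */
        u64 phys = (u64)virt_to_phys(vmcs);

        arch_vmx->vmcs = vmcs;
        __vmpclear(phys);             /* put the region into the clear state */
        load_vmcs(arch_vmx, phys);    /* VMPTRLD, sets ARCH_VMX_VMCS_LOADED */
        /* ... __vmwrite() the control, host and guest fields here ... */
        store_vmcs(arch_vmx, phys);   /* VMPTRST when relinquishing the VMCS */
        free_vmcs(vmcs);              /* return the xenheap pages */
        return 0;
    }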
iap10@3290 61
iap10@3290 62 static inline int construct_vmcs_controls(void)
iap10@3290 63 {
iap10@3290 64 int error = 0;
iap10@3290 65
iap10@3290 66 error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
iap10@3290 67 MONITOR_PIN_BASED_EXEC_CONTROLS);
iap10@3290 68
iap10@3290 69 error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
iap10@3290 70 MONITOR_CPU_BASED_EXEC_CONTROLS);
kaf24@5658 71 #if defined (__x86_64__)
kaf24@5658 72 error |= __vmwrite(VM_EXIT_CONTROLS,
kaf24@5658 73 MONITOR_VM_EXIT_CONTROLS | VM_EXIT_CONTROLS_IA_32E_MODE);
kaf24@5658 74 #else
iap10@3290 75 error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
kaf24@5658 76 #endif
iap10@3290 77 error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
iap10@3290 78
iap10@3290 79 return error;
iap10@3290 80 }
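
The pin-based, CPU-based, exit and entry control values written above are
fixed constants. As an illustration only (this is not code from this tree),
the architectural way to make such a value legal on a given CPU is to fold in
the allowed-0/allowed-1 bits reported by the VMX capability MSRs, e.g.
IA32_VMX_PINBASED_CTLS (0x481) and IA32_VMX_PROCBASED_CTLS (0x482):

    /* Illustration only: adjust a desired execution-control value against a
     * VMX capability MSR.  The low dword reports bits that must be 1, the
     * high dword reports bits that may be 1. */
    static u32 adjust_vmx_control(u32 msr, u32 wanted)
    {
        u32 vmx_msr_low, vmx_msr_high;

        rdmsr(msr, vmx_msr_low, vmx_msr_high);
        wanted |= vmx_msr_low;    /* bits set here must be 1 in the control */
        wanted &= vmx_msr_high;   /* bits clear here must be 0 in the control */
        return wanted;
    }

A caller would then write, for example, __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
adjust_vmx_control(0x481, MONITOR_PIN_BASED_EXEC_CONTROLS)).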
iap10@3290 81
iap10@3290 82 #define GUEST_SEGMENT_LIMIT 0xffffffff
iap10@3290 83 #define HOST_SEGMENT_LIMIT 0xffffffff
iap10@3290 84
iap10@3290 85 struct host_execution_env {
iap10@3290 86 /* selectors */
iap10@3290 87 unsigned short ldtr_selector;
iap10@3290 88 unsigned short tr_selector;
iap10@3290 89 unsigned short ds_selector;
iap10@3290 90 unsigned short cs_selector;
iap10@3290 91 /* limits */
iap10@3290 92 unsigned short gdtr_limit;
iap10@3290 93 unsigned short ldtr_limit;
iap10@3290 94 unsigned short idtr_limit;
iap10@3290 95 unsigned short tr_limit;
iap10@3290 96 /* base */
iap10@3290 97 unsigned long gdtr_base;
iap10@3290 98 unsigned long ldtr_base;
iap10@3290 99 unsigned long idtr_base;
iap10@3290 100 unsigned long tr_base;
iap10@3290 101 unsigned long ds_base;
iap10@3290 102 unsigned long cs_base;
kaf24@5658 103 #ifdef __x86_64__
kaf24@5658 104 unsigned long fs_base;
kaf24@5658 105 unsigned long gs_base;
kaf24@5658 106 #endif
kaf24@5658 107
iap10@3290 108 /* control registers */
iap10@3290 109 unsigned long cr3;
iap10@3290 110 unsigned long cr0;
iap10@3290 111 unsigned long cr4;
iap10@3290 112 unsigned long dr7;
iap10@3290 113 };
iap10@3290 114
iap10@3290 115 #define round_pgdown(_p) ((_p)&PAGE_MASK) /* copied from domain.c */
iap10@3290 116
kaf24@5289 117 int vmx_setup_platform(struct vcpu *d, struct cpu_user_regs *regs)
iap10@3290 118 {
iap10@3290 119 int i;
iap10@3290 120 unsigned int n;
iap10@3290 121 unsigned long *p, mpfn, offset, addr;
iap10@3290 122 struct e820entry *e820p;
iap10@3290 123 unsigned long gpfn = 0;
iap10@3290 124
kaf24@4683 125 regs->ebx = 0; /* Linux expects ebx to be 0 for boot proc */
iap10@3290 126
kaf24@4683 127 n = regs->ecx;
iap10@3290 128 if (n > 32) {
maf46@3855 129 VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n);
iap10@3290 130 return -1;
iap10@3290 131 }
iap10@3290 132
kaf24@4683 133 addr = regs->edi;
iap10@3290 134 offset = (addr & ~PAGE_MASK);
iap10@3290 135 addr = round_pgdown(addr);
kaf24@5356 136
iap10@3707 137 mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
kaf24@5356 138 p = map_domain_page(mpfn);
iap10@3290 139
iap10@3290 140 e820p = (struct e820entry *) ((unsigned long) p + offset);
iap10@3290 141
maf46@3880 142 #ifndef NDEBUG
maf46@3880 143 print_e820_memory_map(e820p, n);
maf46@3880 144 #endif
maf46@3880 145
kaf24@5356 146 for ( i = 0; i < n; i++ )
kaf24@5356 147 {
kaf24@5356 148 if ( e820p[i].type == E820_SHARED_PAGE )
kaf24@5356 149 {
iap10@3290 150 gpfn = (e820p[i].addr >> PAGE_SHIFT);
iap10@3290 151 break;
iap10@3290 152 }
iap10@3290 153 }
iap10@3290 154
kaf24@5356 155 if ( gpfn == 0 )
kaf24@5356 156 {
kaf24@5356 157 unmap_domain_page(p);
iap10@3290 158 return -1;
iap10@3290 159 }
iap10@3290 160
kaf24@5356 161 unmap_domain_page(p);
iap10@3748 162
iap10@3748 163 /* Initialise shared page */
kaf24@5356 164 mpfn = phys_to_machine_mapping(gpfn);
kaf24@5356 165 p = map_domain_page(mpfn);
arun@5608 166 d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
arun@5608 167
arun@5615 168 VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d->domain));
arun@5615 169
arun@5608 170 clear_bit(iopacket_port(d->domain),
arun@5608 171 &d->domain->shared_info->evtchn_mask[0]);
iap10@3290 172
iap10@3290 173 return 0;
iap10@3290 174 }
iap10@3290 175
kaf24@5289 176 void vmx_do_launch(struct vcpu *v)
iap10@3290 177 {
iap10@3290 178 /* Update CR3, GDT, LDT, TR */
iap10@3290 179 unsigned int tr, cpu, error = 0;
iap10@3290 180 struct host_execution_env host_env;
iap10@3290 181 struct Xgt_desc_struct desc;
kaf24@4633 182 unsigned long pfn = 0;
iap10@3290 183 struct pfn_info *page;
kaf24@4923 184 struct cpu_user_regs *regs = guest_cpu_user_regs();
iap10@3290 185
cl349@4856 186 vmx_stts();
cl349@4856 187
kaf24@4633 188 cpu = smp_processor_id();
iap10@3290 189
iap10@3290 190 page = (struct pfn_info *) alloc_domheap_page(NULL);
iap10@3290 191 pfn = (unsigned long) (page - frame_table);
iap10@3290 192
kaf24@5289 193 vmx_setup_platform(v, regs);
iap10@3290 194
arun@4585 195 __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
iap10@3290 196 host_env.gdtr_limit = desc.size;
iap10@3290 197 host_env.gdtr_base = desc.address;
iap10@3290 198
iap10@3290 199 error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
iap10@3290 200
iap10@3290 201 error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
iap10@3290 202 error |= __vmwrite(GUEST_LDTR_BASE, 0);
iap10@3290 203 error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
iap10@3290 204
arun@4585 205 __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
iap10@3290 206 host_env.tr_selector = tr;
iap10@3290 207 host_env.tr_limit = sizeof(struct tss_struct);
iap10@3290 208 host_env.tr_base = (unsigned long) &init_tss[cpu];
iap10@3290 209
iap10@3290 210 error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
iap10@3290 211 error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
iap10@3290 212 error |= __vmwrite(GUEST_TR_BASE, 0);
iap10@3290 213 error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
iap10@3290 214
kaf24@5289 215 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.guest_table));
kaf24@5289 216 __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
kaf24@5414 217 __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
iap10@3290 218
kaf24@5289 219 v->arch.schedule_tail = arch_vmx_do_resume;
iap10@3290 220 }
iap10@3290 221
iap10@3290 222 /*
iap10@3290 223 * Initially set the same environment as the host.
iap10@3290 224 */
iap10@3290 225 static inline int
kaf24@4683 226 construct_init_vmcs_guest(struct cpu_user_regs *regs,
kaf24@4683 227 struct vcpu_guest_context *ctxt,
iap10@3290 228 struct host_execution_env *host_env)
iap10@3290 229 {
iap10@3290 230 int error = 0;
iap10@3290 231 union vmcs_arbytes arbytes;
iap10@3290 232 unsigned long dr7;
iap10@3290 233 unsigned long eflags, shadow_cr;
iap10@3290 234
iap10@3290 235 /* MSR */
iap10@3290 236 error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
iap10@3290 237 error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
iap10@3290 238
iap10@3290 239 error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
iap10@3290 240 error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
iap10@3290 241 error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
iap10@3290 242 /* interrupt */
iap10@3290 243 error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
iap10@3290 244 /* mask */
kaf24@5658 245 error |= __vmwrite(CR0_GUEST_HOST_MASK, -1UL);
kaf24@5658 246 error |= __vmwrite(CR4_GUEST_HOST_MASK, -1UL);
iap10@3290 247
iap10@3290 248 error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
iap10@3290 249 error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
iap10@3290 250
iap10@3290 251 /* TSC */
iap10@3290 252 error |= __vmwrite(TSC_OFFSET, 0);
iap10@3290 253 error |= __vmwrite(CR3_TARGET_COUNT, 0);
iap10@3290 254
iap10@3290 255 /* Guest Selectors */
kaf24@4683 256 error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs);
kaf24@4683 257 error |= __vmwrite(GUEST_ES_SELECTOR, regs->es);
kaf24@4683 258 error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss);
kaf24@4683 259 error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds);
kaf24@4683 260 error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs);
kaf24@4683 261 error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs);
iap10@3290 262
iap10@3290 263 /* Guest segment Limits */
iap10@3290 264 error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 265 error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 266 error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 267 error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 268 error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 269 error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
iap10@3290 270
iap10@3290 271 error |= __vmwrite(GUEST_IDTR_LIMIT, host_env->idtr_limit);
iap10@3290 272
iap10@3290 273 /* AR bytes */
iap10@3290 274 arbytes.bytes = 0;
iap10@3290 275 arbytes.fields.seg_type = 0x3; /* type = 3 */
iap10@3290 276 arbytes.fields.s = 1; /* code or data, i.e. not system */
iap10@3290 277 arbytes.fields.dpl = 0; /* DPL = 0 */
iap10@3290 278 arbytes.fields.p = 1; /* segment present */
iap10@3290 279 arbytes.fields.default_ops_size = 1; /* 32-bit */
iap10@3290 280 arbytes.fields.g = 1;
iap10@3290 281 arbytes.fields.null_bit = 0; /* not null */
iap10@3290 282
iap10@3290 283 error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
iap10@3290 284 error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
iap10@3290 285 error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
iap10@3290 286 error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
iap10@3290 287 error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
iap10@3290 288
iap10@3290 289 arbytes.fields.seg_type = 0xb; /* type = 0xb */
iap10@3290 290 error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
iap10@3290 291
kaf24@4683 292 error |= __vmwrite(GUEST_GDTR_BASE, regs->edx);
kaf24@4683 293 regs->edx = 0;
kaf24@4683 294 error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax);
kaf24@4683 295 regs->eax = 0;
iap10@3290 296
iap10@3290 297 arbytes.fields.s = 0; /* not a code or data segment */
iap10@3290 298 arbytes.fields.seg_type = 0x2; /* LDT */
iap10@3290 299 arbytes.fields.default_ops_size = 0; /* 16-bit */
iap10@3290 300 arbytes.fields.g = 0;
iap10@3290 301 error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
iap10@3290 302
iap10@3290 303 arbytes.fields.seg_type = 0xb; /* 32-bit TSS (busy) */
iap10@3290 304 error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
iap10@3290 305
iap10@3290 306 error |= __vmwrite(GUEST_CR0, host_env->cr0); /* same CR0 */
iap10@3290 307
iap10@3290 308 /* Initially PG is not set */
iap10@3290 309 shadow_cr = host_env->cr0;
arun@5186 310 shadow_cr &= ~X86_CR0_PG;
iap10@3290 311 error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
kaf24@3755 312 /* CR3 is set in vmx_final_setup_guest */
kaf24@5658 313 #ifdef __x86_64__
kaf24@5658 314 error |= __vmwrite(GUEST_CR4, host_env->cr4 & ~X86_CR4_PAE);
kaf24@5658 315 printk("construct_init_vmcs_guest: guest CR4 is %lx\n", host_env->cr4 );
kaf24@5658 316 #else
iap10@3290 317 error |= __vmwrite(GUEST_CR4, host_env->cr4);
kaf24@5658 318 #endif
iap10@3290 319 shadow_cr = host_env->cr4;
kaf24@5658 320
kaf24@5658 321 #ifdef __x86_64__
kaf24@5658 322 shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
kaf24@5658 323 #else
iap10@3290 324 shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
kaf24@5658 325 #endif
iap10@3290 326 error |= __vmwrite(CR4_READ_SHADOW, shadow_cr);
iap10@3290 327
iap10@3290 328 error |= __vmwrite(GUEST_ES_BASE, host_env->ds_base);
iap10@3290 329 error |= __vmwrite(GUEST_CS_BASE, host_env->cs_base);
iap10@3290 330 error |= __vmwrite(GUEST_SS_BASE, host_env->ds_base);
iap10@3290 331 error |= __vmwrite(GUEST_DS_BASE, host_env->ds_base);
iap10@3290 332 error |= __vmwrite(GUEST_FS_BASE, host_env->ds_base);
iap10@3290 333 error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
iap10@3290 334 error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);
iap10@3290 335
kaf24@5414 336 error |= __vmwrite(GUEST_RSP, regs->esp);
kaf24@5414 337 error |= __vmwrite(GUEST_RIP, regs->eip);
iap10@3290 338
kaf24@4683 339 eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
iap10@3290 340 eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */
iap10@3290 341
kaf24@5414 342 error |= __vmwrite(GUEST_RFLAGS, eflags);
iap10@3290 343
iap10@3290 344 error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
iap10@3290 345 __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
iap10@3290 346 error |= __vmwrite(GUEST_DR7, dr7);
kaf24@5414 347 error |= __vmwrite(VMCS_LINK_POINTER, 0xffffffff);
kaf24@5414 348 error |= __vmwrite(VMCS_LINK_POINTER_HIGH, 0xffffffff);
iap10@3290 349
iap10@3290 350 return error;
iap10@3290 351 }
iap10@3290 352
iap10@3290 353 static inline int construct_vmcs_host(struct host_execution_env *host_env)
iap10@3290 354 {
iap10@3290 355 int error = 0;
iap10@3290 356 unsigned long crn;
iap10@3290 357 struct Xgt_desc_struct desc;
iap10@3290 358
iap10@3290 359 /* Host Selectors */
iap10@3290 360 host_env->ds_selector = __HYPERVISOR_DS;
iap10@3290 361 error |= __vmwrite(HOST_ES_SELECTOR, host_env->ds_selector);
iap10@3290 362 error |= __vmwrite(HOST_SS_SELECTOR, host_env->ds_selector);
iap10@3290 363 error |= __vmwrite(HOST_DS_SELECTOR, host_env->ds_selector);
kaf24@5658 364 #if defined (__i386__)
iap10@3290 365 error |= __vmwrite(HOST_FS_SELECTOR, host_env->ds_selector);
iap10@3290 366 error |= __vmwrite(HOST_GS_SELECTOR, host_env->ds_selector);
kaf24@5658 367 error |= __vmwrite(HOST_FS_BASE, host_env->ds_base);
kaf24@5658 368 error |= __vmwrite(HOST_GS_BASE, host_env->ds_base);
iap10@3290 369
kaf24@5658 370 #else
kaf24@5658 371 rdmsrl(MSR_FS_BASE, host_env->fs_base);
kaf24@5658 372 rdmsrl(MSR_GS_BASE, host_env->gs_base);
kaf24@5658 373 error |= __vmwrite(HOST_FS_BASE, host_env->fs_base);
kaf24@5658 374 error |= __vmwrite(HOST_GS_BASE, host_env->gs_base);
kaf24@5658 375
kaf24@5658 376 #endif
iap10@3290 377 host_env->cs_selector = __HYPERVISOR_CS;
iap10@3290 378 error |= __vmwrite(HOST_CS_SELECTOR, host_env->cs_selector);
iap10@3290 379
iap10@3290 380 host_env->ds_base = 0;
iap10@3290 381 host_env->cs_base = 0;
iap10@3290 382
iap10@3290 383 /* Debug */
arun@4585 384 __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
iap10@3290 385 host_env->idtr_limit = desc.size;
iap10@3290 386 host_env->idtr_base = desc.address;
iap10@3290 387 error |= __vmwrite(HOST_IDTR_BASE, host_env->idtr_base);
iap10@3290 388
kaf24@5193 389 __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
arun@4588 390
iap10@3290 391 host_env->cr0 = crn;
iap10@3290 392 error |= __vmwrite(HOST_CR0, crn); /* same CR0 */
iap10@3290 393
iap10@3290 394 /* CR3 is set in vmx_final_setup_hostos */
kaf24@5193 395 __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
iap10@3290 396 host_env->cr4 = crn;
iap10@3290 397 error |= __vmwrite(HOST_CR4, crn);
kaf24@5414 398 error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
kaf24@5658 399 #ifdef __x86_64__
kaf24@5658 400 /* TBD: support cr8 for 64-bit guest */
kaf24@5658 401 __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
kaf24@5658 402 __vmwrite(TPR_THRESHOLD, 0);
kaf24@5658 403 __vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);
kaf24@5658 404 #endif
iap10@3290 405
iap10@3290 406 return error;
iap10@3290 407 }
iap10@3290 408
iap10@3290 409 /*
iap10@3290 410 * Need to extend to support full virtualization.
iap10@3290 411 * The variable use_host_env indicates whether the new VMCS needs to use
iap10@3290 412 * the same setup as the host (xenolinux).
iap10@3290 413 */
iap10@3290 414
iap10@3290 415 int construct_vmcs(struct arch_vmx_struct *arch_vmx,
kaf24@4683 416 struct cpu_user_regs *regs,
kaf24@4683 417 struct vcpu_guest_context *ctxt,
iap10@3290 418 int use_host_env)
iap10@3290 419 {
iap10@3290 420 int error;
iap10@3290 421 u64 vmcs_phys_ptr;
iap10@3290 422
iap10@3290 423 struct host_execution_env host_env;
iap10@3290 424
iap10@3290 425 if (use_host_env != VMCS_USE_HOST_ENV)
iap10@3290 426 return -EINVAL;
iap10@3290 427
iap10@3290 428 memset(&host_env, 0, sizeof(struct host_execution_env));
iap10@3290 429
iap10@3290 430 vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
iap10@3290 431
iap10@3290 432 if ((error = __vmpclear (vmcs_phys_ptr))) {
iap10@3290 433 printk("construct_vmcs: VMCLEAR failed\n");
iap10@3290 434 return -EINVAL;
iap10@3290 435 }
iap10@3290 436 if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
iap10@3290 437 printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
iap10@3290 438 (unsigned long) vmcs_phys_ptr);
iap10@3290 439 return -EINVAL;
iap10@3290 440 }
iap10@3290 441 if ((error = construct_vmcs_controls())) {
iap10@3290 442 printk("construct_vmcs: construct_vmcs_controls failed\n");
iap10@3290 443 return -EINVAL;
iap10@3290 444 }
iap10@3290 445 /* host selectors */
iap10@3290 446 if ((error = construct_vmcs_host(&host_env))) {
iap10@3290 447 printk("construct_vmcs: construct_vmcs_host failed\n");
iap10@3290 448 return -EINVAL;
iap10@3290 449 }
iap10@3290 450 /* guest selectors */
kaf24@4683 451 if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) {
iap10@3290 452 printk("construct_vmcs: construct_init_vmcs_guest failed\n");
iap10@3290 453 return -EINVAL;
iap10@3290 454 }
iap10@3290 455
iap10@3290 456 if ((error |= __vmwrite(EXCEPTION_BITMAP,
iap10@3290 457 MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
iap10@3290 458 printk("construct_vmcs: setting Exception bitmap failed\n");
iap10@3290 459 return -EINVAL;
iap10@3290 460 }
iap10@3290 461
iap10@3290 462 return 0;
iap10@3290 463 }
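
For callers, the use_host_env check above implies the expected invocation is
with VMCS_USE_HOST_ENV, roughly as in the sketch below; the vcpu-side field
name (v->arch.arch_vmx) is an assumption based on the surrounding VMX code of
this era, not something defined in this file.

    /* Hedged caller sketch; the field layout of struct vcpu is assumed. */
    static int setup_vmcs_sketch(struct vcpu *v,
                                 struct cpu_user_regs *regs,
                                 struct vcpu_guest_context *ctxt)
    {
        struct arch_vmx_struct *arch_vmx = &v->arch.arch_vmx;  /* assumed */
        int rc;

        arch_vmx->vmcs = alloc_vmcs();
        if ((rc = construct_vmcs(arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV)) != 0)
            printk("setup_vmcs_sketch: construct_vmcs failed (%d)\n", rc);
        return rc;
    }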
iap10@3290 464
iap10@3290 465 int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
iap10@3290 466 {
iap10@3290 467 int error;
iap10@3290 468
iap10@3290 469 if ((error = __vmptrld(phys_ptr))) {
iap10@3290 470 clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 471 return error;
iap10@3290 472 }
iap10@3290 473 set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 474 return 0;
iap10@3290 475 }
iap10@3290 476
iap10@3290 477 int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
iap10@3290 478 {
iap10@3290 479 /* take the current VMCS */
iap10@3290 480 __vmptrst(phys_ptr);
iap10@3290 481 clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 482 return 0;
iap10@3290 483 }
iap10@3290 484
iap10@3290 485 void vm_launch_fail(unsigned long eflags)
iap10@3290 486 {
arun@5382 487 __vmx_bug(guest_cpu_user_regs());
iap10@3290 488 }
iap10@3290 489
iap10@3290 490 void vm_resume_fail(unsigned long eflags)
iap10@3290 491 {
arun@5382 492 __vmx_bug(guest_cpu_user_regs());
iap10@3290 493 }
iap10@3290 494
mafetter@3717 495 #endif /* CONFIG_VMX */
kaf24@3914 496
kaf24@3914 497 /*
kaf24@3914 498 * Local variables:
kaf24@3914 499 * mode: C
kaf24@3914 500 * c-set-style: "BSD"
kaf24@3914 501 * c-basic-offset: 4
kaf24@3914 502 * tab-width: 4
kaf24@3914 503 * indent-tabs-mode: nil
kaf24@3988 504 * End:
kaf24@3914 505 */