xen-vtx-unstable

annotate xen/arch/x86/vmx_vmcs.c @ 3748:31070c4d28c6

bitkeeper revision 1.1159.1.555 (42094892MsTPGiy_x_uFbwMVQuq4Qg)

Fix the synchronization issues between xend and the device model at
startup time.

Initialize the shared page in the hypervisor. Otherwise, the hypervisor
might try to inject spurious interrupts into the guest due to
uninitialized data.
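
Within this file, the change is the zeroing of the shared page in
vmx_setup_platform() below, before its virtual address is recorded for
later use (excerpt from this revision):

    mpfn = phys_to_machine_mapping(gpfn);
    p = map_domain_mem(mpfn << PAGE_SHIFT);
    ASSERT(p != NULL);

    /* Initialise shared page */
    memset(p, 0, PAGE_SIZE);

    d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;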

Signed-off-by: Xin B Li <xin.b.li@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Signed-off-by: ian@xensource.com
author iap10@labyrinth.cl.cam.ac.uk
date Tue Feb 08 23:17:38 2005 +0000 (2005-02-08)
parents f5f2757b3aa2
children 1494093616a3
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>

#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <public/io/ioreq.h>
#include <asm/domain_page.h>

#ifdef CONFIG_VMX

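/*
 * Allocate a zeroed VMCS region from the Xen heap and stamp it with the
 * VMCS revision identifier, chosen here from the CPU signature returned
 * by CPUID leaf 1.
 */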
struct vmcs_struct *alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;
    unsigned int cpu_sig = cpuid_eax(0x00000001);

    vmcs = (struct vmcs_struct *) alloc_xenheap_pages(get_order(vmcs_size));
    memset((char *) vmcs, 0, vmcs_size); /* don't remove this */

    vmcs->vmcs_revision_id = (cpu_sig > 0xf41) ? 3 : 1;
    return vmcs;
}

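/* Return a VMCS region obtained from alloc_vmcs() to the Xen heap. */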
void free_vmcs(struct vmcs_struct *vmcs)
{
    int order;

    order = (vmcs_size >> PAGE_SHIFT) - 1;
    free_xenheap_pages((unsigned long) vmcs, order);
}

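/*
 * Program the pin-based and CPU-based VM-execution controls and the
 * VM-exit/VM-entry controls of the current VMCS from the MONITOR_*
 * defaults.
 */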
static inline int construct_vmcs_controls(void)
{
    int error = 0;

    error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                       MONITOR_PIN_BASED_EXEC_CONTROLS);

    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                       MONITOR_CPU_BASED_EXEC_CONTROLS);

    error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
    error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);

    return error;
}

#define GUEST_SEGMENT_LIMIT 0xffffffff
#define HOST_SEGMENT_LIMIT 0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
    /* control registers */
    unsigned long cr3;
    unsigned long cr0;
    unsigned long cr4;
    unsigned long dr7;
};

#define round_pgdown(_p) ((_p)&PAGE_MASK) /* copied from domain.c */

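/*
 * Locate the E820_SHARED_PAGE entry in the e820 map passed in via the
 * guest context (ecx holds the entry count, edi the map's address),
 * then map that page, zero it and record its virtual address as the
 * shared page used to communicate with the device model.
 */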
int vmx_setup_platform(struct exec_domain *d, execution_context_t *context)
{
    int i;
    unsigned int n;
    unsigned long *p, mpfn, offset, addr;
    struct e820entry *e820p;
    unsigned long gpfn = 0;

    context->ebx = 0;   /* Linux expects ebx to be 0 for boot proc */

    n = context->ecx;
    if (n > 32) {
        VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d\n", n);
        return -1;
    }

    addr = context->edi;
    offset = (addr & ~PAGE_MASK);
    addr = round_pgdown(addr);
    mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
    p = map_domain_mem(mpfn << PAGE_SHIFT);

    e820p = (struct e820entry *) ((unsigned long) p + offset);

    for (i = 0; i < n; i++) {
        if (e820p[i].type == E820_SHARED_PAGE) {
            gpfn = (e820p[i].addr >> PAGE_SHIFT);
            break;
        }
    }

    if (gpfn == 0) {
        printk("No shared Page ?\n");
        unmap_domain_mem(p);
        return -1;
    }
    unmap_domain_mem(p);

    mpfn = phys_to_machine_mapping(gpfn);
    p = map_domain_mem(mpfn << PAGE_SHIFT);
    ASSERT(p != NULL);

    /* Initialise shared page */
    memset(p, 0, PAGE_SIZE);

    d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;

    return 0;
}

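/*
 * Set up state for launching a VMX guest on the current CPU: record the
 * domain's pfn range, install a fresh page as the guest_pl2e_cache behind
 * the linear page-table slot of the monitor table, initialise the platform
 * (shared page) via vmx_setup_platform(), and write the CPU-dependent
 * host/guest VMCS fields (GDTR, TR, CR3, host stack pointer).
 */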
void vmx_do_launch(struct exec_domain *ed)
{
    /* Update CR3, GDT, LDT, TR */
    unsigned int tr, cpu, error = 0;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;
    struct list_head *list_ent;
    l2_pgentry_t *mpl2e, *guest_pl2e_cache;
    unsigned long i, pfn = 0;
    struct pfn_info *page;
    execution_context_t *ec = get_execution_context();
    struct domain *d = ed->domain;

    cpu = smp_processor_id();
    d->arch.min_pfn = d->arch.max_pfn = 0;

    spin_lock(&d->page_alloc_lock);
    list_ent = d->page_list.next;

    mpl2e = (l2_pgentry_t *)map_domain_mem(pagetable_val(ed->arch.monitor_table));

    for ( i = 0; list_ent != &d->page_list; i++ )
    {
        pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
        d->arch.min_pfn = min(d->arch.min_pfn, pfn);
        d->arch.max_pfn = max(d->arch.max_pfn, pfn);
        list_ent = frame_table[pfn].list.next;
    }

    spin_unlock(&d->page_alloc_lock);

    page = (struct pfn_info *) alloc_domheap_page(NULL);
    pfn = (unsigned long) (page - frame_table);

    /*
     * make linear_pt_table work for guest ptes
     */
    mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);

    guest_pl2e_cache = map_domain_mem(pfn << PAGE_SHIFT);
    memset(guest_pl2e_cache, 0, PAGE_SIZE); /* clean it up */
    ed->arch.guest_pl2e_cache = guest_pl2e_cache;

    unmap_domain_mem(mpl2e);

    vmx_setup_platform(ed, ec);

    __asm__ __volatile__ ("sgdt (%%eax) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;

    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);

    __asm__ __volatile__ ("str (%%eax) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];

    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
    error |= __vmwrite(GUEST_TR_BASE, 0);
    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);

    ed->arch.shadow_table = ed->arch.pagetable;
    __vmwrite(GUEST_CR3, pagetable_val(ed->arch.pagetable));
    __vmwrite(HOST_CR3, pagetable_val(ed->arch.monitor_table));
    __vmwrite(HOST_ESP, (unsigned long) get_stack_top());

    ed->arch.schedule_tail = arch_vmx_do_resume;
}

/*
 * Initially set the same environment as host.
 */
static inline int
construct_init_vmcs_guest(execution_context_t *context,
                          full_execution_context_t *full_context,
                          struct host_execution_env *host_env)
{
    int error = 0;
    union vmcs_arbytes arbytes;
    unsigned long dr7;
    unsigned long eflags, shadow_cr;

    /* MSR */
    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);

    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
    /* interrupt */
    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
    /* mask */
    error |= __vmwrite(CR0_GUEST_HOST_MASK, 0xffffffff);
    error |= __vmwrite(CR4_GUEST_HOST_MASK, 0xffffffff);

    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    /* TSC */
    error |= __vmwrite(TSC_OFFSET, 0);
    error |= __vmwrite(CR3_TARGET_COUNT, 0);

    /* Guest Selectors */
    error |= __vmwrite(GUEST_CS_SELECTOR, context->cs);
    error |= __vmwrite(GUEST_ES_SELECTOR, context->es);
    error |= __vmwrite(GUEST_SS_SELECTOR, context->ss);
    error |= __vmwrite(GUEST_DS_SELECTOR, context->ds);
    error |= __vmwrite(GUEST_FS_SELECTOR, context->fs);
    error |= __vmwrite(GUEST_GS_SELECTOR, context->gs);

    /* Guest segment limits */
    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);

    error |= __vmwrite(GUEST_IDTR_LIMIT, host_env->idtr_limit);

    /* AR bytes */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3 */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */

    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* type = 0xb */
    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    error |= __vmwrite(GUEST_GDTR_BASE, context->edx);
    context->edx = 0;
    error |= __vmwrite(GUEST_GDTR_LIMIT, context->eax);
    context->eax = 0;

    arbytes.fields.s = 0;                   /* not a code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);

    error |= __vmwrite(GUEST_CR0, host_env->cr0); /* same CR0 */

    /* Initially, PG and PE are not set */
    shadow_cr = host_env->cr0;
    shadow_cr &= ~(X86_CR0_PE | X86_CR0_PG);
    error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
    /* CR3 is set in vmx_final_setup_guestos */
    error |= __vmwrite(GUEST_CR4, host_env->cr4);
    shadow_cr = host_env->cr4;
    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
    error |= __vmwrite(CR4_READ_SHADOW, shadow_cr);

    error |= __vmwrite(GUEST_ES_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_CS_BASE, host_env->cs_base);
    error |= __vmwrite(GUEST_SS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_DS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_FS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);

    error |= __vmwrite(GUEST_ESP, context->esp);
    error |= __vmwrite(GUEST_EIP, context->eip);

    eflags = context->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
    eflags |= VMCS_EFLAGS_RESERVED_1;                   /* set 1s */

    error |= __vmwrite(GUEST_EFLAGS, eflags);

    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    error |= __vmwrite(GUEST_DR7, dr7);
    error |= __vmwrite(GUEST_VMCS0, 0xffffffff);
    error |= __vmwrite(GUEST_VMCS1, 0xffffffff);

    return error;
}

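/*
 * Fill in the host-state area of the VMCS from the running hypervisor
 * context: segment selectors, IDTR, CR0, CR4 and the VM-exit entry point
 * (vmx_asm_vmexit_handler).
 */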
static inline int construct_vmcs_host(struct host_execution_env *host_env)
{
    int error = 0;
    unsigned long crn;
    struct Xgt_desc_struct desc;

    /* Host Selectors */
    host_env->ds_selector = __HYPERVISOR_DS;
    error |= __vmwrite(HOST_ES_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_SS_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_DS_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_FS_SELECTOR, host_env->ds_selector);
    error |= __vmwrite(HOST_GS_SELECTOR, host_env->ds_selector);

    host_env->cs_selector = __HYPERVISOR_CS;
    error |= __vmwrite(HOST_CS_SELECTOR, host_env->cs_selector);

    host_env->ds_base = 0;
    host_env->cs_base = 0;
    error |= __vmwrite(HOST_FS_BASE, host_env->ds_base);
    error |= __vmwrite(HOST_GS_BASE, host_env->ds_base);

    /* Debug */
    __asm__ __volatile__ ("sidt (%%eax) \n" :: "a"(&desc) : "memory");
    host_env->idtr_limit = desc.size;
    host_env->idtr_base = desc.address;
    error |= __vmwrite(HOST_IDTR_BASE, host_env->idtr_base);

    __asm__ __volatile__ ("movl %%cr0,%0" : "=r" (crn) : );
    host_env->cr0 = crn;
    error |= __vmwrite(HOST_CR0, crn); /* same CR0 */

    /* CR3 is set in vmx_final_setup_hostos */
    __asm__ __volatile__ ("movl %%cr4,%0" : "=r" (crn) : );
    host_env->cr4 = crn;
    error |= __vmwrite(HOST_CR4, crn);
    error |= __vmwrite(HOST_EIP, (unsigned long) vmx_asm_vmexit_handler);

    return error;
}

/*
 * Need to extend to support full virtualization.
 * The use_host_env argument indicates whether the new VMCS should use
 * the same setup as the host (xenolinux).
 */

int construct_vmcs(struct arch_vmx_struct *arch_vmx,
                   execution_context_t *context,
                   full_execution_context_t *full_context,
                   int use_host_env)
{
    int error;
    u64 vmcs_phys_ptr;

    struct host_execution_env host_env;

    if (use_host_env != VMCS_USE_HOST_ENV)
        return -EINVAL;

    memset(&host_env, 0, sizeof(struct host_execution_env));

    vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);

    if ((error = __vmpclear(vmcs_phys_ptr))) {
        printk("construct_vmcs: VMCLEAR failed\n");
        return -EINVAL;
    }
    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
        printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
               (unsigned long) vmcs_phys_ptr);
        return -EINVAL;
    }
    if ((error = construct_vmcs_controls())) {
        printk("construct_vmcs: construct_vmcs_controls failed\n");
        return -EINVAL;
    }
    /* host selectors */
    if ((error = construct_vmcs_host(&host_env))) {
        printk("construct_vmcs: construct_vmcs_host failed\n");
        return -EINVAL;
    }
    /* guest selectors */
    if ((error = construct_init_vmcs_guest(context, full_context, &host_env))) {
        printk("construct_vmcs: construct_vmcs_guest failed\n");
        return -EINVAL;
    }

    if ((error |= __vmwrite(EXCEPTION_BITMAP,
                            MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
        printk("construct_vmcs: setting Exception bitmap failed\n");
        return -EINVAL;
    }

    return 0;
}

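/*
 * Make the given VMCS current on this CPU (VMPTRLD) and track success in
 * the ARCH_VMX_VMCS_LOADED flag; store_vmcs() below saves the current-VMCS
 * pointer with VMPTRST and clears the flag.
 */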
int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
{
    int error;

    if ((error = __vmptrld(phys_ptr))) {
        clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
        return error;
    }
    set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
    return 0;
}

int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
{
    /* take the current VMCS */
    __vmptrst(phys_ptr);
    clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
    return 0;
}

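/*
 * Failure handlers reached when a VMLAUNCH or VMRESUME does not complete;
 * there is no recovery path yet, so simply BUG().
 */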
void vm_launch_fail(unsigned long eflags)
{
    BUG();
}

void vm_resume_fail(unsigned long eflags)
{
    BUG();
}

#endif /* CONFIG_VMX */