xen-vtx-unstable

annotate xen/arch/x86/vmx_vmcs.c @ 3717:ea98f0bb6510

bitkeeper revision 1.1159.212.127 (4208b02bTdSR4AVYRg8diDkKZmIVUg)

General shadow code cleanup.

Fixed compilation problems when SHADOW_DEBUG is enabled.
Fixed compilation problems when CONFIG_VMX is undefined.

Simplified l1pte_write_fault and l1pte_read_fault.
Name change: spfn => smfn (shadow machine frame numbers).

In general, the terms pfn and gpfn now refer to pages in the
guest's idea of physical frames (which differs for full shadow
guests). mfn always refers to a machine frame number.

One bug fix for check_pagetable():
If we're using writable page tables
along with shadow mode, don't check the currently writable page table
page -- check its snapshot instead.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Tue Feb 08 12:27:23 2005 +0000 (2005-02-08)
parents 9e80fc0dcac5
children f5f2757b3aa2
rev   line source
kaf24@3677 1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
iap10@3290 2 /*
iap10@3290 3 * vmx_vmcs.c: VMCS management
iap10@3290 4 * Copyright (c) 2004, Intel Corporation.
iap10@3290 5 *
iap10@3290 6 * This program is free software; you can redistribute it and/or modify it
iap10@3290 7 * under the terms and conditions of the GNU General Public License,
iap10@3290 8 * version 2, as published by the Free Software Foundation.
iap10@3290 9 *
iap10@3290 10 * This program is distributed in the hope it will be useful, but WITHOUT
iap10@3290 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
iap10@3290 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
iap10@3290 13 * more details.
iap10@3290 14 *
iap10@3290 15 * You should have received a copy of the GNU General Public License along with
iap10@3290 16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
iap10@3290 17 * Place - Suite 330, Boston, MA 02111-1307 USA.
iap10@3290 18 *
iap10@3290 19 */
iap10@3290 20
iap10@3290 21 #include <xen/config.h>
iap10@3290 22 #include <xen/init.h>
iap10@3290 23 #include <xen/mm.h>
iap10@3290 24 #include <xen/lib.h>
iap10@3290 25 #include <xen/errno.h>
iap10@3290 26
iap10@3290 27 #include <asm/cpufeature.h>
iap10@3290 28 #include <asm/processor.h>
iap10@3290 29 #include <asm/msr.h>
iap10@3290 30 #include <asm/vmx.h>
iap10@3290 31 #include <xen/event.h>
iap10@3290 32 #include <xen/kernel.h>
iap10@3290 33 #include <public/io/ioreq.h>
iap10@3290 34 #include <asm/domain_page.h>
iap10@3290 35
mafetter@3717 36 #ifdef CONFIG_VMX
mafetter@3717 37
iap10@3290 38 struct vmcs_struct *alloc_vmcs(void)
iap10@3290 39 {
iap10@3290 40 struct vmcs_struct *vmcs;
iap10@3290 41 unsigned int cpu_sig = cpuid_eax(0x00000001);
iap10@3290 42
iap10@3290 43 vmcs = (struct vmcs_struct *) alloc_xenheap_pages(get_order(vmcs_size));
iap10@3290 44 memset((char *) vmcs, 0, vmcs_size); /* don't remove this */
iap10@3290 45
iap10@3290 46 vmcs->vmcs_revision_id = (cpu_sig > 0xf41)? 3 : 1;
iap10@3290 47 return vmcs;
iap10@3290 48 }
iap10@3290 49
iap10@3290 50 void free_vmcs(struct vmcs_struct *vmcs)
iap10@3290 51 {
iap10@3290 52 int order;
iap10@3290 53
iap10@3290 54 order = (vmcs_size >> PAGE_SHIFT) - 1;
iap10@3290 55 free_xenheap_pages((unsigned long) vmcs, order);
iap10@3290 56 }
iap10@3290 57
iap10@3290 58 static inline int construct_vmcs_controls(void)
iap10@3290 59 {
iap10@3290 60 int error = 0;
iap10@3290 61
iap10@3290 62 error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
iap10@3290 63 MONITOR_PIN_BASED_EXEC_CONTROLS);
iap10@3290 64
iap10@3290 65 error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
iap10@3290 66 MONITOR_CPU_BASED_EXEC_CONTROLS);
iap10@3290 67
iap10@3290 68 error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
iap10@3290 69 error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
iap10@3290 70
iap10@3290 71 return error;
iap10@3290 72 }
iap10@3290 73
/* Flat 4GB segment limits used for both guest and host segments. */
#define GUEST_SEGMENT_LIMIT 0xffffffff
#define HOST_SEGMENT_LIMIT 0xffffffff

/*
 * Snapshot of the host (Xen) execution environment captured while
 * building a VMCS: segment selectors, descriptor-table limits/bases,
 * and control/debug registers.  Filled in by construct_vmcs_host()
 * and consumed by construct_init_vmcs_guest(), which initially gives
 * the guest the same environment as the host.
 */
struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
    /* control registers */
    unsigned long cr3;
    unsigned long cr0;
    unsigned long cr4;
    unsigned long dr7;
};
iap10@3290 101
#define round_pgdown(_p) ((_p)&PAGE_MASK) /* copied from domain.c */

/*
 * Locate the shared I/O-request page advertised in the guest's e820
 * map and record its Xen-virtual address in the domain's VMX platform
 * state.
 *
 * On entry, context->ecx holds the number of e820 entries and
 * context->edi the guest-physical address of the e820 table.
 * Returns 0 on success, -1 if the table is oversized or contains no
 * E820_SHARED_PAGE entry.
 */
int vmx_setup_platform(struct exec_domain *d, execution_context_t *context)
{
    int i;
    unsigned int n;
    unsigned long *p, mpfn, offset, addr;
    struct e820entry *e820p;
    unsigned long gpfn = 0;

    context->ebx = 0;   /* Linux expects ebx to be 0 for boot proc */

    n = context->ecx;
    if (n > 32) {
        VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d\n", n);
        return -1;
    }

    /* Map the (machine) frame holding the e820 table so we can scan it.
     * NOTE(review): assumes the table does not straddle a page boundary
     * — only the frame containing 'addr' is mapped.  Confirm with the
     * guest builder. */
    addr = context->edi;
    offset = (addr & ~PAGE_MASK);
    addr = round_pgdown(addr);
    mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
    p = map_domain_mem(mpfn << PAGE_SHIFT);

    e820p = (struct e820entry *) ((unsigned long) p + offset);

    /* Find the pseudo-entry marking the shared page. */
    for (i = 0; i < n; i++) {
        if (e820p[i].type == E820_SHARED_PAGE) {
            gpfn = (e820p[i].addr >> PAGE_SHIFT);
            break;
        }
    }

    if (gpfn == 0) {
        printk("No shared Page ?\n");
        unmap_domain_mem(p);
        return -1;
    }
    unmap_domain_mem(p);

    /* Map the shared page itself; the mapping is kept live for the
     * domain's lifetime (its VA is stored, never unmapped here). */
    mpfn = phys_to_machine_mapping(gpfn);
    p = map_domain_mem(mpfn << PAGE_SHIFT);
    ASSERT(p != NULL);
    d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;

    return 0;
}
iap10@3290 149
/*
 * One-time launch-path setup for a VMX exec domain: record the
 * domain's pfn range, wire up the linear-pagetable slot in the
 * monitor table, install a fresh guest-L2 cache page, set up the
 * shared platform page, and write the host/guest GDT, LDT, TR, CR3
 * and stack fields into the current VMCS.
 */
void vmx_do_launch(struct exec_domain *ed)
{
    /* Update CR3, GDT, LDT, TR */
    unsigned int tr, cpu, error = 0;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;
    struct list_head *list_ent;
    l2_pgentry_t *mpl2e, *guest_pl2e_cache;
    unsigned long i, pfn = 0;
    struct pfn_info *page;
    execution_context_t *ec = get_execution_context();
    struct domain *d = ed->domain;

    cpu = smp_processor_id();
    /* NOTE(review): min_pfn starts at 0, so min(0, pfn) below can never
     * raise it — min_pfn stays 0 regardless of the page list.  Looks
     * like it should start at ~0UL; confirm intent. */
    d->arch.min_pfn = d->arch.max_pfn = 0;

    /* Walk the domain's page list under the allocation lock to find
     * the extremes of its machine frame numbers. */
    spin_lock(&d->page_alloc_lock);
    list_ent = d->page_list.next;

    mpl2e = (l2_pgentry_t *)map_domain_mem(pagetable_val(ed->arch.monitor_table));

    for ( i = 0; list_ent != &d->page_list; i++ )
    {
        pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
        d->arch.min_pfn = min(d->arch.min_pfn, pfn);
        d->arch.max_pfn = max(d->arch.max_pfn, pfn);
        list_ent = frame_table[pfn].list.next;
    }

    spin_unlock(&d->page_alloc_lock);

    /* NOTE(review): allocation result is not checked; a NULL page here
     * makes 'pfn' garbage and corrupts the monitor table. */
    page = (struct pfn_info *) alloc_domheap_page(NULL);
    pfn = (unsigned long) (page - frame_table);

    /*
     * make linear_pt_table work for guest ptes
     */
    mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry((pfn << PAGE_SHIFT)| __PAGE_HYPERVISOR);

    guest_pl2e_cache = map_domain_mem(pfn << PAGE_SHIFT);
    memset(guest_pl2e_cache, 0, PAGE_SIZE); /* clean it up */
    ed->arch.guest_pl2e_cache = guest_pl2e_cache;

    unmap_domain_mem(mpl2e);

    /* NOTE(review): return value ignored — failure to find the shared
     * page is silently swallowed here. */
    vmx_setup_platform(ed, ec);

    /* Capture the host GDT via SGDT and publish its base to the VMCS. */
    __asm__ __volatile__ ("sgdt (%%eax) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;

    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    /* Guest starts with a null LDT. */
    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);

    /* Host TR comes from the running CPU's task register / TSS. */
    __asm__ __volatile__ ("str (%%eax) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];

    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
    error |= __vmwrite(GUEST_TR_BASE, 0);
    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);

    /* Guest runs on its own pagetable; host (exit) runs on the monitor
     * table.  Shadow table initially aliases the guest pagetable. */
    ed->arch.shadow_table = ed->arch.pagetable;
    __vmwrite(GUEST_CR3, pagetable_val(ed->arch.pagetable));
    __vmwrite(HOST_CR3, pagetable_val(ed->arch.monitor_table));
    __vmwrite(HOST_ESP, (unsigned long) get_stack_top());

    ed->arch.schedule_tail = arch_vmx_do_resume;
}
iap10@3290 225
/*
 * Initially set the same environment as host.
 *
 * Populate the guest-state area of the current VMCS from the passed
 * execution context plus the host environment captured by
 * construct_vmcs_host().  The guest starts with paging and protection
 * disabled (PE/PG clear in the CR0 read shadow) and flat segments.
 * Returns the OR of all __vmwrite() results (0 on success).
 * NOTE(review): 'full_context' is currently unused here.
 */
static inline int
construct_init_vmcs_guest(execution_context_t *context,
                          full_execution_context_t *full_context,
                          struct host_execution_env *host_env)
{
    int error = 0;
    union vmcs_arbytes arbytes;
    unsigned long dr7;
    unsigned long eflags, shadow_cr;

    /* MSR: no MSR load/store lists on exit or entry. */
    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);

    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
    /* interrupt: no event injected on first entry */
    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
    /* mask: all CR0/CR4 bits are owned by the monitor (guest reads the
     * shadows, guest writes cause VM exits) */
    error |= __vmwrite(CR0_GUEST_HOST_MASK, 0xffffffff);
    error |= __vmwrite(CR4_GUEST_HOST_MASK, 0xffffffff);

    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    /* TSC: guest sees the raw TSC */
    error |= __vmwrite(TSC_OFFSET, 0);
    error |= __vmwrite(CR3_TARGET_COUNT, 0);

    /* Guest Selectors */
    error |= __vmwrite(GUEST_CS_SELECTOR, context->cs);
    error |= __vmwrite(GUEST_ES_SELECTOR, context->es);
    error |= __vmwrite(GUEST_SS_SELECTOR, context->ss);
    error |= __vmwrite(GUEST_DS_SELECTOR, context->ds);
    error |= __vmwrite(GUEST_FS_SELECTOR, context->fs);
    error |= __vmwrite(GUEST_GS_SELECTOR, context->gs);

    /* Guest segment Limits: flat 4GB */
    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);

    error |= __vmwrite(GUEST_IDTR_LIMIT, host_env->idtr_limit);

    /* AR bytes: build access rights for a flat writable data segment,
     * then reuse with type 0xb for the code segment. */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3: read/write, accessed */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */

    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* type = 0xb: execute/read, accessed */
    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT base/limit are passed in edx/eax by the builder and
     * consumed (zeroed) here. */
    error |= __vmwrite(GUEST_GDTR_BASE, context->edx);
    context->edx = 0;
    error |= __vmwrite(GUEST_GDTR_LIMIT, context->eax);
    context->eax = 0;

    arbytes.fields.s = 0;                   /* not code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);

    error |= __vmwrite(GUEST_CR0, host_env->cr0); /* same CR0 */

    /* Initially PG, PE are not set */
    shadow_cr = host_env->cr0;
    shadow_cr &= ~(X86_CR0_PE | X86_CR0_PG);
    error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
    /* CR3 is set in vmx_final_setup_guestos */
    error |= __vmwrite(GUEST_CR4, host_env->cr4);
    /* Guest must not see PGE or VMXE in its CR4 */
    shadow_cr = host_env->cr4;
    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
    error |= __vmwrite(CR4_READ_SHADOW, shadow_cr);

    /* Flat bases, mirroring the host's */
    error |= __vmwrite(GUEST_ES_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_CS_BASE, host_env->cs_base);
    error |= __vmwrite(GUEST_SS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_DS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_FS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
    error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);

    error |= __vmwrite(GUEST_ESP, context->esp);
    error |= __vmwrite(GUEST_EIP, context->eip);

    /* EFLAGS: force architecturally-reserved bits to their fixed values */
    eflags = context->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
    eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */

    error |= __vmwrite(GUEST_EFLAGS, eflags);

    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    error |= __vmwrite(GUEST_DR7, dr7);
    /* No valid VMCS link pointer */
    error |= __vmwrite(GUEST_VMCS0, 0xffffffff);
    error |= __vmwrite(GUEST_VMCS1, 0xffffffff);

    return error;
}
iap10@3290 346
iap10@3290 347 static inline int construct_vmcs_host(struct host_execution_env *host_env)
iap10@3290 348 {
iap10@3290 349 int error = 0;
iap10@3290 350 unsigned long crn;
iap10@3290 351 struct Xgt_desc_struct desc;
iap10@3290 352
iap10@3290 353 /* Host Selectors */
iap10@3290 354 host_env->ds_selector = __HYPERVISOR_DS;
iap10@3290 355 error |= __vmwrite(HOST_ES_SELECTOR, host_env->ds_selector);
iap10@3290 356 error |= __vmwrite(HOST_SS_SELECTOR, host_env->ds_selector);
iap10@3290 357 error |= __vmwrite(HOST_DS_SELECTOR, host_env->ds_selector);
iap10@3290 358 error |= __vmwrite(HOST_FS_SELECTOR, host_env->ds_selector);
iap10@3290 359 error |= __vmwrite(HOST_GS_SELECTOR, host_env->ds_selector);
iap10@3290 360
iap10@3290 361 host_env->cs_selector = __HYPERVISOR_CS;
iap10@3290 362 error |= __vmwrite(HOST_CS_SELECTOR, host_env->cs_selector);
iap10@3290 363
iap10@3290 364 host_env->ds_base = 0;
iap10@3290 365 host_env->cs_base = 0;
iap10@3290 366 error |= __vmwrite(HOST_FS_BASE, host_env->ds_base);
iap10@3290 367 error |= __vmwrite(HOST_GS_BASE, host_env->ds_base);
iap10@3290 368
iap10@3290 369 /* Debug */
iap10@3290 370 __asm__ __volatile__ ("sidt (%%eax) \n" :: "a"(&desc) : "memory");
iap10@3290 371 host_env->idtr_limit = desc.size;
iap10@3290 372 host_env->idtr_base = desc.address;
iap10@3290 373 error |= __vmwrite(HOST_IDTR_BASE, host_env->idtr_base);
iap10@3290 374
iap10@3290 375 __asm__ __volatile__ ("movl %%cr0,%0" : "=r" (crn) : );
iap10@3290 376 host_env->cr0 = crn;
iap10@3290 377 error |= __vmwrite(HOST_CR0, crn); /* same CR0 */
iap10@3290 378
iap10@3290 379 /* CR3 is set in vmx_final_setup_hostos */
iap10@3290 380 __asm__ __volatile__ ("movl %%cr4,%0" : "=r" (crn) : );
iap10@3290 381 host_env->cr4 = crn;
iap10@3290 382 error |= __vmwrite(HOST_CR4, crn);
iap10@3290 383 error |= __vmwrite(HOST_EIP, (unsigned long) vmx_asm_vmexit_handler);
iap10@3290 384
iap10@3290 385 return error;
iap10@3290 386 }
iap10@3290 387
iap10@3290 388 /*
iap10@3290 389 * Need to extend to support full virtualization.
iap10@3290 390 * The variable use_host_env indicates if the new VMCS needs to use
iap10@3290 391 * the same setups as the host has (xenolinux).
iap10@3290 392 */
iap10@3290 393
iap10@3290 394 int construct_vmcs(struct arch_vmx_struct *arch_vmx,
iap10@3290 395 execution_context_t *context,
iap10@3290 396 full_execution_context_t *full_context,
iap10@3290 397 int use_host_env)
iap10@3290 398 {
iap10@3290 399 int error;
iap10@3290 400 u64 vmcs_phys_ptr;
iap10@3290 401
iap10@3290 402 struct host_execution_env host_env;
iap10@3290 403
iap10@3290 404 if (use_host_env != VMCS_USE_HOST_ENV)
iap10@3290 405 return -EINVAL;
iap10@3290 406
iap10@3290 407 memset(&host_env, 0, sizeof(struct host_execution_env));
iap10@3290 408
iap10@3290 409 vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
iap10@3290 410
iap10@3290 411 if ((error = __vmpclear (vmcs_phys_ptr))) {
iap10@3290 412 printk("construct_vmcs: VMCLEAR failed\n");
iap10@3290 413 return -EINVAL;
iap10@3290 414 }
iap10@3290 415 if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
iap10@3290 416 printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
iap10@3290 417 (unsigned long) vmcs_phys_ptr);
iap10@3290 418 return -EINVAL;
iap10@3290 419 }
iap10@3290 420 if ((error = construct_vmcs_controls())) {
iap10@3290 421 printk("construct_vmcs: construct_vmcs_controls failed\n");
iap10@3290 422 return -EINVAL;
iap10@3290 423 }
iap10@3290 424 /* host selectors */
iap10@3290 425 if ((error = construct_vmcs_host(&host_env))) {
iap10@3290 426 printk("construct_vmcs: construct_vmcs_host failed\n");
iap10@3290 427 return -EINVAL;
iap10@3290 428 }
iap10@3290 429 /* guest selectors */
iap10@3290 430 if ((error = construct_init_vmcs_guest(context, full_context, &host_env))) {
iap10@3290 431 printk("construct_vmcs: construct_vmcs_guest failed\n");
iap10@3290 432 return -EINVAL;
iap10@3290 433 }
iap10@3290 434
iap10@3290 435 if ((error |= __vmwrite(EXCEPTION_BITMAP,
iap10@3290 436 MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
iap10@3290 437 printk("construct_vmcs: setting Exception bitmap failed\n");
iap10@3290 438 return -EINVAL;
iap10@3290 439 }
iap10@3290 440
iap10@3290 441 return 0;
iap10@3290 442 }
iap10@3290 443
iap10@3290 444 int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
iap10@3290 445 {
iap10@3290 446 int error;
iap10@3290 447
iap10@3290 448 if ((error = __vmptrld(phys_ptr))) {
iap10@3290 449 clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 450 return error;
iap10@3290 451 }
iap10@3290 452 set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 453 return 0;
iap10@3290 454 }
iap10@3290 455
iap10@3290 456 int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
iap10@3290 457 {
iap10@3290 458 /* take the current VMCS */
iap10@3290 459 __vmptrst(phys_ptr);
iap10@3290 460 clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
iap10@3290 461 return 0;
iap10@3290 462 }
iap10@3290 463
/*
 * Called from the assembly entry path when VMLAUNCH fails; 'eflags'
 * holds the post-VMLAUNCH flags (CF/ZF encode the failure class).
 * Unrecoverable: crash the hypervisor.
 */
void vm_launch_fail(unsigned long eflags)
{
    BUG();
}
iap10@3290 468
/*
 * Called from the assembly entry path when VMRESUME fails; 'eflags'
 * holds the post-VMRESUME flags (CF/ZF encode the failure class).
 * Unrecoverable: crash the hypervisor.
 */
void vm_resume_fail(unsigned long eflags)
{
    BUG();
}
iap10@3290 473
mafetter@3717 474 #endif /* CONFIG_VMX */