
view xen/arch/x86/hvm/vmx/vmcs.c @ 19964:3952eaeb70b0

Introduce and use a per-CPU read-mostly sub-section

Mixing data that is set up once and then (perhaps frequently) read by
remote CPUs with data that the local CPU may modify (again, perhaps
frequently) still causes undesirable cache-protocol-related bus traffic,
so separate the former class of objects from the latter.

The objects converted here were picked purely for their write-once (or
write-very-rarely) properties; further adjustments may be desirable
later. The primary users of the new sub-section will be introduced by
the next patch.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jul 13 11:32:41 2009 +0100 (2009-07-13)
parents 7406764457a0
children 68e8b8379244
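
To make the new sub-section concrete, here is a minimal sketch of what a
read-mostly per-CPU declaration boils down to. The macro name mirrors the
DEFINE_PER_CPU_READ_MOSTLY uses in the source below; the section name and
the expansion shown are illustrative assumptions, not the exact Xen
implementation:

    /* Place the object in a dedicated linker sub-section so write-once
     * data shares cache lines only with other rarely-written data.
     * (Section name here is an assumption, for illustration only.) */
    #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
        __attribute__((__section__(".data.percpu.read_mostly"))) \
        __typeof__(type) per_cpu__##name

    /* Written once at CPU bring-up, then only read (possibly remotely): */
    static DEFINE_PER_CPU_READ_MOSTLY(struct vmcs_struct *, host_vmcs);
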
/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/keyhandler.h>
#include <asm/shadow.h>
#include <asm/tboot.h>

static int opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);

static int opt_unrestricted_guest_enabled = 1;
boolean_param("unrestricted_guest", opt_unrestricted_guest_enabled);

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop.
 * ple_window: upper bound on the amount of time a guest is allowed to
 *             execute in a PAUSE loop.
 * Time is measured on a counter that runs at the same rate as the TSC;
 * see SDM volume 3B, sections 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = 41;
integer_param("ple_gap", ple_gap);
static unsigned int ple_window = 4096;
integer_param("ple_window", ple_window);

/* Dynamic (run-time adjusted) execution control flags. */
u32 vmx_pin_based_exec_control __read_mostly;
u32 vmx_cpu_based_exec_control __read_mostly;
u32 vmx_secondary_exec_control __read_mostly;
u32 vmx_vmexit_control __read_mostly;
u32 vmx_vmentry_control __read_mostly;
bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;

static DEFINE_PER_CPU_READ_MOSTLY(struct vmcs_struct *, host_vmcs);
static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, active_vmcs_list);

static u32 vmcs_revision_id __read_mostly;

static void __init vmx_display_features(void)
{
    int printed = 0;

    printk("VMX: Supported advanced features:\n");

#define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; }
    P(cpu_has_vmx_virtualize_apic_accesses, "APIC MMIO access virtualisation");
    P(cpu_has_vmx_tpr_shadow, "APIC TPR shadow");
    P(cpu_has_vmx_ept, "Extended Page Tables (EPT)");
    P(cpu_has_vmx_vpid, "Virtual-Processor Identifiers (VPID)");
    P(cpu_has_vmx_vnmi, "Virtual NMI");
    P(cpu_has_vmx_msr_bitmap, "MSR direct-access bitmap");
    P(cpu_has_vmx_unrestricted_guest, "Unrestricted Guest");
#undef P

    if ( !printed )
        printk(" - none\n");
}

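/* Combine the required (min) and optional (opt) control bits with what the
 * given capability MSR permits; BUG if a required bit is unsupported. */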
static u32 adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr)
{
    u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt;

    rdmsr(msr, vmx_msr_low, vmx_msr_high);

    ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
    ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

    /* Ensure minimum (required) set of control bits are supported. */
    BUG_ON(ctl_min & ~ctl);

    return ctl;
}

static void vmx_init_vmcs_config(void)
{
    u32 vmx_basic_msr_low, vmx_basic_msr_high, min, opt;
    u32 _vmx_pin_based_exec_control;
    u32 _vmx_cpu_based_exec_control;
    u32 _vmx_secondary_exec_control = 0;
    u32 _vmx_vmexit_control;
    u32 _vmx_vmentry_control;

    rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high);

    min = (PIN_BASED_EXT_INTR_MASK |
           PIN_BASED_NMI_EXITING);
    opt = PIN_BASED_VIRTUAL_NMIS;
    _vmx_pin_based_exec_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_PINBASED_CTLS);

    min = (CPU_BASED_HLT_EXITING |
           CPU_BASED_INVLPG_EXITING |
           CPU_BASED_CR3_LOAD_EXITING |
           CPU_BASED_CR3_STORE_EXITING |
           CPU_BASED_MONITOR_EXITING |
           CPU_BASED_MWAIT_EXITING |
           CPU_BASED_MOV_DR_EXITING |
           CPU_BASED_ACTIVATE_IO_BITMAP |
           CPU_BASED_USE_TSC_OFFSETING |
           (opt_softtsc ? CPU_BASED_RDTSC_EXITING : 0));
    opt = (CPU_BASED_ACTIVATE_MSR_BITMAP |
           CPU_BASED_TPR_SHADOW |
           CPU_BASED_MONITOR_TRAP_FLAG |
           CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
    _vmx_cpu_based_exec_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
#ifdef __x86_64__
    if ( !(_vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) )
    {
        min |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
        _vmx_cpu_based_exec_control = adjust_vmx_controls(
            min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
    }
#endif

    if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
    {
        min = 0;
        opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
               SECONDARY_EXEC_WBINVD_EXITING |
               SECONDARY_EXEC_ENABLE_EPT |
               SECONDARY_EXEC_PAUSE_LOOP_EXITING);
        if ( opt_vpid_enabled )
            opt |= SECONDARY_EXEC_ENABLE_VPID;
        if ( opt_unrestricted_guest_enabled )
            opt |= SECONDARY_EXEC_UNRESTRICTED_GUEST;

        _vmx_secondary_exec_control = adjust_vmx_controls(
            min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
    }

    if ( _vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT )
    {
        /*
         * To use EPT we expect to be able to clear certain intercepts.
         * We check VMX_BASIC_MSR[55] to correctly handle default1 controls.
         */
        uint32_t must_be_one, must_be_zero, msr = MSR_IA32_VMX_PROCBASED_CTLS;
        if ( vmx_basic_msr_high & (1u << 23) )
            msr = MSR_IA32_VMX_TRUE_PROCBASED_CTLS;
        rdmsr(msr, must_be_one, must_be_zero);
        if ( must_be_one & (CPU_BASED_INVLPG_EXITING |
                            CPU_BASED_CR3_LOAD_EXITING |
                            CPU_BASED_CR3_STORE_EXITING) )
            _vmx_secondary_exec_control &=
                ~(SECONDARY_EXEC_ENABLE_EPT |
                  SECONDARY_EXEC_UNRESTRICTED_GUEST);
    }

    if ( (_vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) &&
         ple_gap == 0 )
    {
        printk("Disable Pause-Loop Exiting.\n");
        _vmx_secondary_exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
    }

#if defined(__i386__)
    /* If we can't virtualise APIC accesses, the TPR shadow is pointless. */
    if ( !(_vmx_secondary_exec_control &
           SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) )
        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

    min = VM_EXIT_ACK_INTR_ON_EXIT;
    opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT;
#ifdef __x86_64__
    min |= VM_EXIT_IA32E_MODE;
#endif
    _vmx_vmexit_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_EXIT_CTLS);

    min = 0;
    opt = VM_ENTRY_LOAD_GUEST_PAT;
    _vmx_vmentry_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_ENTRY_CTLS);

    if ( !vmx_pin_based_exec_control )
    {
        /* First time through. */
        vmcs_revision_id = vmx_basic_msr_low;
        vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
        vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
        vmx_secondary_exec_control = _vmx_secondary_exec_control;
        vmx_vmexit_control = _vmx_vmexit_control;
        vmx_vmentry_control = _vmx_vmentry_control;
        cpu_has_vmx_ins_outs_instr_info = !!(vmx_basic_msr_high & (1U<<22));
        vmx_display_features();
    }
    else
    {
        /* Globals are already initialised: re-check them. */
        BUG_ON(vmcs_revision_id != vmx_basic_msr_low);
        BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
        BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
        BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
        BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
        BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
        BUG_ON(cpu_has_vmx_ins_outs_instr_info !=
               !!(vmx_basic_msr_high & (1U<<22)));
    }

    /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
    BUG_ON((vmx_basic_msr_high & 0x1fff) > PAGE_SIZE);

#ifdef __x86_64__
    /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
    BUG_ON(vmx_basic_msr_high & (1u<<16));
#endif

    /* Require Write-Back (WB) memory type for VMCS accesses. */
    BUG_ON(((vmx_basic_msr_high >> 18) & 15) != 6);
}

static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_page()) == NULL )
    {
        gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
        return NULL;
    }

    clear_page(vmcs);
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_page(vmcs);
}

static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
    ASSERT(!local_irq_is_enabled());

    if ( arch_vmx->active_cpu == smp_processor_id() )
    {
        __vmpclear(virt_to_maddr(arch_vmx->vmcs));

        arch_vmx->active_cpu = -1;
        arch_vmx->launched = 0;

        list_del(&arch_vmx->active_list);

        if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
            this_cpu(current_vmcs) = NULL;
    }
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu != -1 )
        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
}

static void vmx_load_vmcs(struct vcpu *v)
{
    unsigned long flags;

    local_irq_save(flags);

    if ( v->arch.hvm_vmx.active_cpu == -1 )
    {
        list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
        v->arch.hvm_vmx.active_cpu = smp_processor_id();
    }

    ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());

    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;

    local_irq_restore(flags);
}

int vmx_cpu_up(void)
{
    u32 eax, edx;
    int bios_locked, cpu = smp_processor_id();
    u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));

    /*
     * Ensure the current processor operating mode meets
     * the required CR0 fixed bits in VMX operation.
     */
    cr0 = read_cr0();
    rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
    rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
    if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) )
    {
        printk("CPU%d: some settings of host CR0 are "
               "not allowed in VMX operation.\n", cpu);
        return 0;
    }

    rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);

    bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK);
    if ( bios_locked )
    {
        if ( !(eax & (IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX |
                      IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMX disabled by BIOS.\n", cpu);
            return 0;
        }
    }
    else
    {
        eax = IA32_FEATURE_CONTROL_MSR_LOCK;
        eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX;
        if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) )
            eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX;
        wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
    }

    vmx_init_vmcs_config();

    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));

    if ( this_cpu(host_vmcs) == NULL )
    {
        this_cpu(host_vmcs) = vmx_alloc_vmcs();
        if ( this_cpu(host_vmcs) == NULL )
        {
            printk("CPU%d: Could not allocate host VMCS\n", cpu);
            return 0;
        }
    }

    switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
    {
    case -2: /* #UD or #GP */
        if ( bios_locked &&
             test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) &&
             (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) ||
              !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMXON failed: perhaps because of TXT settings "
                   "in your BIOS configuration?\n", cpu);
            printk(" --> Disable TXT in your BIOS unless using a secure "
                   "bootloader.\n");
            return 0;
        }
        /* fall through */
    case -1: /* CF==1 or ZF==1 */
        printk("CPU%d: unexpected VMXON failure\n", cpu);
        return 0;
    case 0: /* success */
        break;
    default:
        BUG();
    }

    ept_sync_all();

    vpid_sync_all();

    return 1;
}

void vmx_cpu_down(void)
{
    struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
    unsigned long flags;

    local_irq_save(flags);

    while ( !list_empty(active_vmcs_list) )
        __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
                                    struct vcpu, arch.hvm_vmx.active_list));

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));
    __vmxoff();

    local_irq_restore(flags);
}

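/* Per-pCPU record for vmx_vmcs_enter()/vmx_vmcs_exit(): which foreign
 * vCPU's VMCS is currently loaded on this pCPU, and the nesting depth. */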
struct foreign_vmcs {
    struct vcpu *v;
    unsigned int count;
};
static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);

void vmx_vmcs_enter(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions.
     */
    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);

    if ( fv->v == v )
    {
        BUG_ON(fv->count == 0);
    }
    else
    {
        BUG_ON(fv->v != NULL);
        BUG_ON(fv->count != 0);

        vcpu_pause(v);
        spin_lock(&v->arch.hvm_vmx.vmcs_lock);

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);

        fv->v = v;
    }

    fv->count++;
}

void vmx_vmcs_exit(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);
    BUG_ON(fv->v != v);
    BUG_ON(fv->count == 0);

    if ( --fv->count == 0 )
    {
        /* Don't confuse vmx_do_resume (for @v or @current!) */
        vmx_clear_vmcs(v);
        if ( is_hvm_vcpu(current) )
            vmx_load_vmcs(current);

        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
        vcpu_unpause(v);

        fv->v = NULL;
    }
}

struct xgt_desc {
    unsigned short size;
    unsigned long address __attribute__((packed));
};

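/* Write this pCPU's host (hypervisor) state into the currently loaded VMCS,
 * so that VM exits resume on the right GDT/IDT/TSS and stack. */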
static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int cpu = smp_processor_id();

    __vmwrite(HOST_GDTR_BASE,
              (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY));
    __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);

    __vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
    __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(init_tss, cpu));

    __vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());

    /*
     * Skip end of cpu_user_regs when entering the hypervisor because the
     * CPU does not save context onto the stack. SS,RSP,CS,RIP,RFLAGS,etc
     * all get saved into the VMCS instead.
     */
    __vmwrite(HOST_RSP,
              (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
}

void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
{
    unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;

    /* VMX MSR bitmap supported? */
    if ( msr_bitmap == NULL )
        return;

    /*
     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
     * have the write-low and read-high bitmap offsets the wrong way round.
     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
     */
    if ( msr <= 0x1fff )
    {
        __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low  */
        __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
    }
    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
    {
        msr &= 0x1fff;
        __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high  */
        __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
    }
}

static int construct_vmcs(struct vcpu *v)
{
    struct domain *d = v->domain;
    uint16_t sysenter_cs;
    unsigned long sysenter_eip;

    vmx_vmcs_enter(v);

    /* VMCS controls. */
    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);

    v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
    v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;

    if ( paging_mode_hap(d) )
    {
        v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
                                          CPU_BASED_CR3_LOAD_EXITING |
                                          CPU_BASED_CR3_STORE_EXITING);
    }
    else
    {
        v->arch.hvm_vmx.secondary_exec_control &=
            ~(SECONDARY_EXEC_ENABLE_EPT |
              SECONDARY_EXEC_UNRESTRICTED_GUEST);
        vmx_vmexit_control &= ~(VM_EXIT_SAVE_GUEST_PAT |
                                VM_EXIT_LOAD_HOST_PAT);
        vmx_vmentry_control &= ~VM_ENTRY_LOAD_GUEST_PAT;
    }

    /* Do not enable Monitor Trap Flag unless we start single-step debugging. */
    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;

    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
    __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);

    if ( cpu_has_vmx_ple )
    {
        __vmwrite(PLE_GAP, ple_gap);
        __vmwrite(PLE_WINDOW, ple_window);
    }

    if ( cpu_has_vmx_secondary_exec_control )
        __vmwrite(SECONDARY_VM_EXEC_CONTROL,
                  v->arch.hvm_vmx.secondary_exec_control);

    /* MSR access bitmap. */
    if ( cpu_has_vmx_msr_bitmap )
    {
        unsigned long *msr_bitmap = alloc_xenheap_page();

        if ( msr_bitmap == NULL )
            return -ENOMEM;

        memset(msr_bitmap, ~0, PAGE_SIZE);
        v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
        __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));

        vmx_disable_intercept_for_msr(v, MSR_FS_BASE);
        vmx_disable_intercept_for_msr(v, MSR_GS_BASE);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
        if ( cpu_has_vmx_pat && paging_mode_hap(d) )
            vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
    }

    /* I/O access bitmap. */
    __vmwrite(IO_BITMAP_A, virt_to_maddr((char *)hvm_io_bitmap + 0));
    __vmwrite(IO_BITMAP_B, virt_to_maddr((char *)hvm_io_bitmap + PAGE_SIZE));

    /* Host data selectors. */
    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_FS_SELECTOR, 0);
    __vmwrite(HOST_GS_SELECTOR, 0);
    __vmwrite(HOST_FS_BASE, 0);
    __vmwrite(HOST_GS_BASE, 0);

    /* Host control registers. */
    v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
    __vmwrite(HOST_CR4, mmu_cr4_features);

    /* Host CS:RIP. */
    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);

    /* Host SYSENTER CS:RIP. */
    rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs);
    __vmwrite(HOST_SYSENTER_CS, sysenter_cs);
    rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip);
    __vmwrite(HOST_SYSENTER_EIP, sysenter_eip);

    /* MSR intercepts. */
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    __vmwrite(VM_ENTRY_INTR_INFO, 0);

    __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
    __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);

    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    __vmwrite(CR3_TARGET_COUNT, 0);

    __vmwrite(GUEST_ACTIVITY_STATE, 0);

    /* Guest segment bases. */
    __vmwrite(GUEST_ES_BASE, 0);
    __vmwrite(GUEST_SS_BASE, 0);
    __vmwrite(GUEST_DS_BASE, 0);
    __vmwrite(GUEST_FS_BASE, 0);
    __vmwrite(GUEST_GS_BASE, 0);
    __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits. */
    __vmwrite(GUEST_ES_LIMIT, ~0u);
    __vmwrite(GUEST_SS_LIMIT, ~0u);
    __vmwrite(GUEST_DS_LIMIT, ~0u);
    __vmwrite(GUEST_FS_LIMIT, ~0u);
    __vmwrite(GUEST_GS_LIMIT, ~0u);
    __vmwrite(GUEST_CS_LIMIT, ~0u);

    /* Guest segment AR bytes. */
    __vmwrite(GUEST_ES_AR_BYTES, 0xc093); /* read/write, accessed */
    __vmwrite(GUEST_SS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_DS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_FS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_GS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */

    /* Guest IDT. */
    __vmwrite(GUEST_IDTR_BASE, 0);
    __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest GDT. */
    __vmwrite(GUEST_GDTR_BASE, 0);
    __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest LDT. */
    __vmwrite(GUEST_LDTR_AR_BYTES, 0x0082); /* LDT */
    __vmwrite(GUEST_LDTR_SELECTOR, 0);
    __vmwrite(GUEST_LDTR_BASE, 0);
    __vmwrite(GUEST_LDTR_LIMIT, 0);

    /* Guest TSS. */
    __vmwrite(GUEST_TR_AR_BYTES, 0x008b); /* 32-bit TSS (busy) */
    __vmwrite(GUEST_TR_BASE, 0);
    __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __vmwrite(GUEST_DR7, 0);
    __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif

    __vmwrite(EXCEPTION_BITMAP,
              HVM_TRAP_MASK
              | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
              | (1U << TRAP_no_device));

    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
    hvm_update_guest_cr(v, 0);

    v->arch.hvm_vcpu.guest_cr[4] = 0;
    hvm_update_guest_cr(v, 4);

    if ( cpu_has_vmx_tpr_shadow )
    {
        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
                  page_to_maddr(vcpu_vlapic(v)->regs_page));
        __vmwrite(TPR_THRESHOLD, 0);
    }

    if ( paging_mode_hap(d) )
    {
        __vmwrite(EPT_POINTER, d->arch.hvm_domain.vmx.ept_control.eptp);
#ifdef __i386__
        __vmwrite(EPT_POINTER_HIGH,
                  d->arch.hvm_domain.vmx.ept_control.eptp >> 32);
#endif
    }

    if ( cpu_has_vmx_vpid )
    {
        v->arch.hvm_vmx.vpid =
            v->domain->arch.hvm_domain.vmx.vpid_base + v->vcpu_id;
        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
    }

    if ( cpu_has_vmx_pat && paging_mode_hap(d) )
    {
        u64 host_pat, guest_pat;

        rdmsrl(MSR_IA32_CR_PAT, host_pat);
        guest_pat = 0x7040600070406ULL;

        __vmwrite(HOST_PAT, host_pat);
        __vmwrite(GUEST_PAT, guest_pat);
#ifdef __i386__
        __vmwrite(HOST_PAT_HIGH, host_pat >> 32);
        __vmwrite(GUEST_PAT_HIGH, guest_pat >> 32);
#endif
    }

    vmx_vmcs_exit(v);

    paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */

    vmx_vlapic_msr_changed(v);

    return 0;
}

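/* Helpers for the per-vCPU MSR areas: entries added here are saved on
 * VM exit and (for the guest list) re-loaded on VM entry. */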
int vmx_read_guest_msr(u32 msr, u64 *val)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;

    for ( i = 0; i < msr_count; i++ )
    {
        if ( msr_area[i].index == msr )
        {
            *val = msr_area[i].data;
            return 0;
        }
    }

    return -ESRCH;
}

int vmx_write_guest_msr(u32 msr, u64 val)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;

    for ( i = 0; i < msr_count; i++ )
    {
        if ( msr_area[i].index == msr )
        {
            msr_area[i].data = val;
            return 0;
        }
    }

    return -ESRCH;
}

int vmx_add_guest_msr(u32 msr)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;

    if ( msr_area == NULL )
    {
        if ( (msr_area = alloc_xenheap_page()) == NULL )
            return -ENOMEM;
        curr->arch.hvm_vmx.msr_area = msr_area;
        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
    }

    for ( i = 0; i < msr_count; i++ )
        if ( msr_area[i].index == msr )
            return 0;

    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
        return -ENOSPC;

    msr_area[msr_count].index = msr;
    msr_area[msr_count].mbz = 0;
    msr_area[msr_count].data = 0;
    curr->arch.hvm_vmx.msr_count = ++msr_count;
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);

    return 0;
}

int vmx_add_host_load_msr(u32 msr)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;

    if ( msr_area == NULL )
    {
        if ( (msr_area = alloc_xenheap_page()) == NULL )
            return -ENOMEM;
        curr->arch.hvm_vmx.host_msr_area = msr_area;
        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
    }

    for ( i = 0; i < msr_count; i++ )
        if ( msr_area[i].index == msr )
            return 0;

    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
        return -ENOSPC;

    msr_area[msr_count].index = msr;
    msr_area[msr_count].mbz = 0;
    rdmsrl(msr, msr_area[msr_count].data);
    curr->arch.hvm_vmx.host_msr_count = ++msr_count;
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);

    return 0;
}

int vmx_create_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
    int rc;

    if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
        return -ENOMEM;

    INIT_LIST_HEAD(&arch_vmx->active_list);
    __vmpclear(virt_to_maddr(arch_vmx->vmcs));
    arch_vmx->active_cpu = -1;
    arch_vmx->launched = 0;

    if ( (rc = construct_vmcs(v)) != 0 )
    {
        vmx_free_vmcs(arch_vmx->vmcs);
        return rc;
    }

    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    vmx_clear_vmcs(v);

    vmx_free_vmcs(arch_vmx->vmcs);

    free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
    free_xenheap_page(v->arch.hvm_vmx.msr_area);
    free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
}

void vm_launch_fail(void)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_launch_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

void vm_resume_fail(void)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_resume_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

static void wbinvd_ipi(void *info)
{
    wbinvd();
}

void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
        /*
         * For a pass-through domain, the guest PCI-E device driver may
         * leverage "Non-Snoop" I/O and explicitly use WBINVD or CLFLUSH
         * on a RAM region. Since migration may occur before the WBINVD or
         * CLFLUSH, we need to maintain data consistency either by:
         *  1: flushing the cache (wbinvd) when the guest is scheduled out
         *     if there is no wbinvd exit, or
         *  2: executing wbinvd on all dirty pCPUs when the guest's wbinvd
         *     exits.
         */
        if ( has_arch_pdevs(v->domain) && !cpu_has_wbinvd_exiting )
        {
            int cpu = v->arch.hvm_vmx.active_cpu;
            if ( cpu != -1 )
                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1);
        }

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
        vpid_sync_vcpu_all(v);
    }

    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        vmx_update_debug_state(v);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}

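/* Read a VMCS field via __vmread_safe(), yielding 0 if the VMREAD fails
 * (e.g. the field is not implemented on this CPU). */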
static unsigned long vmr(unsigned long field)
{
    int rc;
    unsigned long val;
    val = __vmread_safe(field, &rc);
    return rc ? 0 : val;
}

static void vmx_dump_sel(char *name, uint32_t selector)
{
    uint32_t sel, attr, limit;
    uint64_t base;
    sel = vmr(selector);
    attr = vmr(selector + (GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR));
    limit = vmr(selector + (GUEST_ES_LIMIT - GUEST_ES_SELECTOR));
    base = vmr(selector + (GUEST_ES_BASE - GUEST_ES_SELECTOR));
    printk("%s: sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016"PRIx64"\n",
           name, sel, attr, limit, base);
}

static void vmx_dump_sel2(char *name, uint32_t lim)
{
    uint32_t limit;
    uint64_t base;
    limit = vmr(lim);
    base = vmr(lim + (GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
    printk("%s: limit=0x%08x, base=0x%016"PRIx64"\n",
           name, limit, base);
}

void vmcs_dump_vcpu(struct vcpu *v)
{
    struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
    unsigned long long x;

    if ( v == current )
        regs = guest_cpu_user_regs();

    vmx_vmcs_enter(v);

    printk("*** Guest State ***\n");
    printk("CR0: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
           (unsigned long long)vmr(GUEST_CR0),
           (unsigned long long)vmr(CR0_READ_SHADOW),
           (unsigned long long)vmr(CR0_GUEST_HOST_MASK));
    printk("CR4: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
           (unsigned long long)vmr(GUEST_CR4),
           (unsigned long long)vmr(CR4_READ_SHADOW),
           (unsigned long long)vmr(CR4_GUEST_HOST_MASK));
    printk("CR3: actual=0x%016llx, target_count=%d\n",
           (unsigned long long)vmr(GUEST_CR3),
           (int)vmr(CR3_TARGET_COUNT));
    printk("     target0=%016llx, target1=%016llx\n",
           (unsigned long long)vmr(CR3_TARGET_VALUE0),
           (unsigned long long)vmr(CR3_TARGET_VALUE1));
    printk("     target2=%016llx, target3=%016llx\n",
           (unsigned long long)vmr(CR3_TARGET_VALUE2),
           (unsigned long long)vmr(CR3_TARGET_VALUE3));
    printk("RSP = 0x%016llx (0x%016llx) RIP = 0x%016llx (0x%016llx)\n",
           (unsigned long long)vmr(GUEST_RSP),
           (unsigned long long)regs->esp,
           (unsigned long long)vmr(GUEST_RIP),
           (unsigned long long)regs->eip);
    printk("RFLAGS=0x%016llx (0x%016llx) DR7 = 0x%016llx\n",
           (unsigned long long)vmr(GUEST_RFLAGS),
           (unsigned long long)regs->eflags,
           (unsigned long long)vmr(GUEST_DR7));
    printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
           (unsigned long long)vmr(GUEST_SYSENTER_ESP),
           (int)vmr(GUEST_SYSENTER_CS),
           (unsigned long long)vmr(GUEST_SYSENTER_EIP));
    vmx_dump_sel("CS", GUEST_CS_SELECTOR);
    vmx_dump_sel("DS", GUEST_DS_SELECTOR);
    vmx_dump_sel("SS", GUEST_SS_SELECTOR);
    vmx_dump_sel("ES", GUEST_ES_SELECTOR);
    vmx_dump_sel("FS", GUEST_FS_SELECTOR);
    vmx_dump_sel("GS", GUEST_GS_SELECTOR);
    vmx_dump_sel2("GDTR", GUEST_GDTR_LIMIT);
    vmx_dump_sel("LDTR", GUEST_LDTR_SELECTOR);
    vmx_dump_sel2("IDTR", GUEST_IDTR_LIMIT);
    vmx_dump_sel("TR", GUEST_TR_SELECTOR);
    printk("Guest PAT = 0x%08x%08x\n",
           (uint32_t)vmr(GUEST_PAT_HIGH), (uint32_t)vmr(GUEST_PAT));
    x = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32;
    x |= (uint32_t)vmr(TSC_OFFSET);
    printk("TSC Offset = %016llx\n", x);
    x = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL_HIGH) << 32;
    x |= (uint32_t)vmr(GUEST_IA32_DEBUGCTL);
    printk("DebugCtl=%016llx DebugExceptions=%016llx\n", x,
           (unsigned long long)vmr(GUEST_PENDING_DBG_EXCEPTIONS));
    printk("Interruptibility=%04x ActivityState=%04x\n",
           (int)vmr(GUEST_INTERRUPTIBILITY_INFO),
           (int)vmr(GUEST_ACTIVITY_STATE));

    printk("*** Host State ***\n");
    printk("RSP = 0x%016llx RIP = 0x%016llx\n",
           (unsigned long long)vmr(HOST_RSP),
           (unsigned long long)vmr(HOST_RIP));
    printk("CS=%04x DS=%04x ES=%04x FS=%04x GS=%04x SS=%04x TR=%04x\n",
           (uint16_t)vmr(HOST_CS_SELECTOR),
           (uint16_t)vmr(HOST_DS_SELECTOR),
           (uint16_t)vmr(HOST_ES_SELECTOR),
           (uint16_t)vmr(HOST_FS_SELECTOR),
           (uint16_t)vmr(HOST_GS_SELECTOR),
           (uint16_t)vmr(HOST_SS_SELECTOR),
           (uint16_t)vmr(HOST_TR_SELECTOR));
    printk("FSBase=%016llx GSBase=%016llx TRBase=%016llx\n",
           (unsigned long long)vmr(HOST_FS_BASE),
           (unsigned long long)vmr(HOST_GS_BASE),
           (unsigned long long)vmr(HOST_TR_BASE));
    printk("GDTBase=%016llx IDTBase=%016llx\n",
           (unsigned long long)vmr(HOST_GDTR_BASE),
           (unsigned long long)vmr(HOST_IDTR_BASE));
    printk("CR0=%016llx CR3=%016llx CR4=%016llx\n",
           (unsigned long long)vmr(HOST_CR0),
           (unsigned long long)vmr(HOST_CR3),
           (unsigned long long)vmr(HOST_CR4));
    printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
           (unsigned long long)vmr(HOST_SYSENTER_ESP),
           (int)vmr(HOST_SYSENTER_CS),
           (unsigned long long)vmr(HOST_SYSENTER_EIP));
    printk("Host PAT = 0x%08x%08x\n",
           (uint32_t)vmr(HOST_PAT_HIGH), (uint32_t)vmr(HOST_PAT));

    printk("*** Control State ***\n");
    printk("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
           (uint32_t)vmr(PIN_BASED_VM_EXEC_CONTROL),
           (uint32_t)vmr(CPU_BASED_VM_EXEC_CONTROL),
           (uint32_t)vmr(SECONDARY_VM_EXEC_CONTROL));
    printk("EntryControls=%08x ExitControls=%08x\n",
           (uint32_t)vmr(VM_ENTRY_CONTROLS),
           (uint32_t)vmr(VM_EXIT_CONTROLS));
    printk("ExceptionBitmap=%08x\n",
           (uint32_t)vmr(EXCEPTION_BITMAP));
    printk("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
           (uint32_t)vmr(VM_ENTRY_INTR_INFO),
           (uint32_t)vmr(VM_ENTRY_EXCEPTION_ERROR_CODE),
           (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
1100 printk("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
1101 (uint32_t)vmr(VM_EXIT_INTR_INFO),
1102 (uint32_t)vmr(VM_EXIT_INTR_ERROR_CODE),
1103 (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
1104 printk(" reason=%08x qualification=%08x\n",
1105 (uint32_t)vmr(VM_EXIT_REASON),
1106 (uint32_t)vmr(EXIT_QUALIFICATION));
1107 printk("IDTVectoring: info=%08x errcode=%08x\n",
1108 (uint32_t)vmr(IDT_VECTORING_INFO),
1109 (uint32_t)vmr(IDT_VECTORING_ERROR_CODE));
1110 printk("TPR Threshold = 0x%02x\n",
1111 (uint32_t)vmr(TPR_THRESHOLD));
1112 printk("EPT pointer = 0x%08x%08x\n",
1113 (uint32_t)vmr(EPT_POINTER_HIGH), (uint32_t)vmr(EPT_POINTER));
1114 printk("Virtual processor ID = 0x%04x\n",
1115 (uint32_t)vmr(VIRTUAL_PROCESSOR_ID));
    vmx_vmcs_exit(v);
}

static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        if ( !is_hvm_domain(d) )
            continue;
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu ( d, v )
        {
            printk("\tVCPU %d\n", v->vcpu_id);
            vmcs_dump_vcpu(v);
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    printk("**************************************\n");
}

void setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */