
view xen/arch/x86/hvm/vmx/vmcs.c @ 13651:fde9e1d474b7

hvm: Define a global I/O access bitmap, allowing direct access to port 0x80.
Signed-off-by: Keir Fraser <keir@xensource.com>
author:   kfraser@localhost.localdomain
date:     Thu Jan 25 18:20:58 2007 +0000 (2007-01-25)
parents:  160ff08f8b1f
children: 625aa1547cb6

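The bitmap named in this changeset is the hvm_io_bitmap buffer written into
IO_BITMAP_A/IO_BITMAP_B in construct_vmcs() below; its definition lives in
the common HVM code, not in this file. As a rough sketch of the idea (the
helper name here is hypothetical): one bit per I/O port, a set bit forces a
VM exit on access, and the bit for port 0x80 is left clear so guests can hit
the POST/diagnostic port directly.

    /* Illustrative sketch only -- the real definition is outside vmcs.c. */
    char __attribute__ ((__section__ (".bss.page_aligned")))
        hvm_io_bitmap[2 * PAGE_SIZE];

    static void hvm_io_bitmap_init(void)  /* hypothetical helper name */
    {
        /* Intercept every port by default... */
        memset(hvm_io_bitmap, ~0, 2 * PAGE_SIZE);
        /* ...except port 0x80, which the guest may access directly. */
        clear_bit(0x80, hvm_io_bitmap);
    }
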
/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/keyhandler.h>
#include <asm/shadow.h>

/* Basic flags for Pin-based VM-execution controls. */
#define MONITOR_PIN_BASED_EXEC_CONTROLS         \
    ( PIN_BASED_EXT_INTR_MASK |                 \
      PIN_BASED_NMI_EXITING )

/* Basic flags for CPU-based VM-execution controls. */
#ifdef __x86_64__
#define MONITOR_CPU_BASED_EXEC_CONTROLS_SUBARCH \
    ( CPU_BASED_CR8_LOAD_EXITING |              \
      CPU_BASED_CR8_STORE_EXITING )
#else
#define MONITOR_CPU_BASED_EXEC_CONTROLS_SUBARCH 0
#endif
#define MONITOR_CPU_BASED_EXEC_CONTROLS         \
    ( MONITOR_CPU_BASED_EXEC_CONTROLS_SUBARCH | \
      CPU_BASED_HLT_EXITING |                   \
      CPU_BASED_INVDPG_EXITING |                \
      CPU_BASED_MWAIT_EXITING |                 \
      CPU_BASED_MOV_DR_EXITING |                \
      CPU_BASED_ACTIVATE_IO_BITMAP |            \
      CPU_BASED_USE_TSC_OFFSETING )

/* Basic flags for VM-Exit controls. */
#ifdef __x86_64__
#define MONITOR_VM_EXIT_CONTROLS_SUBARCH VM_EXIT_IA32E_MODE
#else
#define MONITOR_VM_EXIT_CONTROLS_SUBARCH 0
#endif
#define MONITOR_VM_EXIT_CONTROLS                \
    ( MONITOR_VM_EXIT_CONTROLS_SUBARCH |        \
      VM_EXIT_ACK_INTR_ON_EXIT )

/* Basic flags for VM-Entry controls. */
#define MONITOR_VM_ENTRY_CONTROLS 0x00000000

/* Dynamic (run-time adjusted) execution control flags. */
static u32 vmx_pin_based_exec_control;
static u32 vmx_cpu_based_exec_control;
static u32 vmx_vmexit_control;
static u32 vmx_vmentry_control;

static u32 vmcs_revision_id;

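/*
 * Fold a VMX capability MSR into the requested control flags.  Per the
 * IA-32 SDM, the low 32 bits of each capability MSR give the control bits
 * that must be 1, and the high 32 bits give the bits that may be 1;
 * requesting a control the hardware cannot provide is a fatal bug.
 */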
static u32 adjust_vmx_controls(u32 ctrls, u32 msr)
{
    u32 vmx_msr_low, vmx_msr_high;

    rdmsr(msr, vmx_msr_low, vmx_msr_high);

    /* Bit == 0 means must be zero. */
    BUG_ON(ctrls & ~vmx_msr_high);

    /* Bit == 1 means must be one. */
    ctrls |= vmx_msr_low;

    return ctrls;
}

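/*
 * Probe the VMX capability MSRs on the current CPU.  The boot CPU's values
 * become the global configuration; every other CPU must agree on the VMCS
 * revision id and the adjusted control settings.
 */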
void vmx_init_vmcs_config(void)
{
    u32 vmx_msr_low, vmx_msr_high;
    u32 _vmx_pin_based_exec_control;
    u32 _vmx_cpu_based_exec_control;
    u32 _vmx_vmexit_control;
    u32 _vmx_vmentry_control;

    _vmx_pin_based_exec_control =
        adjust_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
                            MSR_IA32_VMX_PINBASED_CTLS_MSR);
    _vmx_cpu_based_exec_control =
        adjust_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
                            MSR_IA32_VMX_PROCBASED_CTLS_MSR);
    _vmx_vmexit_control =
        adjust_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
                            MSR_IA32_VMX_EXIT_CTLS_MSR);
    _vmx_vmentry_control =
        adjust_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
                            MSR_IA32_VMX_ENTRY_CTLS_MSR);

    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);

    if ( smp_processor_id() == 0 )
    {
        vmcs_revision_id = vmx_msr_low;
        vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
        vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
        vmx_vmexit_control = _vmx_vmexit_control;
        vmx_vmentry_control = _vmx_vmentry_control;
    }
    else
    {
        BUG_ON(vmcs_revision_id != vmx_msr_low);
        BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
        BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
        BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
        BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
    }

    /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
    BUG_ON((vmx_msr_high & 0x1fff) > PAGE_SIZE);
}

static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_page()) == NULL )
    {
        gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
        return NULL;
    }

    memset(vmcs, 0, PAGE_SIZE);
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_page(vmcs);
}

static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;

    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));

    v->arch.hvm_vmx.active_cpu = -1;
    v->arch.hvm_vmx.launched = 0;
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu == -1 )
        return;

    if ( cpu == smp_processor_id() )
        return __vmx_clear_vmcs(v);

    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}

static void vmx_load_vmcs(struct vcpu *v)
{
    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    v->arch.hvm_vmx.active_cpu = smp_processor_id();
}

void vmx_vmcs_enter(struct vcpu *v)
{
    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions.
     */
    if ( v == current )
        return;

    vcpu_pause(v);
    spin_lock(&v->arch.hvm_vmx.vmcs_lock);

    vmx_clear_vmcs(v);
    vmx_load_vmcs(v);
}

void vmx_vmcs_exit(struct vcpu *v)
{
    if ( v == current )
        return;

    /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
    vmx_clear_vmcs(v);
    if ( is_hvm_vcpu(current) )
        vmx_load_vmcs(current);

    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    vcpu_unpause(v);
}

struct vmcs_struct *vmx_alloc_host_vmcs(void)
{
    return vmx_alloc_vmcs();
}

void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
{
    vmx_free_vmcs(vmcs);
}

#define GUEST_SEGMENT_LIMIT 0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
};

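/*
 * Record this physical CPU's IDT, GDT, TR and hypervisor stack pointer in
 * the VMCS host-state area.  These are per-CPU values, so they are
 * refreshed whenever the VMCS moves to a new CPU (see arch_vmx_do_resume).
 */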
static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    __vmwrite(HOST_TR_BASE, host_env.tr_base);

    /*
     * Skip end of cpu_user_regs when entering the hypervisor because the
     * CPU does not save context onto the stack. SS,RSP,CS,RIP,RFLAGS,etc
     * all get saved into the VMCS instead.
     */
    __vmwrite(HOST_RSP,
              (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
}

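/* Fill in the static fields of a freshly allocated VMCS for @v. */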
static void construct_vmcs(struct vcpu *v)
{
    unsigned long cr0, cr4;
    union vmcs_arbytes arbytes;

    vmx_vmcs_enter(v);

    /* VMCS controls. */
    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;

    /* I/O access bitmap. */
    __vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
    __vmwrite(IO_BITMAP_B, virt_to_maddr(hvm_io_bitmap + PAGE_SIZE));

    /* Host data selectors. */
    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
#if defined(__i386__)
    __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_FS_BASE, 0);
    __vmwrite(HOST_GS_BASE, 0);
#elif defined(__x86_64__)
    {
        unsigned long msr;
        rdmsrl(MSR_FS_BASE, msr); __vmwrite(HOST_FS_BASE, msr);
        rdmsrl(MSR_GS_BASE, msr); __vmwrite(HOST_GS_BASE, msr);
    }
#endif

    /* Host control registers. */
    __vmwrite(HOST_CR0, read_cr0());
    __vmwrite(HOST_CR4, read_cr4());

    /* Host CS:RIP. */
    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);

    /* MSR intercepts. */
    __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);

    __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
    __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);

    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    __vmwrite(CR3_TARGET_COUNT, 0);

    __vmwrite(GUEST_ACTIVITY_STATE, 0);

    /* Guest segment bases. */
    __vmwrite(GUEST_ES_BASE, 0);
    __vmwrite(GUEST_SS_BASE, 0);
    __vmwrite(GUEST_DS_BASE, 0);
    __vmwrite(GUEST_FS_BASE, 0);
    __vmwrite(GUEST_GS_BASE, 0);
    __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits. */
    __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);

    /* Guest segment AR bytes. */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;        /* type = 3 */
    arbytes.fields.s = 1;                 /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;               /* DPL = 0 */
    arbytes.fields.p = 1;                 /* segment present */
    arbytes.fields.default_ops_size = 1;  /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;          /* not null */
    __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
    arbytes.fields.seg_type = 0xb;        /* type = 0xb */
    __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT. */
    __vmwrite(GUEST_GDTR_BASE, 0);
    __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest IDT. */
    __vmwrite(GUEST_IDTR_BASE, 0);
    __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest LDT and TSS. */
    arbytes.fields.s = 0;                 /* not code or data segment */
    arbytes.fields.seg_type = 0x2;        /* LDT */
    arbytes.fields.default_ops_size = 0;  /* 16-bit */
    arbytes.fields.g = 0;
    __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
    arbytes.fields.seg_type = 0xb;        /* 32-bit TSS (busy) */
    __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);

    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __vmwrite(GUEST_DR7, 0);
    __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif

    __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);

    /* Guest CR0. */
    cr0 = read_cr0();
    v->arch.hvm_vmx.cpu_cr0 = cr0;
    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);

    /* Guest CR4. */
    cr4 = read_cr4();
    __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    v->arch.hvm_vmx.cpu_shadow_cr4 =
        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);

#ifdef __x86_64__
    /* VLAPIC TPR optimisation. */
    v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
    v->arch.hvm_vcpu.u.vmx.exec_control &=
        ~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
              page_to_maddr(vcpu_vlapic(v)->regs_page));
    __vmwrite(TPR_THRESHOLD, 0);
#endif

    __vmwrite(GUEST_LDTR_SELECTOR, 0);
    __vmwrite(GUEST_LDTR_BASE, 0);
    __vmwrite(GUEST_LDTR_LIMIT, 0);

    __vmwrite(GUEST_TR_BASE, 0);
    __vmwrite(GUEST_TR_LIMIT, 0xff);

    vmx_vmcs_exit(v);

    shadow_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */
}

int vmx_create_vmcs(struct vcpu *v)
{
    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
        return -ENOMEM;

    __vmx_clear_vmcs(v);

    construct_vmcs(v);

    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    if ( arch_vmx->vmcs == NULL )
        return;

    vmx_clear_vmcs(v);

    vmx_free_vmcs(arch_vmx->vmcs);
    arch_vmx->vmcs = NULL;
}

void vm_launch_fail(unsigned long eflags)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_launch_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

void vm_resume_fail(unsigned long eflags)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_resume_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

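/*
 * Fast path: the VMCS is still resident on this CPU, so reloading it is
 * enough.  Slow path: the VCPU has moved, so flush the VMCS from its old
 * CPU, load it here, and refresh timers and per-CPU host state; the
 * VMCLEAR also means the next entry will be a full VMLAUNCH.
 */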
void arch_vmx_do_resume(struct vcpu *v)
{
    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        vmx_load_vmcs(v);
    }
    else
    {
        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}

/* Dump a section of VMCS */
static void print_section(char *header, uint32_t start,
                          uint32_t end, int incr)
{
    uint32_t addr, j;
    unsigned long val;
    int code, rc;
    char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
    char *err[4] = {"------ ", "------------------ ",
                    "---------- ", "------------------ "};

    /*
     * Find width of the field (encoded in bits 14:13 of the address):
     * 0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural-width.
     */
    code = (start>>13)&3;

    if (header)
        printk("\t %s", header);

    for (addr=start, j=0; addr<=end; addr+=incr, j++) {

        if (!(j&3))
            printk("\n\t\t0x%08x: ", addr);

        val = __vmread_safe(addr, &rc);
        if (rc == 0)
            printk(fmt[code], val);
        else
            printk("%s", err[code]);
    }

    printk("\n");
}

/* Dump current VMCS */
void vmcs_dump_vcpu(void)
{
    print_section("16-bit Guest-State Fields", 0x800, 0x80e, 2);
    print_section("16-bit Host-State Fields", 0xc00, 0xc0c, 2);
    print_section("64-bit Control Fields", 0x2000, 0x2013, 1);
    print_section("64-bit Guest-State Fields", 0x2800, 0x2803, 1);
    print_section("32-bit Control Fields", 0x4000, 0x401c, 2);
    print_section("32-bit RO Data Fields", 0x4400, 0x440e, 2);
    print_section("32-bit Guest-State Fields", 0x4800, 0x482a, 2);
    print_section("32-bit Host-State Fields", 0x4c00, 0x4c00, 2);
    print_section("Natural 64-bit Control Fields", 0x6000, 0x600e, 2);
    print_section("64-bit RO Data Fields", 0x6400, 0x640A, 2);
    print_section("Natural 64-bit Guest-State Fields", 0x6800, 0x6826, 2);
    print_section("Natural 64-bit Host-State Fields", 0x6c00, 0x6c16, 2);
}

static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");
    for_each_domain ( d )
    {
        if ( !is_hvm_domain(d) )
            continue;
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu ( d, v )
        {
            printk("\tVCPU %d\n", v->vcpu_id);
            vmx_vmcs_enter(v);
            vmcs_dump_vcpu();
            vmx_vmcs_exit(v);
        }
    }

    printk("**************************************\n");
}

void setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */