debuggers.hg
changeset 14699:9a839ead4870
Enable VMX MSR bitmap support.
We use it to avoid VMExits on FS_BASE and GS_BASE MSR accesses.
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kfraser@localhost.localdomain
date      Thu Mar 29 19:18:43 2007 +0100 (2007-03-29)
parents   704151d0e219
children  4a240d458db9
files     xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/vmx/vmcs.c xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/msr.h
line diff
--- a/xen/arch/x86/hvm/hvm.c    Thu Mar 29 18:26:12 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Mar 29 19:18:43 2007 +0100
@@ -59,6 +59,9 @@ struct hvm_function_table hvm_funcs __re
 /* I/O permission bitmap is globally shared by all HVM guests. */
 char __attribute__ ((__section__ (".bss.page_aligned")))
     hvm_io_bitmap[3*PAGE_SIZE];
+/* MSR permission bitmap is globally shared by all HVM guests. */
+char __attribute__ ((__section__ (".bss.page_aligned")))
+    hvm_msr_bitmap[PAGE_SIZE];
 
 void hvm_enable(struct hvm_function_table *fns)
 {
@@ -72,6 +75,9 @@ void hvm_enable(struct hvm_function_tabl
     memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
     clear_bit(0x80, hvm_io_bitmap);
 
+    /* All MSR accesses are intercepted by default. */
+    memset(hvm_msr_bitmap, ~0, sizeof(hvm_msr_bitmap));
+
     hvm_funcs = *fns;
     hvm_enabled = 1;
 }
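The hunk above reserves a page-aligned MSR bitmap and memsets it to ~0, so every controllable MSR access is intercepted until a bit is explicitly cleared. The following standalone sketch (not Xen code; test_bit and msr_access_intercepted are illustrative names only) shows how such a bitmap would be consulted, using the same four 1 KiB quarters as the disable_intercept_for_msr() hunk below:

    /*
     * Standalone illustration of the "intercept by default" semantics:
     * with the 4 KiB bitmap memset to ~0, every bit is 1 and every
     * controllable MSR access causes a VMExit until a bit is cleared.
     * Quarter offsets mirror the ones used by this patch.
     */
    #include <stdint.h>

    static int test_bit(const uint8_t *base, uint32_t bit)
    {
        return (base[bit / 8] >> (bit % 8)) & 1;
    }

    /* Returns 1 if an access to 'msr' would VMExit according to 'bitmap'
     * (a 4096-byte MSR bitmap page); MSRs outside the two controllable
     * ranges always exit regardless of the bitmap. */
    static int msr_access_intercepted(const uint8_t *bitmap, uint32_t msr,
                                      int is_write)
    {
        if (msr <= 0x1fff)
            return test_bit(bitmap + (is_write ? 0x400 : 0x000), msr);
        if (msr >= 0xc0000000 && msr <= 0xc0001fff)
            return test_bit(bitmap + (is_write ? 0xc00 : 0x800), msr & 0x1fff);
        return 1;
    }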
--- a/xen/arch/x86/hvm/vmx/vmcs.c    Thu Mar 29 18:26:12 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c    Thu Mar 29 19:18:43 2007 +0100
@@ -61,6 +61,25 @@ static u32 adjust_vmx_controls(u32 ctl_m
     return ctl;
 }
 
+static void disable_intercept_for_msr(u32 msr)
+{
+    /*
+     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address).
+     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+     */
+    if ( msr <= 0x1fff )
+    {
+        __clear_bit(msr, hvm_msr_bitmap + 0x000); /* read-low */
+        __clear_bit(msr, hvm_msr_bitmap + 0x400); /* write-low */
+    }
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        __clear_bit(msr, hvm_msr_bitmap + 0x800); /* read-high */
+        __clear_bit(msr, hvm_msr_bitmap + 0xc00); /* write-high */
+    }
+}
+
 void vmx_init_vmcs_config(void)
 {
     u32 vmx_msr_low, vmx_msr_high, min, max;
@@ -82,6 +101,7 @@ void vmx_init_vmcs_config(void)
 #ifdef __x86_64__
     min = max |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
 #endif
+    max |= CPU_BASED_ACTIVATE_MSR_BITMAP;
     _vmx_cpu_based_exec_control = adjust_vmx_controls(
         min, max, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
 
@@ -105,6 +125,9 @@ void vmx_init_vmcs_config(void)
         vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
         vmx_vmexit_control = _vmx_vmexit_control;
         vmx_vmentry_control = _vmx_vmentry_control;
+
+        disable_intercept_for_msr(MSR_FS_BASE);
+        disable_intercept_for_msr(MSR_GS_BASE);
     }
     else
     {
@@ -287,6 +310,9 @@ static void construct_vmcs(struct vcpu *
     __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
     v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
 
+    if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP )
+        __vmwrite(MSR_BITMAP, virt_to_maddr(hvm_msr_bitmap));
+
     /* I/O access bitmap. */
     __vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
     __vmwrite(IO_BITMAP_B, virt_to_maddr(hvm_io_bitmap + PAGE_SIZE));
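As a worked example of the index arithmetic in disable_intercept_for_msr() (a sketch only, reusing the quarter offsets from the hunk above): MSR_FS_BASE (0xc0000100) and MSR_GS_BASE (0xc0000101) fall in the high range, so after masking with 0x1fff their bit indices are 0x100 and 0x101, which both land in byte 0x20 of the read-high and write-high quarters.

    /* Worked example of the bit positions cleared for MSR_FS_BASE and
     * MSR_GS_BASE, using the quarter offsets from the patch above.
     * Bit indices 0x100 and 0x101 live in bits 0 and 1 of the bytes at
     * page offsets 0x820 (read-high) and 0xc20 (write-high). */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t fs = 0xc0000100, gs = 0xc0000101;

        assert((fs & 0x1fff) == 0x100 && (gs & 0x1fff) == 0x101);
        assert(0x800 + (fs & 0x1fff) / 8 == 0x820);   /* read-high byte  */
        assert(0xc00 + (gs & 0x1fff) / 8 == 0xc20);   /* write-high byte */
        return 0;
    }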
--- a/xen/include/asm-x86/hvm/support.h    Thu Mar 29 18:26:12 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h    Thu Mar 29 19:18:43 2007 +0100
@@ -215,6 +215,7 @@ int hvm_load(struct domain *d, hvm_domai
 /* End of save/restore */
 
 extern char hvm_io_bitmap[];
+extern char hvm_msr_bitmap[];
 extern int hvm_enabled;
 
 void hvm_enable(struct hvm_function_table *);
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h    Thu Mar 29 18:26:12 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h    Thu Mar 29 19:18:43 2007 +0100
@@ -109,6 +109,7 @@ extern int vmcs_version;
 #define CPU_BASED_MOV_DR_EXITING        0x00800000
 #define CPU_BASED_UNCOND_IO_EXITING     0x01000000
 #define CPU_BASED_ACTIVATE_IO_BITMAP    0x02000000
+#define CPU_BASED_ACTIVATE_MSR_BITMAP   0x10000000
 #define CPU_BASED_MONITOR_EXITING       0x20000000
 #define CPU_BASED_PAUSE_EXITING         0x40000000
 
@@ -143,6 +144,8 @@ enum vmcs_field {
     IO_BITMAP_A_HIGH                = 0x00002001,
     IO_BITMAP_B                     = 0x00002002,
     IO_BITMAP_B_HIGH                = 0x00002003,
+    MSR_BITMAP                      = 0x00002004,
+    MSR_BITMAP_HIGH                 = 0x00002005,
     VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
     VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
     VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
--- a/xen/include/asm-x86/msr.h    Thu Mar 29 18:26:12 2007 +0100
+++ b/xen/include/asm-x86/msr.h    Thu Mar 29 19:18:43 2007 +0100
@@ -126,8 +126,8 @@ static inline void wrmsrl(unsigned int m
 #define MSR_LSTAR           0xc0000082 /* long mode SYSCALL target */
 #define MSR_CSTAR           0xc0000083 /* compatibility mode SYSCALL target */
 #define MSR_SYSCALL_MASK    0xc0000084 /* EFLAGS mask for syscall */
-#define MSR_FS_BASE         0xc0000100 /* 64bit GS base */
-#define MSR_GS_BASE         0xc0000101 /* 64bit FS base */
+#define MSR_FS_BASE         0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE         0xc0000101 /* 64bit GS base */
 #define MSR_SHADOW_GS_BASE  0xc0000102 /* SwapGS GS shadow */
 /* EFER bits: */
 #define _EFER_SCE           0 /* SYSCALL/SYSRET */