debuggers.hg
changeset 16422:e82fb0729b51
vmx: wbinvd optimization for pass-through domain.
Optimise wbinvd exit emulation for pass-through domains to avoid
"always wbinvd" when a VCPU is migrated. Instead, do host wbinvd on
all host CPUs on wbinvd exit.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
Optimise wbinvd exit emulation for pass-through domains to avoid
"always wbinvd" when a VCPU is migrated. Instead, do host wbinvd on
all host CPUs on wbinvd exit.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Fri Nov 16 16:36:38 2007 +0000 (2007-11-16) |
parents | ef4b60c99735 |
children | 1ad85cdcca3d |
files | xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmcs.h |
line diff
1.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c Fri Nov 16 16:22:00 2007 +0000 1.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Fri Nov 16 16:36:38 2007 +0000 1.3 @@ -763,7 +763,7 @@ void vm_resume_fail(unsigned long eflags 1.4 domain_crash_synchronous(); 1.5 } 1.6 1.7 -static void flush_cache(void *info) 1.8 +static void wbinvd_ipi(void *info) 1.9 { 1.10 wbinvd(); 1.11 } 1.12 @@ -779,16 +779,21 @@ void vmx_do_resume(struct vcpu *v) 1.13 } 1.14 else 1.15 { 1.16 - /* For pass-through domain, guest PCI-E device driver may leverage the 1.17 - * "Non-Snoop" I/O, and explicitly "WBINVD" or "CFLUSH" to a RAM space. 1.18 - * In that case, if migration occurs before "WBINVD" or "CFLUSH", need 1.19 - * to maintain data consistency. 1.20 + /* 1.21 + * For pass-through domain, guest PCI-E device driver may leverage the 1.22 + * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space. 1.23 + * Since migration may occur before WBINVD or CLFLUSH, we need to 1.24 + * maintain data consistency either by: 1.25 + * 1: flushing cache (wbinvd) when the guest is scheduled out if 1.26 + * there is no wbinvd exit, or 1.27 + * 2: execute wbinvd on all dirty pCPUs when guest wbinvd exits. 1.28 */ 1.29 - if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) ) 1.30 + if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) && 1.31 + !cpu_has_wbinvd_exiting ) 1.32 { 1.33 int cpu = v->arch.hvm_vmx.active_cpu; 1.34 if ( cpu != -1 ) 1.35 - on_selected_cpus(cpumask_of_cpu(cpu), flush_cache, NULL, 1, 1); 1.36 + on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1); 1.37 } 1.38 1.39 vmx_clear_vmcs(v);
2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Fri Nov 16 16:22:00 2007 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Nov 16 16:36:38 2007 +0000 2.3 @@ -2638,6 +2638,11 @@ static void vmx_do_extint(struct cpu_use 2.4 } 2.5 } 2.6 2.7 +static void wbinvd_ipi(void *info) 2.8 +{ 2.9 + wbinvd(); 2.10 +} 2.11 + 2.12 static void vmx_failed_vmentry(unsigned int exit_reason, 2.13 struct cpu_user_regs *regs) 2.14 { 2.15 @@ -2913,14 +2918,21 @@ asmlinkage void vmx_vmexit_handler(struc 2.16 __update_guest_eip(inst_len); 2.17 if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) ) 2.18 { 2.19 - wbinvd(); 2.20 - /* Disable further WBINVD intercepts. */ 2.21 - if ( (exit_reason == EXIT_REASON_WBINVD) && 2.22 - (vmx_cpu_based_exec_control & 2.23 - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ) 2.24 - __vmwrite(SECONDARY_VM_EXEC_CONTROL, 2.25 - vmx_secondary_exec_control & 2.26 - ~SECONDARY_EXEC_WBINVD_EXITING); 2.27 + if ( cpu_has_wbinvd_exiting ) 2.28 + { 2.29 + on_each_cpu(wbinvd_ipi, NULL, 1, 1); 2.30 + } 2.31 + else 2.32 + { 2.33 + wbinvd(); 2.34 + /* Disable further WBINVD intercepts. */ 2.35 + if ( (exit_reason == EXIT_REASON_WBINVD) && 2.36 + (vmx_cpu_based_exec_control & 2.37 + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ) 2.38 + __vmwrite(SECONDARY_VM_EXEC_CONTROL, 2.39 + vmx_secondary_exec_control & 2.40 + ~SECONDARY_EXEC_WBINVD_EXITING); 2.41 + } 2.42 } 2.43 break; 2.44 }
3.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h Fri Nov 16 16:22:00 2007 +0000 3.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Fri Nov 16 16:36:38 2007 +0000 3.3 @@ -136,6 +136,8 @@ extern u32 vmx_secondary_exec_control; 3.4 3.5 extern bool_t cpu_has_vmx_ins_outs_instr_info; 3.6 3.7 +#define cpu_has_wbinvd_exiting \ 3.8 + (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING) 3.9 #define cpu_has_vmx_virtualize_apic_accesses \ 3.10 (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) 3.11 #define cpu_has_vmx_tpr_shadow \