xcp-1.6-updates/xen-4.1.hg

changeset 23280:4ad262a48a71

HVM/SVM: enable tsc scaling ratio for SVM

Future AMD CPUs support TSC scaling. It allows guests to run with a
TSC frequency different from that of the host, using this formula:
guest_tsc = host_tsc * tsc_ratio + vmcb_offset. The tsc_ratio is a
64-bit MSR containing a fixed-point number in 8.32 format (8 bits for
the integer part and 32 bits for the fractional part). For instance,
0x00000003_80000000 means tsc_ratio = 3.5.
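
As an illustration only (not part of the patch), the sketch below shows
how such an 8.32 ratio can be built and applied in plain C. It mirrors
the TSC_RATIO() macro added to svm.h below; tsc_ratio(), scale_tsc()
and the unsigned __int128 intermediate (a GCC extension, used here only
to keep the sketch free of overflow) are this example's own names, not
hypervisor code.

#include <stdint.h>
#include <stdio.h>

#define TSC_RATIO_RSVD_BITS 0xffffff0000000000ULL

/* Build the 8.32 fixed-point ratio, as the TSC_RATIO() macro does:
 * (guest_khz << 32) / host_khz, with the reserved top bits masked off. */
static uint64_t tsc_ratio(uint64_t guest_khz, uint64_t host_khz)
{
    return ((guest_khz << 32) / host_khz) & ~TSC_RATIO_RSVD_BITS;
}

/* Conceptually what the CPU does on each guest RDTSC once the ratio is
 * loaded: guest_tsc = ((host_tsc * tsc_ratio) >> 32) + vmcb_offset. */
static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio, uint64_t vmcb_offset)
{
    return (uint64_t)(((unsigned __int128)host_tsc * ratio) >> 32) + vmcb_offset;
}

int main(void)
{
    /* A 3.5 GHz guest frequency on a 1.0 GHz host: ratio = 3.5 */
    uint64_t ratio = tsc_ratio(3500000, 1000000);

    printf("ratio  = %#018llx\n", (unsigned long long)ratio);   /* 0x0000000380000000 */
    printf("scaled = %llu\n",                                   /* 3500000000 */
           (unsigned long long)scale_tsc(1000000000ULL, ratio, 0));
    return 0;
}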

This patch enables the TSC scaling ratio for SVM. With it, guest VMs
no longer need to take a #VMEXIT to calculate a translated TSC value
while running under TSC emulation mode. This can substantially reduce
the rdtsc overhead.
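
Purely as an illustration (the names below are this sketch's own, not
hypervisor code), the arithmetic of the svm_set_tsc_offset() hunk below
can be restated as a standalone function: the host TSC is rescaled from
host kHz to guest kHz, with the multiply split into high/low 32-bit
halves so it stays within 64 bits, and the VMCB offset becomes the
difference from the desired guest TSC.

#include <stdint.h>
#include <stdio.h>

static uint64_t tsc_offset_for(uint64_t guest_tsc, uint64_t host_tsc,
                               uint64_t guest_khz, uint64_t host_khz)
{
    /* Scale host_tsc into guest-kHz units; split into hi/lo 32-bit
     * halves so each multiply by guest_khz fits in 64 bits. */
    uint64_t scaled = (((host_tsc >> 32) * guest_khz / host_khz) << 32) +
                      (host_tsc & 0xffffffffULL) * guest_khz / host_khz;

    return guest_tsc - scaled;
}

int main(void)
{
    /* Host TSC after 1s at 2.6 GHz; the 2.0 GHz guest should currently
     * read 1e9, so the offset comes out as -1000000000. */
    printf("offset = %lld\n",
           (long long)tsc_offset_for(1000000000ULL, 2600000000ULL,
                                     2000000, 2600000));
    return 0;
}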

Signed-off-by: Wei Huang <wei.huang2@amd.com>
xen-unstable changeset: 23437:d7c755c25bb9
xen-unstable date: Sat May 28 08:58:08 2011 +0100
author Wei Huang <wei.huang2@amd.com>
date Thu Apr 12 09:13:14 2012 +0100 (2012-04-12)
parents 7d9df818d302
children 00881b29bfe2
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/include/asm-x86/hvm/svm/svm.h xen/include/asm-x86/msr-index.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Apr 12 09:08:13 2012 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Apr 12 09:13:14 2012 +0100
     1.3 @@ -588,6 +588,22 @@ static void svm_set_segment_register(str
     1.4  static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
     1.5  {
     1.6      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     1.7 +    struct domain *d = v->domain;
     1.8 +
     1.9 +    /* Re-adjust the offset value when TSC_RATIO is available */
    1.10 +    if ( cpu_has_tsc_ratio && d->arch.vtsc )
    1.11 +    {
    1.12 +        uint64_t host_tsc, guest_tsc;
    1.13 +
    1.14 +        rdtscll(host_tsc);
    1.15 +        guest_tsc = hvm_get_guest_tsc(v);
    1.16 +            
    1.17 +        /* calculate hi,lo parts in 64bits to prevent overflow */
    1.18 +        offset = (((host_tsc >> 32) * d->arch.tsc_khz / cpu_khz) << 32) +
    1.19 +            (host_tsc & 0xffffffffULL) * d->arch.tsc_khz / cpu_khz;
    1.20 +        offset = guest_tsc - offset;
    1.21 +    }
    1.22 +
    1.23      vmcb_set_tsc_offset(vmcb, offset);
    1.24  }
    1.25  
    1.26 @@ -638,6 +654,19 @@ static void svm_init_hypercall_page(stru
    1.27      *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
    1.28  }
    1.29  
    1.30 +static inline void svm_tsc_ratio_save(struct vcpu *v)
    1.31 +{
    1.32 +    /* Other vcpus might not have vtsc enabled. So disable TSC_RATIO here. */
    1.33 +    if ( cpu_has_tsc_ratio && v->domain->arch.vtsc )
    1.34 +        wrmsrl(MSR_AMD64_TSC_RATIO, DEFAULT_TSC_RATIO);
    1.35 +}
    1.36 +
    1.37 +static inline void svm_tsc_ratio_load(struct vcpu *v)
    1.38 +{
    1.39 +    if ( cpu_has_tsc_ratio && v->domain->arch.vtsc ) 
    1.40 +        wrmsrl(MSR_AMD64_TSC_RATIO, vcpu_tsc_ratio(v));
    1.41 +}
    1.42 +
    1.43  static void svm_ctxt_switch_from(struct vcpu *v)
    1.44  {
    1.45      int cpu = smp_processor_id();
    1.46 @@ -646,6 +675,7 @@ static void svm_ctxt_switch_from(struct 
    1.47  
    1.48      svm_save_dr(v);
    1.49      vpmu_save(v);
    1.50 +    svm_tsc_ratio_save(v);
    1.51  
    1.52      svm_sync_vmcb(v);
    1.53      svm_vmload(per_cpu(root_vmcb, cpu));
    1.54 @@ -689,6 +719,7 @@ static void svm_ctxt_switch_to(struct vc
    1.55      svm_vmload(vmcb);
    1.56      vmcb->cleanbits.bytes = 0;
    1.57      vpmu_load(v);
    1.58 +    svm_tsc_ratio_load(v);
    1.59  
    1.60      if ( cpu_has_rdtscp )
    1.61          wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
     2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Apr 12 09:08:13 2012 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Apr 12 09:13:14 2012 +0100
     2.3 @@ -165,7 +165,9 @@ static int construct_vmcb(struct vcpu *v
     2.4  
     2.5      /* TSC. */
     2.6      vmcb->_tsc_offset = 0;
     2.7 -    if ( v->domain->arch.vtsc )
     2.8 +
     2.9 +    /* Don't need to intercept RDTSC if CPU supports TSC rate scaling */
    2.10 +    if ( v->domain->arch.vtsc && !cpu_has_tsc_ratio )
    2.11      {
    2.12          vmcb->_general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
    2.13          vmcb->_general2_intercepts |= GENERAL2_INTERCEPT_RDTSCP;
     3.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Thu Apr 12 09:08:13 2012 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Thu Apr 12 09:13:14 2012 +0100
     3.3 @@ -82,5 +82,13 @@ extern u32 svm_feature_flags;
     3.4  #define cpu_has_svm_cleanbits cpu_has_svm_feature(SVM_FEATURE_VMCBCLEAN)
     3.5  #define cpu_has_svm_decode    cpu_has_svm_feature(SVM_FEATURE_DECODEASSISTS)
     3.6  #define cpu_has_pause_filter  cpu_has_svm_feature(SVM_FEATURE_PAUSEFILTER)
     3.7 +#define cpu_has_tsc_ratio     cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR)
     3.8 +
     3.9 +/* TSC rate */
    3.10 +#define DEFAULT_TSC_RATIO       0x0000000100000000ULL
    3.11 +#define TSC_RATIO_RSVD_BITS     0xffffff0000000000ULL
    3.12 +#define TSC_RATIO(g_khz, h_khz) ( (((u64)(g_khz)<<32)/(u64)(h_khz)) & \
    3.13 +                                  ~TSC_RATIO_RSVD_BITS )
    3.14 +#define vcpu_tsc_ratio(v)       TSC_RATIO((v)->domain->arch.tsc_khz, cpu_khz)
    3.15  
    3.16  #endif /* __ASM_X86_HVM_SVM_H__ */
     4.1 --- a/xen/include/asm-x86/msr-index.h	Thu Apr 12 09:08:13 2012 +0100
     4.2 +++ b/xen/include/asm-x86/msr-index.h	Thu Apr 12 09:13:14 2012 +0100
     4.3 @@ -265,6 +265,9 @@
     4.4  #define MSR_AMD_PATCHLEVEL		0x0000008b
     4.5  #define MSR_AMD_PATCHLOADER		0xc0010020
     4.6  
     4.7 +/* AMD TSC RATE MSR */
     4.8 +#define MSR_AMD64_TSC_RATIO		0xc0000104
     4.9 +
    4.10  /* AMD OS Visible Workaround MSRs */
    4.11  #define MSR_AMD_OSVW_ID_LENGTH          0xc0010140
    4.12  #define MSR_AMD_OSVW_STATUS             0xc0010141