xcp-1.6-updates/xen-4.1.hg

changeset 23279:7d9df818d302

hvm: vpmu: Add support for AMD Family 15h processors

The AMD Family 15h CPU mirrors the legacy K7 performance monitor counters
to a new location, and adds 2 new counters. This patch updates the HVM
VPMU code to take advantage of the new counters.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
xen-unstable changeset: 23306:e787d4f2e5ac
xen-unstable date: Mon May 09 09:54:46 2011 +0100


xenoprof: Add support for AMD Family 15h processors

The AMD Family 15h CPU mirrors the legacy K7 performance monitor counters
to a new location, and adds 2 new counters. This patch updates xenoprof
to take advantage of the new counters.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>

Rename fam15h -> amd_fam15h in a few places, as suggested by Jan
Beulich.

Signed-off-by: Keir Fraser <keir@xen.org>
xen-unstable changeset: 23305:014ee4e09644
xen-unstable date: Mon May 09 09:53:07 2011 +0100


xenoprof: Update cpu_type to sync with upstream oprofile

Update xenoprof's cpu_type strings to match upstream oprofile. Currently
AMD Family 11h through Family 15h are broken due to string mismatches.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
xen-unstable changeset: 23304:8981b582be3e
xen-unstable date: Mon May 09 09:49:14 2011 +0100
author Jacob Shin <jacob.shin@amd.com>
date Thu Apr 12 09:08:13 2012 +0100 (2012-04-12)
parents 0aa6bc8f38a9
children 4ad262a48a71
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vpmu.c xen/arch/x86/hvm/vpmu.c xen/arch/x86/oprofile/nmi_int.c xen/arch/x86/oprofile/op_model_athlon.c xen/arch/x86/oprofile/op_x86_model.h xen/include/asm-x86/msr-index.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Apr 12 09:06:02 2012 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Apr 12 09:08:13 2012 +0100
     1.3 @@ -1142,6 +1142,18 @@ static int svm_msr_read_intercept(unsign
     1.4      case MSR_K7_EVNTSEL1:
     1.5      case MSR_K7_EVNTSEL2:
     1.6      case MSR_K7_EVNTSEL3:
     1.7 +    case MSR_AMD_FAM15H_PERFCTR0:
     1.8 +    case MSR_AMD_FAM15H_PERFCTR1:
     1.9 +    case MSR_AMD_FAM15H_PERFCTR2:
    1.10 +    case MSR_AMD_FAM15H_PERFCTR3:
    1.11 +    case MSR_AMD_FAM15H_PERFCTR4:
    1.12 +    case MSR_AMD_FAM15H_PERFCTR5:
    1.13 +    case MSR_AMD_FAM15H_EVNTSEL0:
    1.14 +    case MSR_AMD_FAM15H_EVNTSEL1:
    1.15 +    case MSR_AMD_FAM15H_EVNTSEL2:
    1.16 +    case MSR_AMD_FAM15H_EVNTSEL3:
    1.17 +    case MSR_AMD_FAM15H_EVNTSEL4:
    1.18 +    case MSR_AMD_FAM15H_EVNTSEL5:
    1.19          vpmu_do_rdmsr(msr, msr_content);
    1.20          break;
    1.21  
    1.22 @@ -1237,6 +1249,18 @@ static int svm_msr_write_intercept(unsig
    1.23      case MSR_K7_EVNTSEL1:
    1.24      case MSR_K7_EVNTSEL2:
    1.25      case MSR_K7_EVNTSEL3:
    1.26 +    case MSR_AMD_FAM15H_PERFCTR0:
    1.27 +    case MSR_AMD_FAM15H_PERFCTR1:
    1.28 +    case MSR_AMD_FAM15H_PERFCTR2:
    1.29 +    case MSR_AMD_FAM15H_PERFCTR3:
    1.30 +    case MSR_AMD_FAM15H_PERFCTR4:
    1.31 +    case MSR_AMD_FAM15H_PERFCTR5:
    1.32 +    case MSR_AMD_FAM15H_EVNTSEL0:
    1.33 +    case MSR_AMD_FAM15H_EVNTSEL1:
    1.34 +    case MSR_AMD_FAM15H_EVNTSEL2:
    1.35 +    case MSR_AMD_FAM15H_EVNTSEL3:
    1.36 +    case MSR_AMD_FAM15H_EVNTSEL4:
    1.37 +    case MSR_AMD_FAM15H_EVNTSEL5:
    1.38          vpmu_do_wrmsr(msr, msr_content);
    1.39          break;
    1.40  
     2.1 --- a/xen/arch/x86/hvm/svm/vpmu.c	Thu Apr 12 09:06:02 2012 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/vpmu.c	Thu Apr 12 09:08:13 2012 +0100
     2.3 @@ -36,7 +36,9 @@
     2.4  #include <public/hvm/save.h>
     2.5  #include <asm/hvm/vpmu.h>
     2.6  
     2.7 -#define NUM_COUNTERS 4
     2.8 +#define F10H_NUM_COUNTERS 4
     2.9 +#define F15H_NUM_COUNTERS 6
    2.10 +#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS
    2.11  
    2.12  #define MSR_F10H_EVNTSEL_GO_SHIFT   40
    2.13  #define MSR_F10H_EVNTSEL_EN_SHIFT   22
    2.14 @@ -47,6 +49,11 @@
    2.15  #define set_guest_mode(msr) (msr |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
    2.16  #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH-1))))
    2.17  
    2.18 +static int __read_mostly num_counters = 0;
    2.19 +static u32 __read_mostly *counters = NULL;
    2.20 +static u32 __read_mostly *ctrls = NULL;
    2.21 +static bool_t __read_mostly k7_counters_mirrored = 0;
    2.22 +
    2.23  /* PMU Counter MSRs. */
    2.24  u32 AMD_F10H_COUNTERS[] = {
    2.25      MSR_K7_PERFCTR0,
    2.26 @@ -63,10 +70,28 @@ u32 AMD_F10H_CTRLS[] = {
    2.27      MSR_K7_EVNTSEL3
    2.28  };
    2.29  
    2.30 +u32 AMD_F15H_COUNTERS[] = {
    2.31 +    MSR_AMD_FAM15H_PERFCTR0,
    2.32 +    MSR_AMD_FAM15H_PERFCTR1,
    2.33 +    MSR_AMD_FAM15H_PERFCTR2,
    2.34 +    MSR_AMD_FAM15H_PERFCTR3,
    2.35 +    MSR_AMD_FAM15H_PERFCTR4,
    2.36 +    MSR_AMD_FAM15H_PERFCTR5
    2.37 +};
    2.38 +
    2.39 +u32 AMD_F15H_CTRLS[] = {
    2.40 +    MSR_AMD_FAM15H_EVNTSEL0,
    2.41 +    MSR_AMD_FAM15H_EVNTSEL1,
    2.42 +    MSR_AMD_FAM15H_EVNTSEL2,
    2.43 +    MSR_AMD_FAM15H_EVNTSEL3,
    2.44 +    MSR_AMD_FAM15H_EVNTSEL4,
    2.45 +    MSR_AMD_FAM15H_EVNTSEL5
    2.46 +};
    2.47 +
    2.48  /* storage for context switching */
    2.49  struct amd_vpmu_context {
    2.50 -    u64 counters[NUM_COUNTERS];
    2.51 -    u64 ctrls[NUM_COUNTERS];
    2.52 +    u64 counters[MAX_NUM_COUNTERS];
    2.53 +    u64 ctrls[MAX_NUM_COUNTERS];
    2.54      u32 hw_lapic_lvtpc;
    2.55  };
    2.56  
    2.57 @@ -78,10 +103,45 @@ static inline int get_pmu_reg_type(u32 a
    2.58      if ( (addr >= MSR_K7_PERFCTR0) && (addr <= MSR_K7_PERFCTR3) )
    2.59          return MSR_TYPE_COUNTER;
    2.60  
    2.61 +    if ( (addr >= MSR_AMD_FAM15H_EVNTSEL0) &&
    2.62 +         (addr <= MSR_AMD_FAM15H_PERFCTR5 ) )
    2.63 +    {
    2.64 +        if (addr & 1)
    2.65 +            return MSR_TYPE_COUNTER;
    2.66 +        else
    2.67 +            return MSR_TYPE_CTRL;
    2.68 +    }
    2.69 +
    2.70      /* unsupported registers */
    2.71      return -1;
    2.72  }
    2.73  
    2.74 +static inline u32 get_fam15h_addr(u32 addr)
    2.75 +{
    2.76 +    switch ( addr )
    2.77 +    {
    2.78 +    case MSR_K7_PERFCTR0:
    2.79 +        return MSR_AMD_FAM15H_PERFCTR0;
    2.80 +    case MSR_K7_PERFCTR1:
    2.81 +        return MSR_AMD_FAM15H_PERFCTR1;
    2.82 +    case MSR_K7_PERFCTR2:
    2.83 +        return MSR_AMD_FAM15H_PERFCTR2;
    2.84 +    case MSR_K7_PERFCTR3:
    2.85 +        return MSR_AMD_FAM15H_PERFCTR3;
    2.86 +    case MSR_K7_EVNTSEL0:
    2.87 +        return MSR_AMD_FAM15H_EVNTSEL0;
    2.88 +    case MSR_K7_EVNTSEL1:
    2.89 +        return MSR_AMD_FAM15H_EVNTSEL1;
    2.90 +    case MSR_K7_EVNTSEL2:
    2.91 +        return MSR_AMD_FAM15H_EVNTSEL2;
    2.92 +    case MSR_K7_EVNTSEL3:
    2.93 +        return MSR_AMD_FAM15H_EVNTSEL3;
    2.94 +    default:
    2.95 +        break;
    2.96 +    }
    2.97 +
    2.98 +    return addr;
    2.99 +}
   2.100  
   2.101  static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
   2.102  {
   2.103 @@ -110,12 +170,12 @@ static inline void context_restore(struc
   2.104      struct vpmu_struct *vpmu = vcpu_vpmu(v);
   2.105      struct amd_vpmu_context *ctxt = vpmu->context;
   2.106  
   2.107 -    for ( i = 0; i < NUM_COUNTERS; i++ )
   2.108 -        wrmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]);
   2.109 +    for ( i = 0; i < num_counters; i++ )
   2.110 +        wrmsrl(ctrls[i], ctxt->ctrls[i]);
   2.111  
   2.112 -    for ( i = 0; i < NUM_COUNTERS; i++ )
   2.113 +    for ( i = 0; i < num_counters; i++ )
   2.114      {
   2.115 -        wrmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]);
   2.116 +        wrmsrl(counters[i], ctxt->counters[i]);
   2.117  
   2.118          /* Force an interrupt to allow guest reset the counter,
   2.119          if the value is positive */
   2.120 @@ -147,11 +207,11 @@ static inline void context_save(struct v
   2.121      struct vpmu_struct *vpmu = vcpu_vpmu(v);
   2.122      struct amd_vpmu_context *ctxt = vpmu->context;
   2.123  
   2.124 -    for ( i = 0; i < NUM_COUNTERS; i++ )
   2.125 -        rdmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]);
   2.126 +    for ( i = 0; i < num_counters; i++ )
   2.127 +        rdmsrl(counters[i], ctxt->counters[i]);
   2.128  
   2.129 -    for ( i = 0; i < NUM_COUNTERS; i++ )
   2.130 -        rdmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]);
   2.131 +    for ( i = 0; i < num_counters; i++ )
   2.132 +        rdmsrl(ctrls[i], ctxt->ctrls[i]);
   2.133  }
   2.134  
   2.135  static void amd_vpmu_save(struct vcpu *v)
   2.136 @@ -175,12 +235,18 @@ static void context_update(unsigned int 
   2.137      struct vpmu_struct *vpmu = vcpu_vpmu(v);
   2.138      struct amd_vpmu_context *ctxt = vpmu->context;
   2.139  
   2.140 -    for ( i = 0; i < NUM_COUNTERS; i++ )
   2.141 -        if ( msr == AMD_F10H_COUNTERS[i] )
   2.142 +    if ( k7_counters_mirrored &&
   2.143 +        ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
   2.144 +    {
   2.145 +        msr = get_fam15h_addr(msr);
   2.146 +    }
   2.147 +
   2.148 +    for ( i = 0; i < num_counters; i++ )
   2.149 +        if ( msr == counters[i] )
   2.150              ctxt->counters[i] = msr_content;
   2.151  
   2.152 -    for ( i = 0; i < NUM_COUNTERS; i++ )
   2.153 -        if ( msr == AMD_F10H_CTRLS[i] )
   2.154 +    for ( i = 0; i < num_counters; i++ )
   2.155 +        if ( msr == ctrls[i] )
   2.156              ctxt->ctrls[i] = msr_content;
   2.157  
   2.158      ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
   2.159 @@ -235,10 +301,31 @@ static void amd_vpmu_initialise(struct v
   2.160  {
   2.161      struct amd_vpmu_context *ctxt = NULL;
   2.162      struct vpmu_struct *vpmu = vcpu_vpmu(v);
   2.163 +    __u8 family = current_cpu_data.x86;
   2.164  
   2.165      if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
   2.166          return;
   2.167  
   2.168 +    if ( counters == NULL )
   2.169 +    {
   2.170 +         switch ( family )
   2.171 +	 {
   2.172 +	 case 0x15:
   2.173 +	     num_counters = F15H_NUM_COUNTERS;
   2.174 +	     counters = AMD_F15H_COUNTERS;
   2.175 +	     ctrls = AMD_F15H_CTRLS;
   2.176 +	     k7_counters_mirrored = 1;
   2.177 +	     break;
   2.178 +	 case 0x10:
   2.179 +	 default:
   2.180 +	     num_counters = F10H_NUM_COUNTERS;
   2.181 +	     counters = AMD_F10H_COUNTERS;
   2.182 +	     ctrls = AMD_F10H_CTRLS;
   2.183 +	     k7_counters_mirrored = 0;
   2.184 +	     break;
   2.185 +	 }
   2.186 +    }
   2.187 +
   2.188      ctxt = xmalloc_bytes(sizeof(struct amd_vpmu_context));
   2.189  
   2.190      if ( !ctxt )
     3.1 --- a/xen/arch/x86/hvm/vpmu.c	Thu Apr 12 09:06:02 2012 +0100
     3.2 +++ b/xen/arch/x86/hvm/vpmu.c	Thu Apr 12 09:08:13 2012 +0100
     3.3 @@ -101,6 +101,7 @@ void vpmu_initialise(struct vcpu *v)
     3.4          switch ( family )
     3.5          {
     3.6          case 0x10:
     3.7 +        case 0x15:
     3.8              vpmu->arch_vpmu_ops = &amd_vpmu_ops;
     3.9              break;
    3.10          default:
     4.1 --- a/xen/arch/x86/oprofile/nmi_int.c	Thu Apr 12 09:06:02 2012 +0100
     4.2 +++ b/xen/arch/x86/oprofile/nmi_int.c	Thu Apr 12 09:08:13 2012 +0100
     4.3 @@ -30,7 +30,7 @@
     4.4  struct op_counter_config counter_config[OP_MAX_COUNTER];
     4.5  struct op_ibs_config ibs_config;
     4.6  
     4.7 -static struct op_x86_model_spec const *__read_mostly model;
     4.8 +struct op_x86_model_spec const *__read_mostly model;
     4.9  static struct op_msrs cpu_msrs[NR_CPUS];
    4.10  static unsigned long saved_lvtpc[NR_CPUS];
    4.11  
    4.12 @@ -435,19 +435,19 @@ static int __init nmi_init(void)
    4.13  				break;
    4.14  			case 0x11:
    4.15  				model = &op_athlon_spec;
    4.16 -				cpu_type = "x86-64/family11";
    4.17 +				cpu_type = "x86-64/family11h";
    4.18  				break;
    4.19                          case 0x12:
    4.20  				model = &op_athlon_spec;
    4.21 -				cpu_type = "x86-64/family12";
    4.22 +				cpu_type = "x86-64/family12h";
    4.23  				break;
    4.24  			case 0x14:
    4.25                                  model = &op_athlon_spec;
    4.26 -                                cpu_type = "x86-64/family14";
    4.27 +                                cpu_type = "x86-64/family14h";
    4.28                                  break;
    4.29                          case 0x15:
    4.30 -                                model = &op_athlon_spec;
    4.31 -                                cpu_type = "x86-64/family15";
    4.32 +                                model = &op_amd_fam15h_spec;
    4.33 +                                cpu_type = "x86-64/family15h";
    4.34                                  break;
    4.35  			}
    4.36  			break;
     5.1 --- a/xen/arch/x86/oprofile/op_model_athlon.c	Thu Apr 12 09:06:02 2012 +0100
     5.2 +++ b/xen/arch/x86/oprofile/op_model_athlon.c	Thu Apr 12 09:08:13 2012 +0100
     5.3 @@ -24,8 +24,13 @@
     5.4  #include "op_x86_model.h"
     5.5  #include "op_counter.h"
     5.6  
     5.7 -#define NUM_COUNTERS 4
     5.8 -#define NUM_CONTROLS 4
     5.9 +#define K7_NUM_COUNTERS 4
    5.10 +#define K7_NUM_CONTROLS 4
    5.11 +
    5.12 +#define FAM15H_NUM_COUNTERS 6
    5.13 +#define FAM15H_NUM_CONTROLS 6
    5.14 +
    5.15 +#define MAX_COUNTERS FAM15H_NUM_COUNTERS
    5.16  
    5.17  #define CTR_READ(msr_content,msrs,c) do {rdmsrl(msrs->counters[(c)].addr, (msr_content));} while (0)
    5.18  #define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0)
    5.19 @@ -44,9 +49,10 @@
    5.20  #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 0x1ULL) << 41))
    5.21  #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 0x1ULL) << 40))
    5.22  
    5.23 -static unsigned long reset_value[NUM_COUNTERS];
    5.24 +static unsigned long reset_value[MAX_COUNTERS];
    5.25  
    5.26  extern char svm_stgi_label[];
    5.27 +extern struct op_x86_model_spec const *__read_mostly model;
    5.28  
    5.29  #ifdef CONFIG_X86_64
    5.30  u32 ibs_caps = 0;
    5.31 @@ -175,26 +181,44 @@ static void athlon_fill_in_addresses(str
    5.32  	msrs->controls[3].addr = MSR_K7_EVNTSEL3;
    5.33  }
    5.34  
    5.35 - 
    5.36 +static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
    5.37 +{
    5.38 +	msrs->counters[0].addr = MSR_AMD_FAM15H_PERFCTR0;
    5.39 +	msrs->counters[1].addr = MSR_AMD_FAM15H_PERFCTR1;
    5.40 +	msrs->counters[2].addr = MSR_AMD_FAM15H_PERFCTR2;
    5.41 +	msrs->counters[3].addr = MSR_AMD_FAM15H_PERFCTR3;
    5.42 +	msrs->counters[4].addr = MSR_AMD_FAM15H_PERFCTR4;
    5.43 +	msrs->counters[5].addr = MSR_AMD_FAM15H_PERFCTR5;
    5.44 +
    5.45 +	msrs->controls[0].addr = MSR_AMD_FAM15H_EVNTSEL0;
    5.46 +	msrs->controls[1].addr = MSR_AMD_FAM15H_EVNTSEL1;
    5.47 +	msrs->controls[2].addr = MSR_AMD_FAM15H_EVNTSEL2;
    5.48 +	msrs->controls[3].addr = MSR_AMD_FAM15H_EVNTSEL3;
    5.49 +	msrs->controls[4].addr = MSR_AMD_FAM15H_EVNTSEL4;
    5.50 +	msrs->controls[5].addr = MSR_AMD_FAM15H_EVNTSEL5;
    5.51 +}
    5.52 +
    5.53  static void athlon_setup_ctrs(struct op_msrs const * const msrs)
    5.54  {
    5.55  	uint64_t msr_content;
    5.56  	int i;
    5.57 +	unsigned int const nr_ctrs = model->num_counters;
    5.58 +	unsigned int const nr_ctrls = model->num_controls;
    5.59   
    5.60  	/* clear all counters */
    5.61 -	for (i = 0 ; i < NUM_CONTROLS; ++i) {
    5.62 +	for (i = 0 ; i < nr_ctrls; ++i) {
    5.63  		CTRL_READ(msr_content, msrs, i);
    5.64  		CTRL_CLEAR(msr_content);
    5.65  		CTRL_WRITE(msr_content, msrs, i);
    5.66  	}
    5.67  	
    5.68  	/* avoid a false detection of ctr overflows in NMI handler */
    5.69 -	for (i = 0; i < NUM_COUNTERS; ++i) {
    5.70 +	for (i = 0; i < nr_ctrs; ++i) {
    5.71  		CTR_WRITE(1, msrs, i);
    5.72  	}
    5.73  
    5.74  	/* enable active counters */
    5.75 -	for (i = 0; i < NUM_COUNTERS; ++i) {
    5.76 +	for (i = 0; i < nr_ctrs; ++i) {
    5.77  		if (counter_config[i].enabled) {
    5.78  			reset_value[i] = counter_config[i].count;
    5.79  
    5.80 @@ -300,6 +324,7 @@ static int athlon_check_ctrs(unsigned in
    5.81  	int mode = 0;
    5.82  	struct vcpu *v = current;
    5.83  	struct cpu_user_regs *guest_regs = guest_cpu_user_regs();
    5.84 +	unsigned int const nr_ctrs = model->num_counters;
    5.85  
    5.86  	if (!guest_mode(regs) &&
    5.87  	    (regs->eip == (unsigned long)svm_stgi_label)) {
    5.88 @@ -312,7 +337,7 @@ static int athlon_check_ctrs(unsigned in
    5.89  		mode = xenoprofile_get_mode(v, regs);
    5.90  	}
    5.91  
    5.92 -	for (i = 0 ; i < NUM_COUNTERS; ++i) {
    5.93 +	for (i = 0 ; i < nr_ctrs; ++i) {
    5.94  		CTR_READ(msr_content, msrs, i);
    5.95  		if (CTR_OVERFLOWED(msr_content)) {
    5.96  			xenoprof_log_event(current, regs, eip, mode, i);
    5.97 @@ -373,7 +398,8 @@ static void athlon_start(struct op_msrs 
    5.98  {
    5.99  	uint64_t msr_content;
   5.100  	int i;
   5.101 -	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
   5.102 +	unsigned int const nr_ctrs = model->num_counters;
   5.103 +	for (i = 0 ; i < nr_ctrs ; ++i) {
   5.104  		if (reset_value[i]) {
   5.105  			CTRL_READ(msr_content, msrs, i);
   5.106  			CTRL_SET_ACTIVE(msr_content);
   5.107 @@ -401,10 +427,11 @@ static void athlon_stop(struct op_msrs c
   5.108  {
   5.109  	uint64_t msr_content;
   5.110  	int i;
   5.111 +	unsigned int const nr_ctrs = model->num_counters;
   5.112  
   5.113  	/* Subtle: stop on all counters to avoid race with
   5.114  	 * setting our pm callback */
   5.115 -	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
   5.116 +	for (i = 0 ; i < nr_ctrs ; ++i) {
   5.117  		CTRL_READ(msr_content, msrs, i);
   5.118  		CTRL_SET_INACTIVE(msr_content);
   5.119  		CTRL_WRITE(msr_content, msrs, i);
   5.120 @@ -512,11 +539,21 @@ void __init ibs_init(void)
   5.121  #endif /* CONFIG_X86_64 */
   5.122  
   5.123  struct op_x86_model_spec const op_athlon_spec = {
   5.124 -	.num_counters = NUM_COUNTERS,
   5.125 -	.num_controls = NUM_CONTROLS,
   5.126 +	.num_counters = K7_NUM_COUNTERS,
   5.127 +	.num_controls = K7_NUM_CONTROLS,
   5.128  	.fill_in_addresses = &athlon_fill_in_addresses,
   5.129  	.setup_ctrs = &athlon_setup_ctrs,
   5.130  	.check_ctrs = &athlon_check_ctrs,
   5.131  	.start = &athlon_start,
   5.132  	.stop = &athlon_stop
   5.133  };
   5.134 +
   5.135 +struct op_x86_model_spec const op_amd_fam15h_spec = {
   5.136 +	.num_counters = FAM15H_NUM_COUNTERS,
   5.137 +	.num_controls = FAM15H_NUM_CONTROLS,
   5.138 +	.fill_in_addresses = &fam15h_fill_in_addresses,
   5.139 +	.setup_ctrs = &athlon_setup_ctrs,
   5.140 +	.check_ctrs = &athlon_check_ctrs,
   5.141 +	.start = &athlon_start,
   5.142 +	.stop = &athlon_stop
   5.143 +};
     6.1 --- a/xen/arch/x86/oprofile/op_x86_model.h	Thu Apr 12 09:06:02 2012 +0100
     6.2 +++ b/xen/arch/x86/oprofile/op_x86_model.h	Thu Apr 12 09:08:13 2012 +0100
     6.3 @@ -48,6 +48,7 @@ extern struct op_x86_model_spec op_arch_
     6.4  extern struct op_x86_model_spec const op_p4_spec;
     6.5  extern struct op_x86_model_spec const op_p4_ht2_spec;
     6.6  extern struct op_x86_model_spec const op_athlon_spec;
     6.7 +extern struct op_x86_model_spec const op_amd_fam15h_spec;
     6.8  
     6.9  void arch_perfmon_setup_counters(void);
    6.10  #endif /* OP_X86_MODEL_H */
     7.1 --- a/xen/include/asm-x86/msr-index.h	Thu Apr 12 09:06:02 2012 +0100
     7.2 +++ b/xen/include/asm-x86/msr-index.h	Thu Apr 12 09:08:13 2012 +0100
     7.3 @@ -223,6 +223,19 @@
     7.4  #define MSR_K8_VM_CR			0xc0010114
     7.5  #define MSR_K8_VM_HSAVE_PA		0xc0010117
     7.6  
     7.7 +#define MSR_AMD_FAM15H_EVNTSEL0		0xc0010200
     7.8 +#define MSR_AMD_FAM15H_PERFCTR0		0xc0010201
     7.9 +#define MSR_AMD_FAM15H_EVNTSEL1		0xc0010202
    7.10 +#define MSR_AMD_FAM15H_PERFCTR1		0xc0010203
    7.11 +#define MSR_AMD_FAM15H_EVNTSEL2		0xc0010204
    7.12 +#define MSR_AMD_FAM15H_PERFCTR2		0xc0010205
    7.13 +#define MSR_AMD_FAM15H_EVNTSEL3		0xc0010206
    7.14 +#define MSR_AMD_FAM15H_PERFCTR3		0xc0010207
    7.15 +#define MSR_AMD_FAM15H_EVNTSEL4		0xc0010208
    7.16 +#define MSR_AMD_FAM15H_PERFCTR4		0xc0010209
    7.17 +#define MSR_AMD_FAM15H_EVNTSEL5		0xc001020a
    7.18 +#define MSR_AMD_FAM15H_PERFCTR5		0xc001020b
    7.19 +
    7.20  #define MSR_K8_FEATURE_MASK		0xc0011004
    7.21  #define MSR_K8_EXT_FEATURE_MASK		0xc0011005
    7.22