Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/cpu/vpmu_amd.c

Coverage summary: every instrumented line in this file has an execution count of 0; none of this code was exercised during the measured run.

/*
 * vpmu_amd.c: PMU virtualization for HVM domain.
 *
 * Copyright (c) 2010, Advanced Micro Devices, Inc.
 * Parts of this code are Copyright (c) 2007, Intel Corporation
 *
 * Author: Wei Wang <wei.wang2@amd.com>
 * Tested by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/xenoprof.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <asm/apic.h>
#include <asm/vpmu.h>
#include <asm/hvm/save.h>
#include <asm/hvm/vlapic.h>
#include <public/pmu.h>

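/*
 * Event-select (EVNTSEL) MSR layout as used below: per the macro names,
 * bit 40 is the "GO" (guest-only counting) bit and bit 22 the "EN"
 * (counter enable) bit.  Counters are 48 bits wide; a counter has
 * overflowed once its top bit (bit 47) reads back as clear.
 */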
#define MSR_F10H_EVNTSEL_GO_SHIFT   40
#define MSR_F10H_EVNTSEL_EN_SHIFT   22
#define MSR_F10H_COUNTER_LENGTH     48

#define is_guest_mode(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
#define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT))
#define set_guest_mode(msr) ((msr) |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
#define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH - 1))))

static unsigned int __read_mostly num_counters;
static const u32 __read_mostly *counters;
static const u32 __read_mostly *ctrls;
static bool_t __read_mostly k7_counters_mirrored;

/* Total size of PMU registers block (copied to/from PV(H) guest) */
static unsigned int __read_mostly regs_sz;

#define F10H_NUM_COUNTERS   4
#define F15H_NUM_COUNTERS   6
#define MAX_NUM_COUNTERS    F15H_NUM_COUNTERS

/* PMU Counter MSRs. */
static const u32 AMD_F10H_COUNTERS[] = {
    MSR_K7_PERFCTR0,
    MSR_K7_PERFCTR1,
    MSR_K7_PERFCTR2,
    MSR_K7_PERFCTR3
};

/* PMU Control MSRs. */
static const u32 AMD_F10H_CTRLS[] = {
    MSR_K7_EVNTSEL0,
    MSR_K7_EVNTSEL1,
    MSR_K7_EVNTSEL2,
    MSR_K7_EVNTSEL3
};

static const u32 AMD_F15H_COUNTERS[] = {
    MSR_AMD_FAM15H_PERFCTR0,
    MSR_AMD_FAM15H_PERFCTR1,
    MSR_AMD_FAM15H_PERFCTR2,
    MSR_AMD_FAM15H_PERFCTR3,
    MSR_AMD_FAM15H_PERFCTR4,
    MSR_AMD_FAM15H_PERFCTR5
};

static const u32 AMD_F15H_CTRLS[] = {
    MSR_AMD_FAM15H_EVNTSEL0,
    MSR_AMD_FAM15H_EVNTSEL1,
    MSR_AMD_FAM15H_EVNTSEL2,
    MSR_AMD_FAM15H_EVNTSEL3,
    MSR_AMD_FAM15H_EVNTSEL4,
    MSR_AMD_FAM15H_EVNTSEL5
};

/* Bits [63:42], [39:36], 21 and 19 are reserved */
#define CTRL_RSVD_MASK ((-1ULL & (~((1ULL << 42) - 1))) | \
                        (0xfULL << 36) | (1ULL << 21) | (1ULL << 19))
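/*
 * CTRL_RSVD_MASK evaluates to 0xfffffcf000280000.  ctrl_rsvd[] below holds
 * the hardware's values of those reserved bits, captured at init time;
 * guest-written control values must leave them unchanged.
 */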
static uint64_t __read_mostly ctrl_rsvd[MAX_NUM_COUNTERS];

/* Use private context as a flag for MSR bitmap */
#define msr_bitmap_on(vpmu)    do {                                    \
                                   (vpmu)->priv_context = (void *)-1L; \
                               } while (0)
#define msr_bitmap_off(vpmu)   do {                                    \
                                   (vpmu)->priv_context = NULL;        \
                               } while (0)
#define is_msr_bitmap_on(vpmu) ((vpmu)->priv_context != NULL)

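/*
 * Classify an MSR address as a counter or a control register and report
 * its index.  On Fam15h the EVNTSELn/PERFCTRn MSRs interleave: even
 * offsets from MSR_AMD_FAM15H_EVNTSEL0 are controls, odd ones counters,
 * hence the ">> 1" and "& 1" below.
 */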
static inline int get_pmu_reg_type(u32 addr, unsigned int *idx)
{
    if ( (addr >= MSR_K7_EVNTSEL0) && (addr <= MSR_K7_EVNTSEL3) )
    {
        *idx = addr - MSR_K7_EVNTSEL0;
        return MSR_TYPE_CTRL;
    }

    if ( (addr >= MSR_K7_PERFCTR0) && (addr <= MSR_K7_PERFCTR3) )
    {
        *idx = addr - MSR_K7_PERFCTR0;
        return MSR_TYPE_COUNTER;
    }

    if ( (addr >= MSR_AMD_FAM15H_EVNTSEL0) &&
         (addr <= MSR_AMD_FAM15H_PERFCTR5) )
    {
        *idx = (addr - MSR_AMD_FAM15H_EVNTSEL0) >> 1;
        if ( addr & 1 )
            return MSR_TYPE_COUNTER;
        else
            return MSR_TYPE_CTRL;
    }

    /* Unsupported registers. */
    return -1;
}

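/* Map the legacy K7 MSR aliases onto their Fam15h equivalents. */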
static inline u32 get_fam15h_addr(u32 addr)
{
    switch ( addr )
    {
    case MSR_K7_PERFCTR0:
        return MSR_AMD_FAM15H_PERFCTR0;
    case MSR_K7_PERFCTR1:
        return MSR_AMD_FAM15H_PERFCTR1;
    case MSR_K7_PERFCTR2:
        return MSR_AMD_FAM15H_PERFCTR2;
    case MSR_K7_PERFCTR3:
        return MSR_AMD_FAM15H_PERFCTR3;
    case MSR_K7_EVNTSEL0:
        return MSR_AMD_FAM15H_EVNTSEL0;
    case MSR_K7_EVNTSEL1:
        return MSR_AMD_FAM15H_EVNTSEL1;
    case MSR_K7_EVNTSEL2:
        return MSR_AMD_FAM15H_EVNTSEL2;
    case MSR_K7_EVNTSEL3:
        return MSR_AMD_FAM15H_EVNTSEL3;
    default:
        break;
    }

    return addr;
}

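/* Reset the software register block: counters to 0, controls to their reserved-bit values. */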
static void amd_vpmu_init_regs(struct xen_pmu_amd_ctxt *ctxt)
{
    unsigned int i;
    uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

    memset(&ctxt->regs[0], 0, regs_sz);
    for ( i = 0; i < num_counters; i++ )
        ctrl_regs[i] = ctrl_rsvd[i];
}

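/*
 * Let the guest access counter MSRs and read control MSRs without
 * interception; control-register writes stay intercepted so their
 * reserved bits can be validated.
 */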
static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    for ( i = 0; i < num_counters; i++ )
    {
        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
    }

    msr_bitmap_on(vpmu);
}

static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    for ( i = 0; i < num_counters; i++ )
    {
        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
    }

    msr_bitmap_off(vpmu);
}

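/* Nothing AMD-specific to do here; returning 1 reports the interrupt as handled. */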
static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
{
    return 1;
}

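/* Write the saved counter and control values back into the hardware MSRs. */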
static inline void context_load(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
    uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

    for ( i = 0; i < num_counters; i++ )
    {
        wrmsrl(counters[i], counter_regs[i]);
        wrmsrl(ctrls[i], ctrl_regs[i]);
    }
}

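/*
 * Load vPMU state into hardware.  When the state comes from a PV(H)
 * guest (from_guest), copy and validate the guest-supplied register
 * block first: the reserved control bits must match the host values.
 */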
static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt;
    uint64_t *ctrl_regs;
    unsigned int i;

    vpmu_reset(vpmu, VPMU_FROZEN);

    if ( !from_guest && vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
    {
        ctxt = vpmu->context;
        ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

        for ( i = 0; i < num_counters; i++ )
            wrmsrl(ctrls[i], ctrl_regs[i]);

        return 0;
    }

    if ( from_guest )
    {
        bool_t is_running = 0;
        struct xen_pmu_amd_ctxt *guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;

        ASSERT(!has_vlapic(v->domain));

        ctxt = vpmu->context;
        ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

        memcpy(&ctxt->regs[0], &guest_ctxt->regs[0], regs_sz);

        for ( i = 0; i < num_counters; i++ )
        {
            if ( (ctrl_regs[i] & CTRL_RSVD_MASK) != ctrl_rsvd[i] )
            {
                /*
                 * Not necessary to re-init the context, since we should never
                 * load it until the guest provides valid values.  But just to
                 * be safe.
                 */
                amd_vpmu_init_regs(ctxt);
                return -EINVAL;
            }

            if ( is_pmu_enabled(ctrl_regs[i]) )
                is_running = 1;
        }

        if ( is_running )
            vpmu_set(vpmu, VPMU_RUNNING);
        else
            vpmu_reset(vpmu, VPMU_RUNNING);
    }

    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);

    context_load(v);

    return 0;
}

static inline void context_save(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);

    /* No need to save controls -- they are saved in amd_vpmu_do_wrmsr */
    for ( i = 0; i < num_counters; i++ )
        rdmsrl(counters[i], counter_regs[i]);
}

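/*
 * Stop the counters and, unless we are only freezing, save the context
 * (optionally copying it out to the guest-shared area).  Returns 1 when
 * a full context save was performed, 0 otherwise.
 */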
static int amd_vpmu_save(struct vcpu *v, bool_t to_guest)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    unsigned int i;

    /* Stop the counters. */
    for ( i = 0; i < num_counters; i++ )
        wrmsrl(ctrls[i], 0);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
    {
        vpmu_set(vpmu, VPMU_FROZEN);
        return 0;
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        return 0;

    context_save(v);

    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
         is_msr_bitmap_on(vpmu) )
        amd_vpmu_unset_msr_bitmap(v);

    if ( to_guest )
    {
        struct xen_pmu_amd_ctxt *guest_ctxt, *ctxt;

        ASSERT(!has_vlapic(v->domain));
        ctxt = vpmu->context;
        guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
        memcpy(&guest_ctxt->regs[0], &ctxt->regs[0], regs_sz);
    }

    return 1;
}

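/* Mirror a guest MSR write into the software context, translating K7 aliases on Fam15h. */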
static void context_update(unsigned int msr, u64 msr_content)
{
    unsigned int i;
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
    uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

    if ( k7_counters_mirrored &&
         ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
    {
        msr = get_fam15h_addr(msr);
    }

    for ( i = 0; i < num_counters; i++ )
    {
        if ( msr == ctrls[i] )
        {
            ctrl_regs[i] = msr_content;
            return;
        }
        else if ( msr == counters[i] )
        {
            counter_regs[i] = msr_content;
            return;
        }
    }
}

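/*
 * Handle a guest WRMSR to a PMU register: validate the reserved bits,
 * force guest-only mode for HVM guests, track the RUNNING state and PMU
 * ownership, then update the software context and the hardware MSR.
 */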
static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                             uint64_t supported)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    unsigned int idx = 0;
    int type = get_pmu_reg_type(msr, &idx);

    ASSERT(!supported);

    if ( (type == MSR_TYPE_CTRL) &&
         ((msr_content & CTRL_RSVD_MASK) != ctrl_rsvd[idx]) )
        return -EINVAL;

    /* For all counters, enable guest-only mode for HVM guests. */
    if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
         !is_guest_mode(msr_content) )
    {
        set_guest_mode(msr_content);
    }

    /* Check if the first counter is enabled. */
    if ( (type == MSR_TYPE_CTRL) &&
         is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
            return 0;
        vpmu_set(vpmu, VPMU_RUNNING);

        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
            amd_vpmu_set_msr_bitmap(v);
    }

    /* Stop saving & restoring if the guest stops the first counter. */
    if ( (type == MSR_TYPE_CTRL) &&
         !is_pmu_enabled(msr_content) && vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        vpmu_reset(vpmu, VPMU_RUNNING);
        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
            amd_vpmu_unset_msr_bitmap(v);
        release_pmu_ownership(PMU_OWNER_HVM);
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) ||
         vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    /* Update the vpmu context immediately. */
    context_update(msr, msr_content);

    /* Write through to the hardware MSR. */
    wrmsrl(msr, msr_content);
    return 0;
}

static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) ||
         vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    rdmsrl(msr, *msr_content);

    return 0;
}

static void amd_vpmu_destroy(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
        amd_vpmu_unset_msr_bitmap(v);

    xfree(vpmu->context);
    vpmu->context = NULL;
    vpmu->priv_context = NULL;

    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
        release_pmu_ownership(PMU_OWNER_HVM);

    vpmu_clear(vpmu);
}

/* VPMU part of the 'q' keyhandler */
static void amd_vpmu_dump(const struct vcpu *v)
{
    const struct vpmu_struct *vpmu = vcpu_vpmu(v);
    const struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    const uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
    const uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
    unsigned int i;

    printk("    VPMU state: 0x%x ", vpmu->flags);
    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
    {
        printk("\n");
        return;
    }

    printk("(");
    if ( vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) )
        printk("PASSIVE_DOMAIN_ALLOCATED, ");
    if ( vpmu_is_set(vpmu, VPMU_FROZEN) )
        printk("FROZEN, ");
    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
        printk("SAVE, ");
    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
        printk("RUNNING, ");
    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        printk("LOADED, ");
    printk("ALLOCATED)\n");

    for ( i = 0; i < num_counters; i++ )
    {
        uint64_t ctrl, cntr;

        rdmsrl(ctrls[i], ctrl);
        rdmsrl(counters[i], cntr);
        printk("      %#x: %#lx (%#lx in HW)    %#x: %#lx (%#lx in HW)\n",
               ctrls[i], ctrl_regs[i], ctrl,
               counters[i], counter_regs[i], cntr);
    }
}

static const struct arch_vpmu_ops amd_vpmu_ops = {
    .do_wrmsr = amd_vpmu_do_wrmsr,
    .do_rdmsr = amd_vpmu_do_rdmsr,
    .do_interrupt = amd_vpmu_do_interrupt,
    .arch_vpmu_destroy = amd_vpmu_destroy,
    .arch_vpmu_save = amd_vpmu_save,
    .arch_vpmu_load = amd_vpmu_load,
    .arch_vpmu_dump = amd_vpmu_dump
};

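/*
 * Allocate and initialise the per-vCPU PMU context.  The counter and
 * control register blocks follow the fixed part of the structure and are
 * located via the byte offsets stored in ctxt->counters / ctxt->ctrls.
 */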
int svm_vpmu_initialise(struct vcpu *v)
{
    struct xen_pmu_amd_ctxt *ctxt;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( vpmu_mode == XENPMU_MODE_OFF )
        return 0;

    if ( !counters )
        return -EINVAL;

    ctxt = xmalloc_bytes(sizeof(*ctxt) + regs_sz);
    if ( !ctxt )
    {
        printk(XENLOG_G_WARNING "Insufficient memory for PMU, "
               "PMU feature is unavailable on domain %d vcpu %d.\n",
               v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    ctxt->counters = sizeof(*ctxt);
    ctxt->ctrls = ctxt->counters + sizeof(uint64_t) * num_counters;
    amd_vpmu_init_regs(ctxt);

    vpmu->context = ctxt;
    vpmu->priv_context = NULL;

    if ( !has_vlapic(v->domain) )
    {
        /* Copy register offsets to shared area */
        ASSERT(vpmu->xenpmu_data);
        memcpy(&vpmu->xenpmu_data->pmu.c.amd, ctxt,
               offsetof(struct xen_pmu_amd_ctxt, regs));
    }

    vpmu->arch_vpmu_ops = &amd_vpmu_ops;

    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
    return 0;
}

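/*
 * Host-wide initialisation: pick the MSR map for the current CPU family,
 * check that the register bank fits into the VPMU shared page, and record
 * the hardware's reserved control bits in ctrl_rsvd[].
 */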
int __init amd_vpmu_init(void)
{
    unsigned int i;

    switch ( current_cpu_data.x86 )
    {
    case 0x15:
        num_counters = F15H_NUM_COUNTERS;
        counters = AMD_F15H_COUNTERS;
        ctrls = AMD_F15H_CTRLS;
        k7_counters_mirrored = 1;
        break;
    case 0x10:
    case 0x12:
    case 0x14:
    case 0x16:
        num_counters = F10H_NUM_COUNTERS;
        counters = AMD_F10H_COUNTERS;
        ctrls = AMD_F10H_CTRLS;
        k7_counters_mirrored = 0;
        break;
    default:
        printk(XENLOG_WARNING "VPMU: Unsupported CPU family %#x\n",
               current_cpu_data.x86);
        return -EINVAL;
    }

    if ( sizeof(struct xen_pmu_data) +
         2 * sizeof(uint64_t) * num_counters > PAGE_SIZE )
    {
        printk(XENLOG_WARNING
               "VPMU: Register bank does not fit into VPMU shared page\n");
        counters = ctrls = NULL;
        num_counters = 0;
        return -ENOSPC;
    }

    for ( i = 0; i < num_counters; i++ )
    {
        rdmsrl(ctrls[i], ctrl_rsvd[i]);
        ctrl_rsvd[i] &= CTRL_RSVD_MASK;
    }

    regs_sz = 2 * sizeof(uint64_t) * num_counters;

    return 0;
}