Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/acpi/cpufreq/cpufreq.c
Coverage summary: only cpufreq_driver_init() executed (hit count 1, taking the Intel branch of its vendor check); every other instrumented line in this file has a hit count of 0.
/*
 *  cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006        Denis Sadykov <denis.m.sadykov@intel.com>
 *
 *  Feb 2008 - Liu Jinsong <jinsong.liu@intel.com>
 *      porting acpi-cpufreq.c from Linux 2.6.23 to Xen hypervisor
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <xen/types.h>
#include <xen/errno.h>
#include <xen/delay.h>
#include <xen/cpumask.h>
#include <xen/sched.h>
#include <xen/timer.h>
#include <xen/xmalloc.h>
#include <asm/bug.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/percpu.h>
#include <asm/cpufeature.h>
#include <acpi/acpi.h>
#include <acpi/cpufreq/cpufreq.h>

enum {
    UNDEFINED_CAPABLE = 0,
    SYSTEM_INTEL_MSR_CAPABLE,
    SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE         (0xffffull)

struct acpi_cpufreq_data *cpufreq_drv_data[NR_CPUS];

static struct cpufreq_driver acpi_cpufreq_driver;

static bool __read_mostly acpi_pstate_strict;
boolean_param("acpi_pstate_strict", acpi_pstate_strict);
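
The acpi_pstate_strict flag above is wired to the Xen command line through boolean_param(); when enabled, each P-state transition is verified by re-reading the resulting frequency (see check_freqs() and its use in acpi_cpufreq_target() below). Assuming the usual syntax for Xen boolean parameters, it would be enabled from the hypervisor command line roughly as:

    acpi_pstate_strict=1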

static int check_est_cpu(unsigned int cpuid)
{
    struct cpuinfo_x86 *cpu = &cpu_data[cpuid];

    if (cpu->x86_vendor != X86_VENDOR_INTEL ||
        !cpu_has(cpu, X86_FEATURE_EIST))
        return 0;

    return 1;
}

static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
    struct processor_performance *perf;
    int i;

    perf = data->acpi_data;

    for (i=0; i<perf->state_count; i++) {
        if (value == perf->states[i].status)
            return data->freq_table[i].frequency;
    }
    return 0;
}

static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
    int i;
    struct processor_performance *perf;

    msr &= INTEL_MSR_RANGE;
    perf = data->acpi_data;

    for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
        if (msr == perf->states[data->freq_table[i].index].status)
            return data->freq_table[i].frequency;
    }
    return data->freq_table[0].frequency;
}
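
Both extract_io() and extract_msr() above walk the driver's frequency table, which maps each table slot to an ACPI P-state index and a frequency in kHz and is terminated by a CPUFREQ_TABLE_END sentinel instead of a stored length. A minimal sketch of that shape, using hypothetical values:

    /* Hypothetical two-state table of the form the extractors iterate over:
     * .index refers back to perf->states[], .frequency is in kHz, and the
     * last entry's frequency is the end-of-table sentinel. */
    static const struct cpufreq_frequency_table example_table[] = {
        { .index = 0, .frequency = 2400000 },   /* P0: 2.4 GHz */
        { .index = 1, .frequency = 1600000 },   /* P1: 1.6 GHz */
        { .frequency = CPUFREQ_TABLE_END },
    };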

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
    switch (data->arch_cpu_flags) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        return extract_msr(val, data);
    case SYSTEM_IO_CAPABLE:
        return extract_io(val, data);
    default:
        return 0;
    }
}

struct msr_addr {
    u32 reg;
};

struct io_addr {
    u16 port;
    u8 bit_width;
};

typedef union {
    struct msr_addr msr;
    struct io_addr io;
} drv_addr_union;

struct drv_cmd {
    unsigned int type;
    const cpumask_t *mask;
    drv_addr_union addr;
    u32 val;
};

static void do_drv_read(void *drvcmd)
{
    struct drv_cmd *cmd;

    cmd = (struct drv_cmd *)drvcmd;

    switch (cmd->type) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        rdmsrl(cmd->addr.msr.reg, cmd->val);
        break;
    case SYSTEM_IO_CAPABLE:
        acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
            &cmd->val, (u32)cmd->addr.io.bit_width);
        break;
    default:
        break;
    }
}

static void do_drv_write(void *drvcmd)
{
    struct drv_cmd *cmd;
    uint64_t msr_content;

    cmd = (struct drv_cmd *)drvcmd;

    switch (cmd->type) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        rdmsrl(cmd->addr.msr.reg, msr_content);
        msr_content = (msr_content & ~INTEL_MSR_RANGE)
            | (cmd->val & INTEL_MSR_RANGE);
        wrmsrl(cmd->addr.msr.reg, msr_content);
        break;
    case SYSTEM_IO_CAPABLE:
        acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
            cmd->val, (u32)cmd->addr.io.bit_width);
        break;
    default:
        break;
    }
}

static void drv_read(struct drv_cmd *cmd)
{
    cmd->val = 0;

    ASSERT(cpumask_weight(cmd->mask) == 1);

    /* to reduce IPI for the sake of performance */
    if (likely(cpumask_test_cpu(smp_processor_id(), cmd->mask)))
        do_drv_read((void *)cmd);
    else
        on_selected_cpus(cmd->mask, do_drv_read, cmd, 1);
}

static void drv_write(struct drv_cmd *cmd)
{
    if (cpumask_equal(cmd->mask, cpumask_of(smp_processor_id())))
        do_drv_write((void *)cmd);
    else
        on_selected_cpus(cmd->mask, do_drv_write, cmd, 1);
}

static u32 get_cur_val(const cpumask_t *mask)
{
    struct cpufreq_policy *policy;
    struct processor_performance *perf;
    struct drv_cmd cmd;
    unsigned int cpu = smp_processor_id();

    if (unlikely(cpumask_empty(mask)))
        return 0;

    if (!cpumask_test_cpu(cpu, mask))
        cpu = cpumask_first(mask);
    if (cpu >= nr_cpu_ids || !cpu_online(cpu))
        return 0;

    policy = per_cpu(cpufreq_cpu_policy, cpu);
    if (!policy || !cpufreq_drv_data[policy->cpu])
        return 0;

    switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
        cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
        break;
    case SYSTEM_IO_CAPABLE:
        cmd.type = SYSTEM_IO_CAPABLE;
        perf = cpufreq_drv_data[policy->cpu]->acpi_data;
        cmd.addr.io.port = perf->control_register.address;
        cmd.addr.io.bit_width = perf->control_register.bit_width;
        break;
    default:
        return 0;
    }

    cmd.mask = cpumask_of(cpu);

    drv_read(&cmd);
    return cmd.val;
}

struct perf_pair {
    union {
        struct {
            uint32_t lo;
            uint32_t hi;
        } split;
        uint64_t whole;
    } aperf, mperf;
};
static DEFINE_PER_CPU(struct perf_pair, gov_perf_pair);
static DEFINE_PER_CPU(struct perf_pair, usr_perf_pair);

static void read_measured_perf_ctrs(void *_readin)
{
    struct perf_pair *readin = _readin;

    rdmsrl(MSR_IA32_APERF, readin->aperf.whole);
    rdmsrl(MSR_IA32_MPERF, readin->mperf.whole);
}

/*
 * Return the measured active (C0) frequency on this CPU since last call
 * to this function.
 * Input: cpu number
 * Return: Average CPU frequency in terms of max frequency (zero on error)
 *
 * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
 * over a period of time, while CPU is in C0 state.
 * IA32_MPERF counts at the rate of max advertised frequency
 * IA32_APERF counts at the rate of actual CPU frequency
 * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
 * no meaning should be associated with absolute values of these MSRs.
 */
unsigned int get_measured_perf(unsigned int cpu, unsigned int flag)
{
    struct cpufreq_policy *policy;
    struct perf_pair readin, cur, *saved;
    unsigned int perf_percent;
    unsigned int retval;

    if (!cpu_online(cpu))
        return 0;

    policy = per_cpu(cpufreq_cpu_policy, cpu);
    if (!policy || !policy->aperf_mperf)
        return 0;

    switch (flag)
    {
    case GOV_GETAVG:
    {
        saved = &per_cpu(gov_perf_pair, cpu);
        break;
    }
    case USR_GETAVG:
    {
        saved = &per_cpu(usr_perf_pair, cpu);
        break;
    }
    default:
        return 0;
    }

    if (cpu == smp_processor_id()) {
        read_measured_perf_ctrs((void *)&readin);
    } else {
        on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs,
                        &readin, 1);
    }

    cur.aperf.whole = readin.aperf.whole - saved->aperf.whole;
    cur.mperf.whole = readin.mperf.whole - saved->mperf.whole;
    saved->aperf.whole = readin.aperf.whole;
    saved->mperf.whole = readin.mperf.whole;

    if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
        int shift_count = 7;
        cur.aperf.whole >>= shift_count;
        cur.mperf.whole >>= shift_count;
    }

    if (cur.aperf.whole && cur.mperf.whole)
        perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
    else
        perf_percent = 0;

    retval = policy->cpuinfo.max_freq * perf_percent / 100;

    return retval;
}
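
As the comment block above stresses, only the ratio of the APERF and MPERF deltas carries meaning. A worked example with hypothetical counter values:

    /* Suppose that between two calls APERF advanced by 1,500,000 counts and
     * MPERF by 2,000,000 counts while the CPU was in C0:
     *   perf_percent = 1500000 * 100 / 2000000 = 75
     * With policy->cpuinfo.max_freq = 2400000 kHz, the reported average is
     *   2400000 * 75 / 100 = 1800000 kHz, i.e. the CPU ran at 1.8 GHz. */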

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
    struct cpufreq_policy *policy;
    struct acpi_cpufreq_data *data;
    unsigned int freq;

    if (!cpu_online(cpu))
        return 0;

    policy = per_cpu(cpufreq_cpu_policy, cpu);
    if (!policy)
        return 0;

    data = cpufreq_drv_data[policy->cpu];
    if (unlikely(data == NULL ||
        data->acpi_data == NULL || data->freq_table == NULL))
        return 0;

    freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
    return freq;
}

static void feature_detect(void *info)
{
    struct cpufreq_policy *policy = info;
    unsigned int eax;

    if ( cpu_has_aperfmperf )
    {
        policy->aperf_mperf = 1;
        acpi_cpufreq_driver.getavg = get_measured_perf;
    }

    eax = cpuid_eax(6);
    if (eax & 0x2) {
        policy->turbo = CPUFREQ_TURBO_ENABLED;
        if (cpufreq_verbose)
            printk(XENLOG_INFO "CPU%u: Turbo Mode detected and enabled\n",
                   smp_processor_id());
    }
}

static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
                                struct acpi_cpufreq_data *data)
{
    unsigned int cur_freq;
    unsigned int i;

    for (i=0; i<100; i++) {
        cur_freq = extract_freq(get_cur_val(mask), data);
        if (cur_freq == freq)
            return 1;
        udelay(10);
    }
    return 0;
}

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                               unsigned int target_freq, unsigned int relation)
{
    struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu];
    struct processor_performance *perf;
    struct cpufreq_freqs freqs;
    cpumask_t online_policy_cpus;
    struct drv_cmd cmd;
    unsigned int next_state = 0; /* Index into freq_table */
    unsigned int next_perf_state = 0; /* Index into perf table */
    unsigned int j;
    int result = 0;

    if (unlikely(data == NULL ||
        data->acpi_data == NULL || data->freq_table == NULL)) {
        return -ENODEV;
    }

    if (policy->turbo == CPUFREQ_TURBO_DISABLED)
        if (target_freq > policy->cpuinfo.second_max_freq)
            target_freq = policy->cpuinfo.second_max_freq;

    perf = data->acpi_data;
    result = cpufreq_frequency_table_target(policy,
                                            data->freq_table,
                                            target_freq,
                                            relation, &next_state);
    if (unlikely(result))
        return -ENODEV;

    cpumask_and(&online_policy_cpus, &cpu_online_map, policy->cpus);

    next_perf_state = data->freq_table[next_state].index;
    if (perf->state == next_perf_state) {
        if (unlikely(policy->resume))
            policy->resume = 0;
        else
            return 0;
    }

    switch (data->arch_cpu_flags) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
        cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
        cmd.val = (u32) perf->states[next_perf_state].control;
        break;
    case SYSTEM_IO_CAPABLE:
        cmd.type = SYSTEM_IO_CAPABLE;
        cmd.addr.io.port = perf->control_register.address;
        cmd.addr.io.bit_width = perf->control_register.bit_width;
        cmd.val = (u32) perf->states[next_perf_state].control;
        break;
    default:
        return -ENODEV;
    }

    if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
        cmd.mask = &online_policy_cpus;
    else
        cmd.mask = cpumask_of(policy->cpu);

    freqs.old = perf->states[perf->state].core_frequency * 1000;
    freqs.new = data->freq_table[next_state].frequency;

    drv_write(&cmd);

    if (acpi_pstate_strict && !check_freqs(cmd.mask, freqs.new, data)) {
        printk(KERN_WARNING "Fail transfer to new freq %d\n", freqs.new);
        return -EAGAIN;
    }

    for_each_cpu(j, &online_policy_cpus)
        cpufreq_statistic_update(j, perf->state, next_perf_state);

    perf->state = next_perf_state;
    policy->cur = freqs.new;

    return result;
}
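
The relation argument decides which table entry satisfies a target that falls between two states; in the cpufreq convention this driver inherits from Linux, CPUFREQ_RELATION_L selects the lowest frequency at or above the target and CPUFREQ_RELATION_H the highest one at or below it. A hypothetical invocation, assuming policy was obtained from per_cpu(cpufreq_cpu_policy, cpu):

    /* Sketch only: request the lowest table frequency >= 1.8 GHz. */
    int ret = acpi_cpufreq_target(policy, 1800000, CPUFREQ_RELATION_L);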

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
    struct acpi_cpufreq_data *data;
    struct processor_performance *perf;

    if (!policy || !(data = cpufreq_drv_data[policy->cpu]) ||
        !processor_pminfo[policy->cpu])
        return -EINVAL;

    perf = &processor_pminfo[policy->cpu]->perf;

    cpufreq_verify_within_limits(policy, 0,
        perf->states[perf->platform_limit].core_frequency * 1000);

    return cpufreq_frequency_table_verify(policy, data->freq_table);
}

static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
    struct processor_performance *perf = data->acpi_data;

    if (cpu_khz) {
        /* search the closest match to cpu_khz */
        unsigned int i;
        unsigned long freq;
        unsigned long freqn = perf->states[0].core_frequency * 1000;

        for (i=0; i<(perf->state_count-1); i++) {
            freq = freqn;
            freqn = perf->states[i+1].core_frequency * 1000;
            if ((2 * cpu_khz) > (freqn + freq)) {
                perf->state = i;
                return freq;
            }
        }
        perf->state = perf->state_count-1;
        return freqn;
    } else {
        /* assume CPU is at P0... */
        perf->state = 0;
        return perf->states[0].core_frequency * 1000;
    }
}
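
The division-free test 2 * cpu_khz > freqn + freq is a midpoint comparison: it assigns the measured clock to whichever neighbouring P-state frequency it is closer to. A worked example with hypothetical states:

    /* States of 2400000, 2000000 and 1600000 kHz; measured cpu_khz = 1900000:
     *   i = 0: 2*1900000 = 3800000 <= 2400000 + 2000000 = 4400000, continue
     *   i = 1: 3800000 > 2000000 + 1600000 = 3600000, so perf->state = 1
     *          and 2000000 kHz is returned.
     * 1.9 GHz is indeed closer to 2.0 GHz than to 1.6 GHz. */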

static int
acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int i;
    unsigned int valid_states = 0;
    unsigned int cpu = policy->cpu;
    struct acpi_cpufreq_data *data;
    unsigned int result = 0;
    struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
    struct processor_performance *perf;

    data = xzalloc(struct acpi_cpufreq_data);
    if (!data)
        return -ENOMEM;

    cpufreq_drv_data[cpu] = data;

    data->acpi_data = &processor_pminfo[cpu]->perf;

    perf = data->acpi_data;
    policy->shared_type = perf->shared_type;

    switch (perf->control_register.space_id) {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        if (cpufreq_verbose)
            printk("xen_pminfo: @acpi_cpufreq_cpu_init,"
                   "SYSTEM IO addr space\n");
        data->arch_cpu_flags = SYSTEM_IO_CAPABLE;
        break;
    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        if (cpufreq_verbose)
            printk("xen_pminfo: @acpi_cpufreq_cpu_init,"
                   "HARDWARE addr space\n");
        if (!check_est_cpu(cpu)) {
            result = -ENODEV;
            goto err_unreg;
        }
        data->arch_cpu_flags = SYSTEM_INTEL_MSR_CAPABLE;
        break;
    default:
        result = -ENODEV;
        goto err_unreg;
    }

    data->freq_table = xmalloc_array(struct cpufreq_frequency_table,
                                    (perf->state_count+1));
    if (!data->freq_table) {
        result = -ENOMEM;
        goto err_unreg;
    }

    /* detect transition latency */
    policy->cpuinfo.transition_latency = 0;
    for (i=0; i<perf->state_count; i++) {
        if ((perf->states[i].transition_latency * 1000) >
            policy->cpuinfo.transition_latency)
            policy->cpuinfo.transition_latency =
                perf->states[i].transition_latency * 1000;
    }

    policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR;

    /* table init */
    for (i=0; i<perf->state_count; i++) {
        if (i>0 && perf->states[i].core_frequency >=
            data->freq_table[valid_states-1].frequency / 1000)
            continue;

        data->freq_table[valid_states].index = i;
        data->freq_table[valid_states].frequency =
            perf->states[i].core_frequency * 1000;
        valid_states++;
    }
    data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
    perf->state = 0;

    result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
    if (result)
        goto err_freqfree;

    switch (perf->control_register.space_id) {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        /* Current speed is unknown and not detectable by IO port */
        policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
        break;
    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
        policy->cur = get_cur_freq_on_cpu(cpu);
        break;
    default:
        break;
    }

    /* Check for APERF/MPERF support in hardware
     * also check for boost support */
    if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6)
        on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);

    /*
     * the first call to ->target() should result in us actually
     * writing something to the appropriate registers.
     */
    policy->resume = 1;

    return result;

err_freqfree:
    xfree(data->freq_table);
err_unreg:
    xfree(data);
    cpufreq_drv_data[cpu] = NULL;

    return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
    struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu];

    if (data) {
        cpufreq_drv_data[policy->cpu] = NULL;
        xfree(data->freq_table);
        xfree(data);
    }

    return 0;
}

static struct cpufreq_driver acpi_cpufreq_driver = {
    .name   = "acpi-cpufreq",
    .verify = acpi_cpufreq_verify,
    .target = acpi_cpufreq_target,
    .init   = acpi_cpufreq_cpu_init,
    .exit   = acpi_cpufreq_cpu_exit,
};

static int __init cpufreq_driver_init(void)
{
    int ret = 0;

    if ((cpufreq_controller == FREQCTL_xen) &&
        (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL))
        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
    else if ((cpufreq_controller == FREQCTL_xen) &&
        (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
        ret = powernow_register_driver();

    return ret;
}
__initcall(cpufreq_driver_init);

int cpufreq_cpu_init(unsigned int cpuid)
{
    int ret;

    /* Currently we only handle Intel and AMD processor */
    if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) ||
         (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) )
        ret = cpufreq_add_cpu(cpuid);
    else
        ret = -EFAULT;
    return ret;
}