/root/src/xen/xen/arch/x86/acpi/cpufreq/powernow.c
Line | Count | Source
1 | | /* |
2 | | * powernow - AMD Architectural P-state Driver ($Revision: 1.4 $) |
3 | | * |
4 | | * Copyright (C) 2008 Mark Langsdorf <mark.langsdorf@amd.com> |
5 | | * |
6 | | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
7 | | * |
8 | | * This program is free software; you can redistribute it and/or modify |
9 | | * it under the terms of the GNU General Public License as published by |
10 | | * the Free Software Foundation; either version 2 of the License, or (at |
11 | | * your option) any later version. |
12 | | * |
13 | | * This program is distributed in the hope that it will be useful, but |
14 | | * WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | | * General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU General Public License along |
19 | | * with this program; If not, see <http://www.gnu.org/licenses/>. |
20 | | * |
21 | | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
22 | | */ |
23 | | |
24 | | #include <xen/types.h> |
25 | | #include <xen/errno.h> |
26 | | #include <xen/init.h> |
27 | | #include <xen/delay.h> |
28 | | #include <xen/cpumask.h> |
29 | | #include <xen/timer.h> |
30 | | #include <xen/xmalloc.h> |
31 | | #include <asm/bug.h> |
32 | | #include <asm/msr.h> |
33 | | #include <asm/io.h> |
34 | | #include <asm/processor.h> |
35 | | #include <asm/percpu.h> |
36 | | #include <asm/cpufeature.h> |
37 | | #include <acpi/acpi.h> |
38 | | #include <acpi/cpufreq/cpufreq.h> |
39 | | |
40 | 0 | #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 |
41 | 0 | #define CPB_CAPABLE 0x00000200 |
42 | 0 | #define USE_HW_PSTATE 0x00000080 |
43 | 0 | #define HW_PSTATE_MASK 0x00000007 |
44 | | #define HW_PSTATE_VALID_MASK 0x80000000 |
45 | 0 | #define HW_PSTATE_MAX_MASK 0x000000f0 |
46 | 0 | #define HW_PSTATE_MAX_SHIFT 4 |
47 | | #define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ |
48 | | #define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ |
49 | 0 | #define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */ |
50 | | #define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */ |
51 | 0 | #define MSR_HWCR_CPBDIS_MASK 0x02000000ULL |
52 | | |
53 | 0 | #define ARCH_CPU_FLAG_RESUME 1 |
54 | | |
55 | | static struct cpufreq_driver powernow_cpufreq_driver; |
56 | | |
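 | | /*
 | | * Request a P-state change by writing the target hardware P-state
 | | * number to the P-state control MSR; the hardware performs the
 | | * actual frequency/voltage transition on its own.
 | | */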
57 | | static void transition_pstate(void *pstate) |
58 | 0 | { |
59 | 0 | wrmsrl(MSR_PSTATE_CTRL, *(unsigned int *)pstate); |
60 | 0 | } |
61 | | |
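 | | /*
 | | * Toggle Core Performance Boost via the CpbDis bit (bit 25) of
 | | * MSR_K8_HWCR: set to disable boosting, clear to re-enable it.
 | | * Runs on the CPU being updated.
 | | */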
62 | | static void update_cpb(void *data) |
63 | 0 | { |
64 | 0 | struct cpufreq_policy *policy = (struct cpufreq_policy *)data; |
65 | 0 |
66 | 0 | if (policy->turbo != CPUFREQ_TURBO_UNSUPPORTED) { |
67 | 0 | uint64_t msr_content; |
68 | 0 | |
69 | 0 | rdmsrl(MSR_K8_HWCR, msr_content); |
70 | 0 |
71 | 0 | if (policy->turbo == CPUFREQ_TURBO_ENABLED) |
72 | 0 | msr_content &= ~MSR_HWCR_CPBDIS_MASK; |
73 | 0 | else |
74 | 0 | msr_content |= MSR_HWCR_CPBDIS_MASK; |
75 | 0 |
76 | 0 | wrmsrl(MSR_K8_HWCR, msr_content); |
77 | 0 | } |
78 | 0 | } |
79 | | |
80 | | static int powernow_cpufreq_update (int cpuid, |
81 | | struct cpufreq_policy *policy) |
82 | 0 | { |
83 | 0 | if (!cpumask_test_cpu(cpuid, &cpu_online_map)) |
84 | 0 | return -EINVAL; |
85 | 0 |
86 | 0 | on_selected_cpus(cpumask_of(cpuid), update_cpb, policy, 1); |
87 | 0 |
88 | 0 | return 0; |
89 | 0 | } |
90 | | |
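 | | /*
 | | * Map the requested frequency to a freq_table entry and trigger the
 | | * P-state transition, either locally or, for coordinated domains,
 | | * on all online CPUs sharing this policy.
 | | */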
91 | | static int powernow_cpufreq_target(struct cpufreq_policy *policy, |
92 | | unsigned int target_freq, unsigned int relation) |
93 | 0 | { |
94 | 0 | struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; |
95 | 0 | struct processor_performance *perf; |
96 | 0 | unsigned int next_state; /* Index into freq_table */ |
97 | 0 | unsigned int next_perf_state; /* Index into perf table */ |
98 | 0 | int result; |
99 | 0 |
100 | 0 | if (unlikely(data == NULL || |
101 | 0 | data->acpi_data == NULL || data->freq_table == NULL)) { |
102 | 0 | return -ENODEV; |
103 | 0 | } |
104 | 0 |
105 | 0 | perf = data->acpi_data; |
106 | 0 | result = cpufreq_frequency_table_target(policy, |
107 | 0 | data->freq_table, |
108 | 0 | target_freq, |
109 | 0 | relation, &next_state); |
110 | 0 | if (unlikely(result)) |
111 | 0 | return result; |
112 | 0 |
113 | 0 | next_perf_state = data->freq_table[next_state].index; |
114 | 0 | if (perf->state == next_perf_state) { |
115 | 0 | if (unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME)) |
116 | 0 | data->arch_cpu_flags &= ~ARCH_CPU_FLAG_RESUME; |
117 | 0 | else |
118 | 0 | return 0; |
119 | 0 | } |
120 | 0 |
121 | 0 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_HW && |
122 | 0 | likely(policy->cpu == smp_processor_id())) { |
123 | 0 | transition_pstate(&next_perf_state); |
124 | 0 | cpufreq_statistic_update(policy->cpu, perf->state, next_perf_state); |
125 | 0 | } else { |
126 | 0 | cpumask_t online_policy_cpus; |
127 | 0 | unsigned int cpu; |
128 | 0 |
129 | 0 | cpumask_and(&online_policy_cpus, policy->cpus, &cpu_online_map); |
130 | 0 |
131 | 0 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || |
132 | 0 | unlikely(policy->cpu != smp_processor_id())) |
133 | 0 | on_selected_cpus(&online_policy_cpus, transition_pstate, |
134 | 0 | &next_perf_state, 1); |
135 | 0 | else |
136 | 0 | transition_pstate(&next_perf_state); |
137 | 0 |
138 | 0 | for_each_cpu(cpu, &online_policy_cpus) |
139 | 0 | cpufreq_statistic_update(cpu, perf->state, next_perf_state); |
140 | 0 | } |
141 | 0 |
142 | 0 | perf->state = next_perf_state; |
143 | 0 | policy->cur = data->freq_table[next_state].frequency; |
144 | 0 |
145 | 0 | return 0; |
146 | 0 | } |
147 | | |
148 | | static void amd_fixup_frequency(struct xen_processor_px *px) |
149 | 0 | { |
150 | 0 | u32 hi, lo, fid, did; |
151 | 0 | int index = px->control & 0x00000007; |
152 | 0 | const struct cpuinfo_x86 *c = &current_cpu_data;
153 | 0 |
154 | 0 | if ((c->x86 != 0x10 || c->x86_model >= 10) && c->x86 != 0x11) |
155 | 0 | return; |
156 | 0 |
157 | 0 | rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); |
158 | 0 | /* |
159 | 0 | * MSR C001_0064+: |
160 | 0 | * Bit 63: PstateEn. Read-write. If set, the P-state is valid. |
161 | 0 | */ |
162 | 0 | if (!(hi & (1U << 31))) |
163 | 0 | return; |
164 | 0 |
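 | | /*
 | | * Recompute the frequency from CpuFid/CpuDid as per the BKDGs:
 | | * CoreCOF = 100MHz * (CpuFid + 10h) >> CpuDid on family 0x10,
 | | * and 100MHz * (CpuFid + 08h) >> CpuDid on family 0x11.
 | | */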
165 | 0 | fid = lo & 0x3f; |
166 | 0 | did = (lo >> 6) & 7; |
167 | 0 | if (c->x86 == 0x10) |
168 | 0 | px->core_frequency = (100 * (fid + 16)) >> did; |
169 | 0 | else |
170 | 0 | px->core_frequency = (100 * (fid + 8)) >> did; |
171 | 0 | } |
172 | | |
173 | | struct amd_cpu_data { |
174 | | struct processor_performance *perf; |
175 | | u32 max_hw_pstate; |
176 | | }; |
177 | | |
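 | | /*
 | | * Read the highest valid hardware P-state number from the current
 | | * limit MSR and fix up the ACPI-reported frequency of each state up
 | | * to that limit. Runs on the target CPU.
 | | */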
178 | | static void get_cpu_data(void *arg) |
179 | 0 | { |
180 | 0 | struct amd_cpu_data *data = arg; |
181 | 0 | struct processor_performance *perf = data->perf; |
182 | 0 | uint64_t msr_content; |
183 | 0 | unsigned int i; |
184 | 0 |
185 | 0 | rdmsrl(MSR_PSTATE_CUR_LIMIT, msr_content); |
186 | 0 | data->max_hw_pstate = (msr_content & HW_PSTATE_MAX_MASK) >> |
187 | 0 | HW_PSTATE_MAX_SHIFT; |
188 | 0 |
189 | 0 | for (i = 0; i < perf->state_count && i <= data->max_hw_pstate; i++) |
190 | 0 | amd_fixup_frequency(&perf->states[i]); |
191 | 0 | } |
192 | | |
193 | | static int powernow_cpufreq_verify(struct cpufreq_policy *policy) |
194 | 0 | { |
195 | 0 | struct acpi_cpufreq_data *data; |
196 | 0 | struct processor_performance *perf; |
197 | 0 |
198 | 0 | if (!policy || !(data = cpufreq_drv_data[policy->cpu]) || |
199 | 0 | !processor_pminfo[policy->cpu]) |
200 | 0 | return -EINVAL; |
201 | 0 |
202 | 0 | perf = &processor_pminfo[policy->cpu]->perf; |
203 | 0 |
204 | 0 | cpufreq_verify_within_limits(policy, 0, |
205 | 0 | perf->states[perf->platform_limit].core_frequency * 1000); |
206 | 0 |
207 | 0 | return cpufreq_frequency_table_verify(policy, data->freq_table); |
208 | 0 | } |
209 | | |
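 | | /*
 | | * CPUID 0x80000007 EDX bit 9 advertises Core Performance Boost;
 | | * when present, boosting is enabled and exposed through the
 | | * generic turbo interface.
 | | */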
210 | | static void feature_detect(void *info) |
211 | 0 | { |
212 | 0 | struct cpufreq_policy *policy = info; |
213 | 0 | unsigned int edx; |
214 | 0 |
215 | 0 | if ( cpu_has_aperfmperf ) |
216 | 0 | { |
217 | 0 | policy->aperf_mperf = 1; |
218 | 0 | powernow_cpufreq_driver.getavg = get_measured_perf; |
219 | 0 | } |
220 | 0 |
221 | 0 | edx = cpuid_edx(CPUID_FREQ_VOLT_CAPABILITIES); |
222 | 0 | if ((edx & CPB_CAPABLE) == CPB_CAPABLE) { |
223 | 0 | policy->turbo = CPUFREQ_TURBO_ENABLED; |
224 | 0 | if (cpufreq_verbose) |
225 | 0 | printk(XENLOG_INFO |
226 | 0 | "CPU%u: Core Boost/Turbo detected and enabled\n", |
227 | 0 | smp_processor_id()); |
228 | 0 | } |
229 | 0 | } |
230 | | |
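 | | /*
 | | * Per-CPU initialization: pull in the ACPI P-state data, build the
 | | * frequency table (bounded by the hardware P-state limit), detect
 | | * boost/APERF-MPERF support, and force the first ->target() call
 | | * to program the hardware.
 | | */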
231 | | static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy) |
232 | 0 | { |
233 | 0 | unsigned int i; |
234 | 0 | unsigned int valid_states = 0; |
235 | 0 | unsigned int cpu = policy->cpu; |
236 | 0 | struct acpi_cpufreq_data *data; |
237 | 0 | unsigned int result = 0; |
238 | 0 | struct processor_performance *perf; |
239 | 0 | struct amd_cpu_data info; |
240 | 0 | struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; |
241 | 0 |
242 | 0 | data = xzalloc(struct acpi_cpufreq_data); |
243 | 0 | if (!data) |
244 | 0 | return -ENOMEM; |
245 | 0 |
246 | 0 | cpufreq_drv_data[cpu] = data; |
247 | 0 |
248 | 0 | data->acpi_data = &processor_pminfo[cpu]->perf; |
249 | 0 |
250 | 0 | info.perf = perf = data->acpi_data; |
251 | 0 | policy->shared_type = perf->shared_type; |
252 | 0 |
253 | 0 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || |
254 | 0 | policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { |
255 | 0 | cpumask_set_cpu(cpu, policy->cpus); |
256 | 0 | if (cpumask_weight(policy->cpus) != 1) { |
257 | 0 | printk(XENLOG_WARNING "Unsupported sharing type %d (%u CPUs)\n", |
258 | 0 | policy->shared_type, cpumask_weight(policy->cpus)); |
259 | 0 | result = -ENODEV; |
260 | 0 | goto err_unreg; |
261 | 0 | } |
262 | 0 | } else { |
263 | 0 | cpumask_copy(policy->cpus, cpumask_of(cpu)); |
264 | 0 | } |
265 | 0 |
266 | 0 | /* capability check */ |
267 | 0 | if (perf->state_count <= 1) { |
268 | 0 | printk("No P-States\n"); |
269 | 0 | result = -ENODEV; |
270 | 0 | goto err_unreg; |
271 | 0 | } |
272 | 0 |
273 | 0 | if (perf->control_register.space_id != perf->status_register.space_id) { |
274 | 0 | result = -ENODEV; |
275 | 0 | goto err_unreg; |
276 | 0 | } |
277 | 0 |
278 | 0 | data->freq_table = xmalloc_array(struct cpufreq_frequency_table, |
279 | 0 | (perf->state_count+1)); |
280 | 0 | if (!data->freq_table) { |
281 | 0 | result = -ENOMEM; |
282 | 0 | goto err_unreg; |
283 | 0 | } |
284 | 0 |
285 | 0 | /* detect transition latency */ |
286 | 0 | policy->cpuinfo.transition_latency = 0; |
287 | 0 | for (i=0; i<perf->state_count; i++) { |
288 | 0 | if ((perf->states[i].transition_latency * 1000) > |
289 | 0 | policy->cpuinfo.transition_latency) |
290 | 0 | policy->cpuinfo.transition_latency = |
291 | 0 | perf->states[i].transition_latency * 1000; |
292 | 0 | } |
293 | 0 |
294 | 0 | policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR; |
295 | 0 |
296 | 0 | on_selected_cpus(cpumask_of(cpu), get_cpu_data, &info, 1); |
297 | 0 |
298 | 0 | /* table init */ |
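 | | /*
 | | * ACPI reports the states ordered from highest to lowest frequency;
 | | * any entry that does not strictly decrease is skipped as a
 | | * duplicate, so the table may end up shorter than state_count.
 | | */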
299 | 0 | for (i = 0; i < perf->state_count && i <= info.max_hw_pstate; i++) { |
300 | 0 | if (i > 0 && perf->states[i].core_frequency >= |
301 | 0 | data->freq_table[valid_states-1].frequency / 1000) |
302 | 0 | continue; |
303 | 0 |
304 | 0 | data->freq_table[valid_states].index = perf->states[i].control & HW_PSTATE_MASK; |
305 | 0 | data->freq_table[valid_states].frequency = |
306 | 0 | perf->states[i].core_frequency * 1000; |
307 | 0 | valid_states++; |
308 | 0 | } |
309 | 0 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; |
310 | 0 | perf->state = 0; |
311 | 0 |
312 | 0 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); |
313 | 0 | if (result) |
314 | 0 | goto err_freqfree; |
315 | 0 |
316 | 0 | if (c->cpuid_level >= 6) |
317 | 0 | on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1); |
318 | 0 | |
319 | 0 | /* |
320 | 0 | * the first call to ->target() should result in us actually |
321 | 0 | * writing something to the appropriate registers. |
322 | 0 | */ |
323 | 0 | data->arch_cpu_flags |= ARCH_CPU_FLAG_RESUME; |
324 | 0 |
325 | 0 | policy->cur = data->freq_table[0].frequency;
326 | 0 | return result; |
327 | 0 |
328 | 0 | err_freqfree: |
329 | 0 | xfree(data->freq_table); |
330 | 0 | err_unreg: |
331 | 0 | xfree(data); |
332 | 0 | cpufreq_drv_data[cpu] = NULL; |
333 | 0 |
334 | 0 | return result; |
335 | 0 | } |
336 | | |
337 | | static int powernow_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
338 | 0 | { |
339 | 0 | struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; |
340 | 0 |
341 | 0 | if (data) { |
342 | 0 | cpufreq_drv_data[policy->cpu] = NULL; |
343 | 0 | xfree(data->freq_table); |
344 | 0 | xfree(data); |
345 | 0 | } |
346 | 0 |
347 | 0 | return 0; |
348 | 0 | } |
349 | | |
350 | | static struct cpufreq_driver powernow_cpufreq_driver = { |
351 | | .verify = powernow_cpufreq_verify, |
352 | | .target = powernow_cpufreq_target, |
353 | | .init = powernow_cpufreq_cpu_init, |
354 | | .exit = powernow_cpufreq_cpu_exit, |
355 | | .update = powernow_cpufreq_update |
356 | | }; |
357 | | |
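 | | /*
 | | * Register the driver only if every online CPU is an AMD CPU with
 | | * hardware P-state support (CPUID 0x80000007 EDX bit 7, HwPstate).
 | | */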
358 | | int __init powernow_register_driver(void)
359 | 0 | { |
360 | 0 | unsigned int i; int ret = 0;
361 | 0 |
362 | 0 | for_each_online_cpu(i) { |
363 | 0 | struct cpuinfo_x86 *c = &cpu_data[i]; |
364 | 0 | if (c->x86_vendor != X86_VENDOR_AMD) |
365 | 0 | ret = -ENODEV; |
366 | 0 | else |
367 | 0 | { |
368 | 0 | u32 eax, ebx, ecx, edx; |
369 | 0 | cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); |
370 | 0 | if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE) |
371 | 0 | ret = -ENODEV; |
372 | 0 | } |
373 | 0 | if (ret) |
374 | 0 | return ret; |
375 | 0 | } |
376 | 0 |
377 | 0 | ret = cpufreq_register_driver(&powernow_cpufreq_driver); |
378 | 0 | return ret; |
379 | 0 | } |