/root/src/xen/xen/drivers/cpufreq/utility.c
Line | Count | Source |
1 | | /* |
2 | | * utility.c - misc functions for cpufreq driver and Px statistic |
3 | | * |
4 | | * Copyright (C) 2001 Russell King |
5 | | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
6 | | * |
7 | | * Oct 2005 - Ashok Raj <ashok.raj@intel.com> |
8 | | * Added handling for CPU hotplug |
9 | | * Feb 2006 - Jacob Shin <jacob.shin@amd.com> |
10 | | * Fix handling for CPU hotplug -- affected CPUs |
11 | | * Feb 2008 - Liu Jinsong <jinsong.liu@intel.com> |
12 | | * 1. Merge cpufreq.c and freq_table.c of linux 2.6.23 |
13 | | * And porting to Xen hypervisor |
14 | | * 2. some Px statistic interface functions |
15 | | * |
16 | | * This program is free software; you can redistribute it and/or modify |
17 | | * it under the terms of the GNU General Public License version 2 as |
18 | | * published by the Free Software Foundation. |
19 | | * |
20 | | */ |
21 | | |
22 | | #include <xen/errno.h> |
23 | | #include <xen/cpumask.h> |
24 | | #include <xen/types.h> |
25 | | #include <xen/spinlock.h> |
26 | | #include <xen/percpu.h> |
27 | | #include <xen/types.h> |
28 | | #include <xen/sched.h> |
29 | | #include <xen/timer.h> |
30 | | #include <xen/trace.h> |
31 | | #include <acpi/cpufreq/cpufreq.h> |
32 | | #include <public/sysctl.h> |
33 | | |
34 | | struct cpufreq_driver *cpufreq_driver; |
35 | | struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS]; |
36 | | DEFINE_PER_CPU_READ_MOSTLY(struct cpufreq_policy *, cpufreq_cpu_policy); |
37 | | |
38 | | DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock); |
39 | | |
40 | | /********************************************************************* |
41 | | * Px STATISTIC INFO * |
42 | | *********************************************************************/ |
43 | | |
44 | | void cpufreq_residency_update(unsigned int cpu, uint8_t state) |
45 | 0 | { |
46 | 0 | uint64_t now, total_idle_ns; |
47 | 0 | int64_t delta; |
48 | 0 | struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu); |
49 | 0 |
50 | 0 | total_idle_ns = get_cpu_idle_time(cpu); |
51 | 0 | now = NOW(); |
52 | 0 |
53 | 0 | delta = (now - pxpt->prev_state_wall) - |
54 | 0 | (total_idle_ns - pxpt->prev_idle_wall); |
55 | 0 |
56 | 0 | if ( likely(delta >= 0) ) |
57 | 0 | pxpt->u.pt[state].residency += delta; |
58 | 0 |
59 | 0 | pxpt->prev_state_wall = now; |
60 | 0 | pxpt->prev_idle_wall = total_idle_ns; |
61 | 0 | } |
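The residency bookkeeping above charges a P-state with busy time only: the wall-clock interval since the last update, minus however much of that interval the CPU spent idle. A minimal stand-alone sketch of that arithmetic in plain C, using made-up snapshot values rather than anything taken from this file:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Invented snapshots, all in nanoseconds. */
    uint64_t prev_state_wall = 1000000000ULL;  /* wall clock at the last update  */
    uint64_t now             = 1500000000ULL;  /* wall clock now: 500 ms elapsed */
    uint64_t prev_idle_wall  =  400000000ULL;  /* idle time at the last update   */
    uint64_t total_idle_ns   =  700000000ULL;  /* idle time now: 300 ms idling   */

    /* Same arithmetic as cpufreq_residency_update(): elapsed minus idle. */
    int64_t delta = (int64_t)((now - prev_state_wall) -
                              (total_idle_ns - prev_idle_wall));

    if (delta >= 0)
        printf("charge %lld ns of busy time to the outgoing P-state\n",
               (long long)delta);              /* prints 200000000 */
    return 0;
}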
62 | | |
63 | | void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to) |
64 | 0 | { |
65 | 0 | struct pm_px *pxpt; |
66 | 0 | struct processor_pminfo *pmpt = processor_pminfo[cpu]; |
67 | 0 | spinlock_t *cpufreq_statistic_lock = |
68 | 0 | &per_cpu(cpufreq_statistic_lock, cpu); |
69 | 0 |
70 | 0 | spin_lock(cpufreq_statistic_lock); |
71 | 0 |
72 | 0 | pxpt = per_cpu(cpufreq_statistic_data, cpu); |
73 | 0 | if ( !pxpt || !pmpt ) { |
74 | 0 | spin_unlock(cpufreq_statistic_lock); |
75 | 0 | return; |
76 | 0 | } |
77 | 0 |
78 | 0 | pxpt->u.last = from; |
79 | 0 | pxpt->u.cur = to; |
80 | 0 | pxpt->u.pt[to].count++; |
81 | 0 |
82 | 0 | cpufreq_residency_update(cpu, from); |
83 | 0 |
84 | 0 | (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++; |
85 | 0 |
86 | 0 | spin_unlock(cpufreq_statistic_lock); |
87 | 0 | } |
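cpufreq_statistic_update() keeps the from/to transition counters in one flat allocation of state_count * state_count entries rather than a two-dimensional array, so an update is a single row-major index. A small self-contained illustration of that layout; the state count and the P1 -> P3 hop are invented for the example:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned int count = 4;                      /* assumed number of P-states    */
    unsigned int from = 1, to = 3;               /* one hypothetical P1 -> P3 hop */
    uint64_t *trans_pt = calloc((size_t)count * count, sizeof(*trans_pt));

    if (!trans_pt)
        return 1;

    /* Row-major indexing, mirroring "trans_pt + from * state_count + to" above. */
    trans_pt[from * count + to]++;

    printf("P%u -> P%u transitions so far: %llu\n", from, to,
           (unsigned long long)trans_pt[from * count + to]);
    free(trans_pt);
    return 0;
}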
88 | | |
89 | | int cpufreq_statistic_init(unsigned int cpuid) |
90 | 0 | { |
91 | 0 | uint32_t i, count; |
92 | 0 | struct pm_px *pxpt; |
93 | 0 | const struct processor_pminfo *pmpt = processor_pminfo[cpuid]; |
94 | 0 | spinlock_t *cpufreq_statistic_lock = |
95 | 0 | &per_cpu(cpufreq_statistic_lock, cpuid); |
96 | 0 |
97 | 0 | spin_lock_init(cpufreq_statistic_lock); |
98 | 0 |
99 | 0 | if ( !pmpt ) |
100 | 0 | return -EINVAL; |
101 | 0 |
102 | 0 | spin_lock(cpufreq_statistic_lock); |
103 | 0 |
104 | 0 | pxpt = per_cpu(cpufreq_statistic_data, cpuid); |
105 | 0 | if ( pxpt ) { |
106 | 0 | spin_unlock(cpufreq_statistic_lock); |
107 | 0 | return 0; |
108 | 0 | } |
109 | 0 |
110 | 0 | count = pmpt->perf.state_count; |
111 | 0 |
112 | 0 | pxpt = xzalloc(struct pm_px); |
113 | 0 | if ( !pxpt ) { |
114 | 0 | spin_unlock(cpufreq_statistic_lock); |
115 | 0 | return -ENOMEM; |
116 | 0 | } |
117 | 0 | per_cpu(cpufreq_statistic_data, cpuid) = pxpt; |
118 | 0 |
119 | 0 | pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count); |
120 | 0 | if (!pxpt->u.trans_pt) { |
121 | 0 | xfree(pxpt); |
122 | 0 | spin_unlock(cpufreq_statistic_lock); |
123 | 0 | return -ENOMEM; |
124 | 0 | } |
125 | 0 |
126 | 0 | pxpt->u.pt = xzalloc_array(struct pm_px_val, count); |
127 | 0 | if (!pxpt->u.pt) { |
128 | 0 | xfree(pxpt->u.trans_pt); |
129 | 0 | xfree(pxpt); |
130 | 0 | spin_unlock(cpufreq_statistic_lock); |
131 | 0 | return -ENOMEM; |
132 | 0 | } |
133 | 0 |
134 | 0 | pxpt->u.total = pmpt->perf.state_count; |
135 | 0 | pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit; |
136 | 0 |
137 | 0 | for (i=0; i < pmpt->perf.state_count; i++) |
138 | 0 | pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency; |
139 | 0 |
140 | 0 | pxpt->prev_state_wall = NOW(); |
141 | 0 | pxpt->prev_idle_wall = get_cpu_idle_time(cpuid); |
142 | 0 |
143 | 0 | spin_unlock(cpufreq_statistic_lock); |
144 | 0 |
145 | 0 | return 0; |
146 | 0 | } |
147 | | |
148 | | void cpufreq_statistic_exit(unsigned int cpuid) |
149 | 0 | { |
150 | 0 | struct pm_px *pxpt; |
151 | 0 | spinlock_t *cpufreq_statistic_lock = |
152 | 0 | &per_cpu(cpufreq_statistic_lock, cpuid); |
153 | 0 |
154 | 0 | spin_lock(cpufreq_statistic_lock); |
155 | 0 |
156 | 0 | pxpt = per_cpu(cpufreq_statistic_data, cpuid); |
157 | 0 | if (!pxpt) { |
158 | 0 | spin_unlock(cpufreq_statistic_lock); |
159 | 0 | return; |
160 | 0 | } |
161 | 0 |
162 | 0 | xfree(pxpt->u.trans_pt); |
163 | 0 | xfree(pxpt->u.pt); |
164 | 0 | xfree(pxpt); |
165 | 0 | per_cpu(cpufreq_statistic_data, cpuid) = NULL; |
166 | 0 |
167 | 0 | spin_unlock(cpufreq_statistic_lock); |
168 | 0 | } |
169 | | |
170 | | void cpufreq_statistic_reset(unsigned int cpuid) |
171 | 0 | { |
172 | 0 | uint32_t i, j, count; |
173 | 0 | struct pm_px *pxpt; |
174 | 0 | const struct processor_pminfo *pmpt = processor_pminfo[cpuid]; |
175 | 0 | spinlock_t *cpufreq_statistic_lock = |
176 | 0 | &per_cpu(cpufreq_statistic_lock, cpuid); |
177 | 0 |
178 | 0 | spin_lock(cpufreq_statistic_lock); |
179 | 0 |
180 | 0 | pxpt = per_cpu(cpufreq_statistic_data, cpuid); |
181 | 0 | if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) { |
182 | 0 | spin_unlock(cpufreq_statistic_lock); |
183 | 0 | return; |
184 | 0 | } |
185 | 0 |
186 | 0 | count = pmpt->perf.state_count; |
187 | 0 |
188 | 0 | for (i=0; i < count; i++) { |
189 | 0 | pxpt->u.pt[i].residency = 0; |
190 | 0 | pxpt->u.pt[i].count = 0; |
191 | 0 |
192 | 0 | for (j=0; j < count; j++) |
193 | 0 | *(pxpt->u.trans_pt + i*count + j) = 0; |
194 | 0 | } |
195 | 0 |
196 | 0 | pxpt->prev_state_wall = NOW(); |
197 | 0 | pxpt->prev_idle_wall = get_cpu_idle_time(cpuid); |
198 | 0 |
199 | 0 | spin_unlock(cpufreq_statistic_lock); |
200 | 0 | } |
201 | | |
202 | | |
203 | | /********************************************************************* |
204 | | * FREQUENCY TABLE HELPERS * |
205 | | *********************************************************************/ |
206 | | |
207 | | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, |
208 | | struct cpufreq_frequency_table *table) |
209 | 0 | { |
210 | 0 | unsigned int min_freq = ~0; |
211 | 0 | unsigned int max_freq = 0; |
212 | 0 | unsigned int second_max_freq = 0; |
213 | 0 | unsigned int i; |
214 | 0 |
215 | 0 | for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
216 | 0 | unsigned int freq = table[i].frequency; |
217 | 0 | if (freq == CPUFREQ_ENTRY_INVALID) |
218 | 0 | continue; |
219 | 0 | if (freq < min_freq) |
220 | 0 | min_freq = freq; |
221 | 0 | if (freq > max_freq) |
222 | 0 | max_freq = freq; |
223 | 0 | } |
224 | 0 | for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
225 | 0 | unsigned int freq = table[i].frequency; |
226 | 0 | if (freq == CPUFREQ_ENTRY_INVALID || freq == max_freq) |
227 | 0 | continue; |
228 | 0 | if (freq > second_max_freq) |
229 | 0 | second_max_freq = freq; |
230 | 0 | } |
231 | 0 | if (second_max_freq == 0) |
232 | 0 | second_max_freq = max_freq; |
233 | 0 | if (cpufreq_verbose) |
234 | 0 | printk("max_freq: %u second_max_freq: %u\n", |
235 | 0 | max_freq, second_max_freq); |
236 | 0 |
237 | 0 | policy->min = policy->cpuinfo.min_freq = min_freq; |
238 | 0 | policy->max = policy->cpuinfo.max_freq = max_freq; |
239 | 0 | policy->cpuinfo.second_max_freq = second_max_freq; |
240 | 0 |
241 | 0 | if (policy->min == ~0) |
242 | 0 | return -EINVAL; |
243 | 0 | else |
244 | 0 | return 0; |
245 | 0 | } |
246 | | |
247 | | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, |
248 | | struct cpufreq_frequency_table *table) |
249 | 0 | { |
250 | 0 | unsigned int next_larger = ~0; |
251 | 0 | unsigned int i; |
252 | 0 | unsigned int count = 0; |
253 | 0 |
254 | 0 | if (!cpu_online(policy->cpu)) |
255 | 0 | return -EINVAL; |
256 | 0 |
257 | 0 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
258 | 0 | policy->cpuinfo.max_freq); |
259 | 0 |
260 | 0 | for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
261 | 0 | unsigned int freq = table[i].frequency; |
262 | 0 | if (freq == CPUFREQ_ENTRY_INVALID) |
263 | 0 | continue; |
264 | 0 | if ((freq >= policy->min) && (freq <= policy->max)) |
265 | 0 | count++; |
266 | 0 | else if ((next_larger > freq) && (freq > policy->max)) |
267 | 0 | next_larger = freq; |
268 | 0 | } |
269 | 0 |
270 | 0 | if (!count) |
271 | 0 | policy->max = next_larger; |
272 | 0 |
273 | 0 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
274 | 0 | policy->cpuinfo.max_freq); |
275 | 0 |
276 | 0 | return 0; |
277 | 0 | } |
278 | | |
279 | | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, |
280 | | struct cpufreq_frequency_table *table, |
281 | | unsigned int target_freq, |
282 | | unsigned int relation, |
283 | | unsigned int *index) |
284 | 0 | { |
285 | 0 | struct cpufreq_frequency_table optimal = { |
286 | 0 | .index = ~0, |
287 | 0 | .frequency = 0, |
288 | 0 | }; |
289 | 0 | struct cpufreq_frequency_table suboptimal = { |
290 | 0 | .index = ~0, |
291 | 0 | .frequency = 0, |
292 | 0 | }; |
293 | 0 | unsigned int i; |
294 | 0 |
295 | 0 | switch (relation) { |
296 | 0 | case CPUFREQ_RELATION_H: |
297 | 0 | suboptimal.frequency = ~0; |
298 | 0 | break; |
299 | 0 | case CPUFREQ_RELATION_L: |
300 | 0 | optimal.frequency = ~0; |
301 | 0 | break; |
302 | 0 | } |
303 | 0 |
304 | 0 | if (!cpu_online(policy->cpu)) |
305 | 0 | return -EINVAL; |
306 | 0 |
307 | 0 | for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
308 | 0 | unsigned int freq = table[i].frequency; |
309 | 0 | if (freq == CPUFREQ_ENTRY_INVALID) |
310 | 0 | continue; |
311 | 0 | if ((freq < policy->min) || (freq > policy->max)) |
312 | 0 | continue; |
313 | 0 | switch(relation) { |
314 | 0 | case CPUFREQ_RELATION_H: |
315 | 0 | if (freq <= target_freq) { |
316 | 0 | if (freq >= optimal.frequency) { |
317 | 0 | optimal.frequency = freq; |
318 | 0 | optimal.index = i; |
319 | 0 | } |
320 | 0 | } else { |
321 | 0 | if (freq <= suboptimal.frequency) { |
322 | 0 | suboptimal.frequency = freq; |
323 | 0 | suboptimal.index = i; |
324 | 0 | } |
325 | 0 | } |
326 | 0 | break; |
327 | 0 | case CPUFREQ_RELATION_L: |
328 | 0 | if (freq >= target_freq) { |
329 | 0 | if (freq <= optimal.frequency) { |
330 | 0 | optimal.frequency = freq; |
331 | 0 | optimal.index = i; |
332 | 0 | } |
333 | 0 | } else { |
334 | 0 | if (freq >= suboptimal.frequency) { |
335 | 0 | suboptimal.frequency = freq; |
336 | 0 | suboptimal.index = i; |
337 | 0 | } |
338 | 0 | } |
339 | 0 | break; |
340 | 0 | } |
341 | 0 | } |
342 | 0 | if (optimal.index > i) { |
343 | 0 | if (suboptimal.index > i) |
344 | 0 | return -EINVAL; |
345 | 0 | *index = suboptimal.index; |
346 | 0 | } else |
347 | 0 | *index = optimal.index; |
348 | 0 |
349 | 0 | return 0; |
350 | 0 | } |
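cpufreq_frequency_table_target() resolves a target frequency according to the relation: CPUFREQ_RELATION_H rounds down to the closest table entry at or below the target, CPUFREQ_RELATION_L rounds up to the closest entry at or above it, and the "suboptimal" entry is only used when nothing satisfies the preferred direction. A condensed stand-alone rendition of that rounding rule on an invented four-entry table; it deliberately omits the policy min/max filtering and the suboptimal fallback:

#include <stdio.h>

int main(void)
{
    /* Assumed P-state table in kHz; the real table is built from ACPI data. */
    unsigned int freqs[] = { 2400000, 2000000, 1600000, 800000 };
    unsigned int n = sizeof(freqs) / sizeof(freqs[0]);
    unsigned int target = 1700000;
    unsigned int i, best_h = 0, best_l = ~0u;

    for (i = 0; i < n; i++) {
        if (freqs[i] <= target && freqs[i] > best_h)
            best_h = freqs[i];  /* RELATION_H: highest entry not above the target */
        if (freqs[i] >= target && freqs[i] < best_l)
            best_l = freqs[i];  /* RELATION_L: lowest entry not below the target  */
    }

    printf("target %u kHz: RELATION_H -> %u kHz, RELATION_L -> %u kHz\n",
           target, best_h, best_l);   /* 1600000 and 2000000 respectively */
    return 0;
}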
351 | | |
352 | | |
353 | | /********************************************************************* |
354 | | * GOVERNORS * |
355 | | *********************************************************************/ |
356 | | |
357 | | int __cpufreq_driver_target(struct cpufreq_policy *policy, |
358 | | unsigned int target_freq, |
359 | | unsigned int relation) |
360 | 0 | { |
361 | 0 | int retval = -EINVAL; |
362 | 0 |
363 | 0 | if (cpu_online(policy->cpu) && cpufreq_driver->target) |
364 | 0 | { |
365 | 0 | unsigned int prev_freq = policy->cur; |
366 | 0 |
367 | 0 | retval = cpufreq_driver->target(policy, target_freq, relation); |
368 | 0 | if ( retval == 0 ) |
369 | 0 | TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq/1000, policy->cur/1000); |
370 | 0 | } |
371 | 0 |
372 | 0 | return retval; |
373 | 0 | } |
374 | | |
375 | | int cpufreq_driver_getavg(unsigned int cpu, unsigned int flag) |
376 | 0 | { |
377 | 0 | struct cpufreq_policy *policy; |
378 | 0 | int freq_avg; |
379 | 0 |
380 | 0 | if (!cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu))) |
381 | 0 | return 0; |
382 | 0 |
383 | 0 | if (cpufreq_driver->getavg) |
384 | 0 | { |
385 | 0 | freq_avg = cpufreq_driver->getavg(cpu, flag); |
386 | 0 | if (freq_avg > 0) |
387 | 0 | return freq_avg; |
388 | 0 | } |
389 | 0 |
390 | 0 | return policy->cur; |
391 | 0 | } |
392 | | |
393 | | int cpufreq_update_turbo(int cpuid, int new_state) |
394 | 0 | { |
395 | 0 | struct cpufreq_policy *policy; |
396 | 0 | int curr_state; |
397 | 0 | int ret = 0; |
398 | 0 |
399 | 0 | if (new_state != CPUFREQ_TURBO_ENABLED && |
400 | 0 | new_state != CPUFREQ_TURBO_DISABLED) |
401 | 0 | return -EINVAL; |
402 | 0 |
403 | 0 | policy = per_cpu(cpufreq_cpu_policy, cpuid); |
404 | 0 | if (!policy) |
405 | 0 | return -EACCES; |
406 | 0 |
407 | 0 | if (policy->turbo == CPUFREQ_TURBO_UNSUPPORTED) |
408 | 0 | return -EOPNOTSUPP; |
409 | 0 |
410 | 0 | curr_state = policy->turbo; |
411 | 0 | if (curr_state == new_state) |
412 | 0 | return 0; |
413 | 0 |
414 | 0 | policy->turbo = new_state; |
415 | 0 | if (cpufreq_driver->update) |
416 | 0 | { |
417 | 0 | ret = cpufreq_driver->update(cpuid, policy); |
418 | 0 | if (ret) |
419 | 0 | policy->turbo = curr_state; |
420 | 0 | } |
421 | 0 |
422 | 0 | return ret; |
423 | 0 | } |
424 | | |
425 | | |
426 | | int cpufreq_get_turbo_status(int cpuid) |
427 | 0 | { |
428 | 0 | struct cpufreq_policy *policy; |
429 | 0 |
430 | 0 | policy = per_cpu(cpufreq_cpu_policy, cpuid); |
431 | 0 | return policy && policy->turbo == CPUFREQ_TURBO_ENABLED; |
432 | 0 | } |
433 | | |
434 | | /********************************************************************* |
435 | | * POLICY * |
436 | | *********************************************************************/ |
437 | | |
438 | | /* |
439 | | * data : current policy. |
440 | | * policy : policy to be set. |
441 | | */ |
442 | | int __cpufreq_set_policy(struct cpufreq_policy *data, |
443 | | struct cpufreq_policy *policy) |
444 | 0 | { |
445 | 0 | int ret = 0; |
446 | 0 |
447 | 0 | memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); |
448 | 0 |
449 | 0 | if (policy->min > data->min && policy->min > policy->max) |
450 | 0 | return -EINVAL; |
451 | 0 |
452 | 0 | /* verify the cpu speed can be set within this limit */ |
453 | 0 | ret = cpufreq_driver->verify(policy); |
454 | 0 | if (ret) |
455 | 0 | return ret; |
456 | 0 |
457 | 0 | data->min = policy->min; |
458 | 0 | data->max = policy->max; |
459 | 0 | data->limits = policy->limits; |
460 | 0 | if (cpufreq_driver->setpolicy) |
461 | 0 | return cpufreq_driver->setpolicy(data); |
462 | 0 |
463 | 0 | if (policy->governor != data->governor) { |
464 | 0 | /* save old, working values */ |
465 | 0 | struct cpufreq_governor *old_gov = data->governor; |
466 | 0 |
467 | 0 | /* end old governor */ |
468 | 0 | if (data->governor) |
469 | 0 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
470 | 0 |
471 | 0 | /* start new governor */ |
472 | 0 | data->governor = policy->governor; |
473 | 0 | if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { |
474 | 0 | printk(KERN_WARNING "Fail change to %s governor\n", |
475 | 0 | data->governor->name); |
476 | 0 |
477 | 0 | /* new governor failed, so re-start old one */ |
478 | 0 | data->governor = old_gov; |
479 | 0 | if (old_gov) { |
480 | 0 | __cpufreq_governor(data, CPUFREQ_GOV_START); |
481 | 0 | printk(KERN_WARNING "Still stay at %s governor\n", |
482 | 0 | data->governor->name); |
483 | 0 | } |
484 | 0 | return -EINVAL; |
485 | 0 | } |
486 | 0 | /* might be a policy change, too, so fall through */ |
487 | 0 | } |
488 | 0 |
489 | 0 | return __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); |
490 | 0 | } |
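The governor hand-over at the end of __cpufreq_set_policy() follows a save/try/roll-back pattern: remember the old governor, start the new one, and if starting fails restore and restart the old one so the CPU is never left without a working governor. A stripped-down self-contained sketch of just that pattern, using invented toy types in place of struct cpufreq_policy and struct cpufreq_governor, and leaving out the GOV_STOP and GOV_LIMITS steps:

#include <stdio.h>

struct toy_governor {
    const char *name;
    int (*start)(void);         /* returns 0 on success, like CPUFREQ_GOV_START */
};

static int ok_start(void)   { return 0; }
static int fail_start(void) { return -1; }

/* Try the new governor; fall back to the old, known-good one on failure. */
static int switch_governor(struct toy_governor **cur, struct toy_governor *next)
{
    struct toy_governor *old = *cur;

    *cur = next;
    if (next->start()) {
        *cur = old;             /* new governor failed: re-start the old one */
        if (old)
            old->start();
        return -1;
    }
    return 0;
}

int main(void)
{
    struct toy_governor ondemand = { "ondemand", ok_start };
    struct toy_governor broken   = { "broken",   fail_start };
    struct toy_governor *cur = &ondemand;

    if (switch_governor(&cur, &broken))
        printf("still on the %s governor\n", cur->name);
    return 0;
}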