debuggers.hg

view xen/arch/x86/acpi/cpufreq/utility.c @ 17943:baaea9f0db5e

x86: Add cpufreq logic to S3 suspend/resume

When suspend to S3, stop the cpufreq dbs governor. When resume from
S3, firstly sync cpu state and freq at the 1st dbs timer; from 2nd dbs
timer on, cpufreq dbs governor control cpu px transfer according to
its workload algorithm. Px statistic is also handled.

Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 27 16:16:47 2008 +0100 (2008-06-27)
parents d0817f08599a
children 0b4dbd9a9896
line source
1 /*
2 * utility.c - misc functions for cpufreq driver and Px statistic
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
11 * Feb 2008 - Liu Jinsong <jinsong.liu@intel.com>
12 * 1. Merge cpufreq.c and freq_table.c of linux 2.6.23
13 * And porting to Xen hypervisor
14 * 2. Some Px statistic interface functions
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 *
20 */
22 #include <xen/errno.h>
23 #include <xen/cpumask.h>
24 #include <xen/types.h>
25 #include <xen/spinlock.h>
26 #include <xen/percpu.h>
27 #include <xen/types.h>
28 #include <xen/sched.h>
29 #include <xen/timer.h>
30 #include <asm/config.h>
31 #include <acpi/cpufreq/cpufreq.h>
32 #include <public/sysctl.h>
34 struct cpufreq_driver *cpufreq_driver;
36 /*********************************************************************
37 * Px STATISTIC INFO *
38 *********************************************************************/
40 void px_statistic_suspend(void)
41 {
42 int cpu;
43 uint64_t now;
45 now = NOW();
47 for_each_online_cpu(cpu) {
48 struct pm_px *pxpt = &px_statistic_data[cpu];
49 pxpt->u.pt[pxpt->u.cur].residency +=
50 now - pxpt->prev_state_wall;
51 }
52 }
54 void px_statistic_resume(void)
55 {
56 int cpu;
57 uint64_t now;
59 now = NOW();
61 for_each_online_cpu(cpu) {
62 struct pm_px *pxpt = &px_statistic_data[cpu];
63 pxpt->prev_state_wall = now;
64 }
65 }
67 void px_statistic_update(cpumask_t cpumask, uint8_t from, uint8_t to)
68 {
69 uint32_t i;
70 uint64_t now;
72 now = NOW();
74 for_each_cpu_mask(i, cpumask) {
75 struct pm_px *pxpt = &px_statistic_data[i];
76 uint32_t statnum = processor_pminfo[i].perf.state_count;
78 pxpt->u.last = from;
79 pxpt->u.cur = to;
80 pxpt->u.pt[to].count++;
81 pxpt->u.pt[from].residency += now - pxpt->prev_state_wall;
83 (*(pxpt->u.trans_pt + from*statnum + to))++;
85 pxpt->prev_state_wall = now;
86 }
87 }
89 int px_statistic_init(int cpuid)
90 {
91 uint32_t i, count;
92 struct pm_px *pxpt = &px_statistic_data[cpuid];
93 struct processor_pminfo *pmpt = &processor_pminfo[cpuid];
95 count = pmpt->perf.state_count;
97 pxpt->u.trans_pt = xmalloc_array(uint64_t, count * count);
98 if (!pxpt->u.trans_pt)
99 return -ENOMEM;
101 pxpt->u.pt = xmalloc_array(struct pm_px_val, count);
102 if (!pxpt->u.pt) {
103 xfree(pxpt->u.trans_pt);
104 return -ENOMEM;
105 }
107 memset(pxpt->u.trans_pt, 0, count * count * (sizeof(uint64_t)));
108 memset(pxpt->u.pt, 0, count * (sizeof(struct pm_px_val)));
110 pxpt->u.total = pmpt->perf.state_count;
111 pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.ppc;
113 for (i=0; i < pmpt->perf.state_count; i++)
114 pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
116 pxpt->prev_state_wall = NOW();
118 return 0;
119 }
121 void px_statistic_reset(int cpuid)
122 {
123 uint32_t i, j, count;
124 struct pm_px *pxpt = &px_statistic_data[cpuid];
126 count = processor_pminfo[cpuid].perf.state_count;
128 for (i=0; i < count; i++) {
129 pxpt->u.pt[i].residency = 0;
130 pxpt->u.pt[i].count = 0;
132 for (j=0; j < count; j++)
133 *(pxpt->u.trans_pt + i*count + j) = 0;
134 }
136 pxpt->prev_state_wall = NOW();
137 }
140 /*********************************************************************
141 * FREQUENCY TABLE HELPERS *
142 *********************************************************************/
144 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
145 struct cpufreq_frequency_table *table)
146 {
147 unsigned int min_freq = ~0;
148 unsigned int max_freq = 0;
149 unsigned int i;
151 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
152 unsigned int freq = table[i].frequency;
153 if (freq == CPUFREQ_ENTRY_INVALID)
154 continue;
155 if (freq < min_freq)
156 min_freq = freq;
157 if (freq > max_freq)
158 max_freq = freq;
159 }
161 policy->min = policy->cpuinfo.min_freq = min_freq;
162 policy->max = policy->cpuinfo.max_freq = max_freq;
164 if (policy->min == ~0)
165 return -EINVAL;
166 else
167 return 0;
168 }
/*
 * Select the table entry best matching target_freq under 'relation':
 *   CPUFREQ_RELATION_H - highest frequency <= target_freq
 *   CPUFREQ_RELATION_L - lowest  frequency >= target_freq
 * The chosen table index is stored in *index.
 *
 * 'optimal' tracks the best entry satisfying the relation; 'suboptimal'
 * tracks the best fallback on the wrong side of target_freq. Both start
 * with index = ~0 as a "not found" sentinel, and frequency initialised
 * to the worst value for their comparison direction (0 or ~0, set per
 * relation below) so the first candidate always wins.
 *
 * Returns 0 on success, -EINVAL if the CPU is offline or no entry
 * within [policy->min, policy->max] exists.
 */
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                   struct cpufreq_frequency_table *table,
                   unsigned int target_freq,
                   unsigned int relation,
                   unsigned int *index)
{
    struct cpufreq_frequency_table optimal = {
        .index = ~0,
        .frequency = 0,
    };
    struct cpufreq_frequency_table suboptimal = {
        .index = ~0,
        .frequency = 0,
    };
    unsigned int i;

    /* Seed the comparison baselines for the chosen direction. */
    switch (relation) {
    case CPUFREQ_RELATION_H:
        suboptimal.frequency = ~0;
        break;
    case CPUFREQ_RELATION_L:
        optimal.frequency = ~0;
        break;
    }

    if (!cpu_online(policy->cpu))
        return -EINVAL;

    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        /* Skip entries outside the current policy limits. */
        if ((freq < policy->min) || (freq > policy->max))
            continue;
        switch(relation) {
        case CPUFREQ_RELATION_H:
            if (freq <= target_freq) {
                /* On/below target: keep the highest such frequency. */
                if (freq >= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                /* Above target: remember the closest as fallback. */
                if (freq <= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        case CPUFREQ_RELATION_L:
            if (freq >= target_freq) {
                /* On/above target: keep the lowest such frequency. */
                if (freq <= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                /* Below target: remember the closest as fallback. */
                if (freq >= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        }
    }
    /*
     * After the loop, i is the table length; an untouched index is ~0,
     * which as unsigned always exceeds i, signalling "never set".
     * Prefer optimal, fall back to suboptimal, else fail.
     */
    if (optimal.index > i) {
        if (suboptimal.index > i)
            return -EINVAL;
        *index = suboptimal.index;
    } else
        *index = optimal.index;

    return 0;
}
244 /*********************************************************************
245 * GOVERNORS *
246 *********************************************************************/
248 int __cpufreq_driver_target(struct cpufreq_policy *policy,
249 unsigned int target_freq,
250 unsigned int relation)
251 {
252 int retval = -EINVAL;
254 if (cpu_online(policy->cpu) && cpufreq_driver->target)
255 retval = cpufreq_driver->target(policy, target_freq, relation);
257 return retval;
258 }
260 int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
261 {
262 int ret = 0;
264 if (!policy)
265 return -EINVAL;
267 if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
268 ret = cpufreq_driver->getavg(policy->cpu);
270 return ret;
271 }
274 /*********************************************************************
275 * CPUFREQ SUSPEND/RESUME *
276 *********************************************************************/
278 void cpufreq_suspend(void)
279 {
280 int cpu;
282 /* to protect the case when Px was controlled by dom0-kernel */
283 /* or when CPU_FREQ not set in which case ACPI Px objects not parsed */
284 for_each_online_cpu(cpu) {
285 struct processor_performance *perf = &processor_pminfo[cpu].perf;
287 if (!perf->init)
288 return;
289 }
291 cpufreq_dom_dbs(CPUFREQ_GOV_STOP);
293 cpufreq_dom_exit();
295 px_statistic_suspend();
296 }
/*
 * Re-enable cpufreq after S3 resume: reset each CPU's Px state and set
 * the resume flag (so the first dbs timer syncs actual state/freq),
 * restart Px statistics, then bring the per-domain state and the dbs
 * governor back up.
 *
 * Returns 0 on success or a negative error from dom init / governor
 * start; on those failures the err path tears domain state back down.
 */
int cpufreq_resume(void)
{
    int cpu, ret = 0;

    /* 1. to protect the case when Px was controlled by dom0-kernel */
    /* or when CPU_FREQ not set in which case ACPI Px objects not parsed */
    /* 2. set state and resume flag to sync cpu to right state and freq */
    for_each_online_cpu(cpu) {
        struct processor_performance *perf = &processor_pminfo[cpu].perf;
        struct cpufreq_policy *policy = &xen_px_policy[cpu];

        /*
         * NOTE(review): ret is still 0 here, so this path runs
         * cpufreq_dom_exit() yet returns success -- presumably a
         * deliberate silent bail mirroring cpufreq_suspend(); confirm.
         */
        if (!perf->init)
            goto err;
        perf->state = 0;
        policy->resume = 1;
    }

    px_statistic_resume();

    ret = cpufreq_dom_init();
    if (ret)
        goto err;

    ret = cpufreq_dom_dbs(CPUFREQ_GOV_START);
    if (ret)
        goto err;

    return ret;

err:
    cpufreq_dom_exit();
    return ret;
}