debuggers.hg

view xen/arch/x86/acpi/cpufreq/cpufreq_ondemand.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
children 5c0bf00e371d

/*
 *  xen/arch/x86/acpi/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *             Feb 2008 Liu Jinsong <jinsong.liu@intel.com>
 *             Porting cpufreq_ondemand.c from Linux 2.6.23 to Xen hypervisor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <xen/types.h>
#include <xen/percpu.h>
#include <xen/cpumask.h>
#include <xen/sched.h>
#include <xen/timer.h>
#include <asm/config.h>
#include <acpi/cpufreq/cpufreq.h>
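
/*
 * Tunables: the governor samples CPU load once every sampling_rate ns
 * and jumps straight to the maximum frequency whenever load exceeds
 * up_threshold percent; otherwise it picks the lowest frequency that
 * keeps load about 10 points under that threshold (see dbs_check_cpu()
 * below).
 */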
#define DEF_FREQUENCY_UP_THRESHOLD              (80)

#define MIN_DBS_INTERVAL                        (MICROSECS(100))
#define MIN_SAMPLING_MILLISECS                  (20)
#define MIN_STAT_SAMPLING_RATE                  \
    (MIN_SAMPLING_MILLISECS * MILLISECS(1))
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (1000)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000)

static uint64_t def_sampling_rate;

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;    /* number of CPUs using this policy */

static struct dbs_tuners {
    uint64_t     sampling_rate;
    unsigned int up_threshold;
    unsigned int ignore_nice;
    unsigned int powersave_bias;
} dbs_tuners_ins = {
    .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
    .ignore_nice = 0,
    .powersave_bias = 0,
};

static struct timer dbs_timer[NR_CPUS];
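
/*
 * A physical CPU is idle exactly while its idle vCPU runs, so idle time
 * is read off the idle vCPU's RUNSTATE_running accumulator. If the idle
 * vCPU is running right now, the time since it entered that state has
 * not been folded into the accumulator yet and is added by hand.
 */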
static inline uint64_t get_cpu_idle_time(unsigned int cpu)
{
    uint64_t idle_ns;
    struct vcpu *v;

    if ((v = idle_vcpu[cpu]) == NULL)
        return 0;

    idle_ns = v->runstate.time[RUNSTATE_running];
    if (v->is_running)
        idle_ns += NOW() - v->runstate.state_entry_time;

    return idle_ns;
}
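
/*
 * Core of the governor: compute load over the last sampling window as
 * 100 * (wall - idle) / wall. For a multi-CPU policy the smallest
 * per-CPU idle delta is used, i.e. the busiest CPU in the domain
 * decides the frequency for all of them.
 */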
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
    unsigned int load = 0;
    uint64_t cur_ns, idle_ns, total_ns;

    struct cpufreq_policy *policy;
    unsigned int j;

    if (!this_dbs_info->enable)
        return;

    policy = this_dbs_info->cur_policy;
    cur_ns = NOW();
    total_ns = cur_ns - this_dbs_info->prev_cpu_wall;
    /* reuse cur_ns so the new baseline matches the delta just taken */
    this_dbs_info->prev_cpu_wall = cur_ns;

    if (total_ns < MIN_DBS_INTERVAL)
        return;

    /* Get Idle Time */
    idle_ns = UINT_MAX;
    for_each_cpu_mask(j, policy->cpus) {
        uint64_t total_idle_ns;
        uint64_t tmp_idle_ns;    /* 64-bit: an ns delta can exceed UINT_MAX */
        struct cpu_dbs_info_s *j_dbs_info;

        j_dbs_info = &per_cpu(cpu_dbs_info, j);
        total_idle_ns = get_cpu_idle_time(j);
        tmp_idle_ns = total_idle_ns - j_dbs_info->prev_cpu_idle;
        j_dbs_info->prev_cpu_idle = total_idle_ns;

        if (tmp_idle_ns < idle_ns)
            idle_ns = tmp_idle_ns;
    }

    if (likely(total_ns > idle_ns))
        load = (100 * (total_ns - idle_ns)) / total_ns;

    /* Check for frequency increase */
    if (load > dbs_tuners_ins.up_threshold) {
        /* if we are already at full speed then break out early */
        if (policy->cur == policy->max)
            return;
        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
        return;
    }

    /* Check for frequency decrease */
    /* if we cannot reduce the frequency anymore, break out early */
    if (policy->cur == policy->min)
        return;

    /*
     * The optimal frequency is the lowest frequency that can support
     * the current CPU usage without triggering the up policy. To be
     * safe, we stay 10 points under the threshold.
     */
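    /*
     * Worked example: with up_threshold = 80, a CPU at 2000MHz showing
     * 35% load gets freq_next = 2000 * 35 / 70 = 1000MHz; at 1000MHz
     * the same work corresponds to ~70% load, right at the 10-point
     * safety margin below the up trigger.
     */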
    if (load < (dbs_tuners_ins.up_threshold - 10)) {
        unsigned int freq_next, freq_cur;

        freq_cur = __cpufreq_driver_getavg(policy);
        if (!freq_cur)
            freq_cur = policy->cur;

        freq_next = (freq_cur * load) / (dbs_tuners_ins.up_threshold - 10);

        __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
    }
}
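
/*
 * Timer handler: checks load once, then re-arms the one-shot per-CPU
 * timer for NOW() + sampling_rate, yielding periodic sampling until
 * ->enable is cleared.
 */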
static void do_dbs_timer(void *dbs)
{
    struct cpu_dbs_info_s *dbs_info = (struct cpu_dbs_info_s *)dbs;

    if (!dbs_info->enable)
        return;

    dbs_check_cpu(dbs_info);

    set_timer(&dbs_timer[dbs_info->cpu], NOW() + dbs_tuners_ins.sampling_rate);
}

static void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
    dbs_info->enable = 1;

    init_timer(&dbs_timer[dbs_info->cpu], do_dbs_timer,
               (void *)dbs_info, dbs_info->cpu);

    set_timer(&dbs_timer[dbs_info->cpu], NOW() + dbs_tuners_ins.sampling_rate);
}

static void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
    dbs_info->enable = 0;
    stop_timer(&dbs_timer[dbs_info->cpu]);
}
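
/*
 * Governor entry point, called by the cpufreq core: GOV_START seeds the
 * per-CPU idle/wall baselines and arms the sampling timer, GOV_STOP
 * tears the timer down, and GOV_LIMITS clamps the current frequency
 * back into a changed [min, max] range.
 */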
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
    unsigned int cpu = policy->cpu;
    struct cpu_dbs_info_s *this_dbs_info;
    unsigned int j;

    this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

    switch (event) {
    case CPUFREQ_GOV_START:
        if ((!cpu_online(cpu)) || (!policy->cur))
            return -EINVAL;

        if (policy->cpuinfo.transition_latency >
            (TRANSITION_LATENCY_LIMIT * 1000)) {
            printk(KERN_WARNING "ondemand governor failed to load: "
                   "transition latency too long\n");
            return -EINVAL;
        }
        if (this_dbs_info->enable)
            /* Already enabled */
            break;

        dbs_enable++;

        for_each_cpu_mask(j, policy->cpus) {
            struct cpu_dbs_info_s *j_dbs_info;
            j_dbs_info = &per_cpu(cpu_dbs_info, j);
            j_dbs_info->cur_policy = policy;

            j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
            j_dbs_info->prev_cpu_wall = NOW();
        }
        this_dbs_info->cpu = cpu;
        /*
         * Pick the default sampling rate the first time this governor
         * is started on any CPU.
         */
        if (dbs_enable == 1) {
            def_sampling_rate = policy->cpuinfo.transition_latency *
                DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
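            /*
             * Worked example (assuming transition_latency is reported
             * in ns, as in Linux cpufreq): a 10,000ns (10us) latency
             * gives 10,000 * 1000 = 10ms between samples, which the
             * clamp below raises to MIN_STAT_SAMPLING_RATE (20ms).
             */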
            if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

            dbs_tuners_ins.sampling_rate = def_sampling_rate;
        }
        dbs_timer_init(this_dbs_info);

        break;

    case CPUFREQ_GOV_STOP:
        if (this_dbs_info->enable)
            dbs_timer_exit(this_dbs_info);
        dbs_enable--;

        break;

    case CPUFREQ_GOV_LIMITS:
        if (policy->max < this_dbs_info->cur_policy->cur)
            __cpufreq_driver_target(this_dbs_info->cur_policy,
                                    policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > this_dbs_info->cur_policy->cur)
            __cpufreq_driver_target(this_dbs_info->cur_policy,
                                    policy->min, CPUFREQ_RELATION_L);
        break;
    }
    return 0;
}
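
/*
 * Usage sketch (an assumption for illustration, not part of this
 * changeset): the cpufreq core would invoke this entry point through a
 * Linux-style governor descriptor, e.g.
 *
 *     struct cpufreq_governor cpufreq_gov_dbs = {
 *         .name     = "ondemand",
 *         .governor = cpufreq_governor_dbs,
 *     };
 *
 * with the core issuing CPUFREQ_GOV_START/STOP/LIMITS events as
 * policies come and go.
 */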