debuggers.hg

view xen/common/sched_rrobin.c @ 3570:51052c8b6456

bitkeeper revision 1.1159.212.38 (41f6537aX7dfqsdH6-jWzX24faBDtQ)

manual merge.
author kaf24@scramble.cl.cam.ac.uk
date Tue Jan 25 14:11:06 2005 +0000 (2005-01-25)
parents 3f929065a1d1 dee91b44a753
children d8ba911dce48 f98fa170a9f4 0ef6e8e6e85d
line source
1 /****************************************************************************
2 * Round Robin Scheduler for Xen
3 *
4 * by Mark Williamson (C) 2004 Intel Research Cambridge
5 */
7 #include <xen/sched.h>
8 #include <xen/sched-if.h>
9 #include <public/sched_ctl.h>
10 #include <xen/ac_timer.h>
11 #include <xen/softirq.h>
12 #include <xen/time.h>
13 #include <xen/slab.h>
/* Scheduling slack: allow timers/accounting to slip by up to 50us.
 * NOTE(review): unused within this file — presumably consumed by common
 * scheduler code; confirm before removing. */
#define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */

/* Length of the time slice given to each domain; settable via rr_ctl(). */
static s_time_t rr_slice = MILLISECS(10);
/* Per-domain scheduler-private state: only the runqueue linkage and a
 * back-pointer to the owning domain. */
struct rrobin_dom_info
{
    struct list_head run_list; /* linkage on this CPU's runqueue */
    struct domain *domain;     /* owning domain */
};
26 #define RR_INFO(d) ((struct rrobin_dom_info *)d->sched_priv)
27 #define RUNLIST(d) ((struct list_head *)&(RR_INFO(d)->run_list))
28 #define RUNQUEUE(cpu) RUNLIST(schedule_data[cpu].idle)
30 static xmem_cache_t *dom_info_cache;
32 static inline void __add_to_runqueue_head(struct domain *d)
33 {
34 list_add(RUNLIST(d), RUNQUEUE(d->processor));
35 }
37 static inline void __add_to_runqueue_tail(struct domain *d)
38 {
39 list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
40 }
42 static inline void __del_from_runqueue(struct domain *d)
43 {
44 struct list_head *runlist = RUNLIST(d);
45 list_del(runlist);
46 runlist->next = NULL;
47 }
49 static inline int __task_on_runqueue(struct domain *d)
50 {
51 return (RUNLIST(d))->next != NULL;
52 }
54 /* Initialises the runqueues and creates the domain info cache */
55 static int rr_init_scheduler()
56 {
57 int i;
59 for ( i = 0; i < NR_CPUS; i++ )
60 INIT_LIST_HEAD(RUNQUEUE(i));
62 dom_info_cache = xmem_cache_create(
63 "RR dom info", sizeof(struct rrobin_dom_info), 0, 0, 0, NULL);
64 if ( dom_info_cache == NULL )
65 {
66 printk("Could not allocate SLAB cache.\n");
67 return -1;
68 }
70 return 0;
71 }
73 /* Allocates memory for per domain private scheduling data*/
74 static int rr_alloc_task(struct domain *d)
75 {
76 if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
77 return -1;
78 memset(d->sched_priv, 0, sizeof(struct rrobin_dom_info));
79 return 0;
80 }
82 /* Setup the rr_dom_info */
83 static void rr_add_task(struct domain *d)
84 {
85 struct rrobin_dom_info *inf;
86 RR_INFO(d)->domain = d;
87 inf = RR_INFO(d);
88 }
90 /* Frees memory used by domain info */
91 static void rr_free_task(struct domain *d)
92 {
93 ASSERT(d->sched_priv != NULL);
94 xmem_cache_free(dom_info_cache, d->sched_priv);
95 }
/* Initialises a CPU's idle task: allocates its scheduler-private data,
 * marks it running, and puts it at the head of the runqueue.  Its
 * run_list element doubles as the runqueue head for that CPU (see
 * RUNQUEUE()).  Returns 0 on success, -1 on allocation failure. */
static int rr_init_idle_task(struct domain *d)
{
    if ( rr_alloc_task(d) < 0 )
        return -1;

    rr_add_task(d);

    set_bit(DF_RUNNING, &d->flags);
    /* Freshly allocated info is zeroed, so the task cannot already be
     * queued; the check is defensive. */
    if ( !__task_on_runqueue(d) )
        __add_to_runqueue_head(d);

    return 0;
}
/* Main scheduling function: picks the head of this CPU's runqueue as the
 * next task and grants it the global rr_slice.  The outgoing domain is
 * requeued at the tail if still runnable, otherwise dropped. */
static task_slice_t rr_do_schedule(s_time_t now)
{
    struct domain *prev = current;
    int cpu = current->processor;
    task_slice_t ret;

    /* The idle task's run_list is the queue head itself, so it is never
     * moved; only real domains are rotated. */
    if ( !is_idle_task(prev) )
    {
        __del_from_runqueue(prev);

        if ( domain_runnable(prev) )
            __add_to_runqueue_tail(prev);
    }

    /* Head of the runqueue runs next (idle task if the queue is empty). */
    ret.task = list_entry(RUNQUEUE(cpu)->next,
                          struct rrobin_dom_info,
                          run_list)->domain;
    ret.time = rr_slice;
    return ret;
}
134 /* Set/retrive control parameter(s) */
135 static int rr_ctl(struct sched_ctl_cmd *cmd)
136 {
137 if ( cmd->direction == SCHED_INFO_PUT )
138 {
139 rr_slice = cmd->u.rrobin.slice;
140 }
141 else /* cmd->direction == SCHED_INFO_GET */
142 {
143 cmd->u.rrobin.slice = rr_slice;
144 }
146 return 0;
147 }
149 static void rr_dump_settings()
150 {
151 printk("rr_slice = %llu ", rr_slice);
152 }
/* Takes a domain out of scheduling consideration.  If it is running right
 * now we cannot unlink it directly — force a reschedule on its CPU and
 * let rr_do_schedule() drop it; otherwise just remove it from the
 * runqueue if it is queued. */
static void rr_sleep(struct domain *d)
{
    if ( test_bit(DF_RUNNING, &d->flags) )
        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
    else if ( __task_on_runqueue(d) )
        __del_from_runqueue(d);
}
162 void rr_wake(struct domain *d)
163 {
164 struct domain *curr;
165 s_time_t now;
166 int cpu = d->processor;
168 if ( unlikely(__task_on_runqueue(d)) )
169 return;
171 __add_to_runqueue_head(d);
173 now = NOW();
175 curr = schedule_data[cpu].curr;
176 if ( is_idle_task(curr) )
177 cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
178 }
/* Prints one domain's id, running flag, and accumulated CPU time. */
static void rr_dump_domain(struct domain *d)
{
    printk("%u has=%c ", d->id,
           test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
    /* cpu_time is 64-bit; print as two 32-bit halves for portability. */
    printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
}
/* Dumps CPU i's runqueue.  The queue head is embedded in the idle task's
 * rrobin_dom_info (see RUNQUEUE()), so entry 0 below is the idle domain
 * recovered from the head itself, followed by the queued domains. */
static void rr_dump_cpu_state(int i)
{
    struct list_head *queue;
    int loop = 0;
    struct rrobin_dom_info *d_inf;

    queue = RUNQUEUE(i);
    printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
           (unsigned long) queue->next, (unsigned long) queue->prev);

    /* Entry 0: the idle task, whose run_list is the queue head. */
    printk("%3d: ",loop++);
    d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
    rr_dump_domain(d_inf->domain);

    list_for_each_entry ( d_inf, queue, run_list )
    {
        printk("%3d: ",loop++);
        rr_dump_domain(d_inf->domain);
    }
}
/* Round-robin scheduler method table, registered with the common
 * scheduler framework. */
struct scheduler sched_rrobin_def = {
    .name     = "Round-Robin Scheduler",
    .opt_name = "rrobin",
    .sched_id = SCHED_RROBIN,

    .init_idle_task = rr_init_idle_task,
    .alloc_task     = rr_alloc_task,
    .add_task       = rr_add_task,
    .free_task      = rr_free_task,
    .init_scheduler = rr_init_scheduler,
    .do_schedule    = rr_do_schedule,
    .control        = rr_ctl,
    .dump_settings  = rr_dump_settings,
    .dump_cpu_state = rr_dump_cpu_state,
    .sleep          = rr_sleep,
    .wake           = rr_wake,
};