debuggers.hg

view xen/common/sched_rrobin.c @ 3651:f98fa170a9f4

bitkeeper revision 1.1159.238.2 (4200caf6iFnj85XmiFNAz7VursMGUw)

Slab caches for things allocated only on initialization seems to be
overkill. This patch replaces them with the previous typesafe
allocator.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (authored)
Signed-off-by: ian.pratt@cl.cam.ac.uk
author iap10@labyrinth.cl.cam.ac.uk
date Wed Feb 02 12:43:34 2005 +0000 (2005-02-02)
parents 51052c8b6456
children 0ef6e8e6e85d
line source
1 /****************************************************************************
2 * Round Robin Scheduler for Xen
3 *
4 * by Mark Williamson (C) 2004 Intel Research Cambridge
5 */
7 #include <xen/sched.h>
8 #include <xen/sched-if.h>
9 #include <public/sched_ctl.h>
10 #include <xen/ac_timer.h>
11 #include <xen/softirq.h>
12 #include <xen/time.h>
13 #include <xen/slab.h>
/* Allow a context switch to occur up to this much early. */
#define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */

/* Length of the slice handed to each domain; settable at runtime via rr_ctl(). */
static s_time_t rr_slice = MILLISECS(10);

/* Per-domain info: runqueue linkage plus a back-pointer to the domain. */
struct rrobin_dom_info
{
    struct list_head run_list;  /* linkage into the per-CPU runqueue */
    struct domain *domain;      /* owning domain (set by rr_add_task()) */
};

/* Per-domain scheduler data, stashed in d->sched_priv by rr_alloc_task(). */
#define RR_INFO(d) ((struct rrobin_dom_info *)d->sched_priv)
/* The domain's linkage node within its CPU's runqueue. */
#define RUNLIST(d) ((struct list_head *)&(RR_INFO(d)->run_list))
/* The runqueue head is embedded in the idle task's rrobin_dom_info. */
#define RUNQUEUE(cpu) RUNLIST(schedule_data[cpu].idle)
30 static inline void __add_to_runqueue_head(struct domain *d)
31 {
32 list_add(RUNLIST(d), RUNQUEUE(d->processor));
33 }
35 static inline void __add_to_runqueue_tail(struct domain *d)
36 {
37 list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
38 }
40 static inline void __del_from_runqueue(struct domain *d)
41 {
42 struct list_head *runlist = RUNLIST(d);
43 list_del(runlist);
44 runlist->next = NULL;
45 }
47 static inline int __task_on_runqueue(struct domain *d)
48 {
49 return (RUNLIST(d))->next != NULL;
50 }
52 /* Initialises the runqueues and creates the domain info cache */
53 static int rr_init_scheduler()
54 {
55 int i;
57 for ( i = 0; i < NR_CPUS; i++ )
58 INIT_LIST_HEAD(RUNQUEUE(i));
60 return 0;
61 }
62 /* Allocates memory for per domain private scheduling data*/
63 static int rr_alloc_task(struct domain *d)
64 {
65 if ( (d->sched_priv = new(struct rrobin_dom_info) == NULL )
66 return -1;
67 memset(d->sched_priv, 0, sizeof(struct rrobin_dom_info));
68 return 0;
69 }
71 /* Setup the rr_dom_info */
72 static void rr_add_task(struct domain *d)
73 {
74 struct rrobin_dom_info *inf;
75 RR_INFO(d)->domain = d;
76 inf = RR_INFO(d);
77 }
79 /* Frees memory used by domain info */
80 static void rr_free_task(struct domain *d)
81 {
82 ASSERT(d->sched_priv != NULL);
83 xfree(d->sched_priv);
84 }
86 /* Initialises idle task */
87 static int rr_init_idle_task(struct domain *d)
88 {
89 if ( rr_alloc_task(d) < 0 )
90 return -1;
92 rr_add_task(d);
94 set_bit(DF_RUNNING, &d->flags);
95 if ( !__task_on_runqueue(d) )
96 __add_to_runqueue_head(d);
98 return 0;
99 }
101 /* Main scheduling function */
102 static task_slice_t rr_do_schedule(s_time_t now)
103 {
104 struct domain *prev = current;
105 int cpu = current->processor;
106 task_slice_t ret;
108 if ( !is_idle_task(prev) )
109 {
110 __del_from_runqueue(prev);
112 if ( domain_runnable(prev) )
113 __add_to_runqueue_tail(prev);
114 }
116 ret.task = list_entry(RUNQUEUE(cpu)->next,
117 struct rrobin_dom_info,
118 run_list)->domain;
119 ret.time = rr_slice;
120 return ret;
121 }
123 /* Set/retrive control parameter(s) */
124 static int rr_ctl(struct sched_ctl_cmd *cmd)
125 {
126 if ( cmd->direction == SCHED_INFO_PUT )
127 {
128 rr_slice = cmd->u.rrobin.slice;
129 }
130 else /* cmd->direction == SCHED_INFO_GET */
131 {
132 cmd->u.rrobin.slice = rr_slice;
133 }
135 return 0;
136 }
138 static void rr_dump_settings()
139 {
140 printk("rr_slice = %llu ", rr_slice);
141 }
143 static void rr_sleep(struct domain *d)
144 {
145 if ( test_bit(DF_RUNNING, &d->flags) )
146 cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
147 else if ( __task_on_runqueue(d) )
148 __del_from_runqueue(d);
149 }
151 void rr_wake(struct domain *d)
152 {
153 struct domain *curr;
154 s_time_t now;
155 int cpu = d->processor;
157 if ( unlikely(__task_on_runqueue(d)) )
158 return;
160 __add_to_runqueue_head(d);
162 now = NOW();
164 curr = schedule_data[cpu].curr;
165 if ( is_idle_task(curr) )
166 cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
167 }
170 static void rr_dump_domain(struct domain *d)
171 {
172 printk("%u has=%c ", d->id,
173 test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
174 printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
175 }
177 static void rr_dump_cpu_state(int i)
178 {
179 struct list_head *queue;
180 int loop = 0;
181 struct rrobin_dom_info *d_inf;
183 queue = RUNQUEUE(i);
184 printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
185 (unsigned long) queue->next, (unsigned long) queue->prev);
187 printk("%3d: ",loop++);
188 d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
189 rr_dump_domain(d_inf->domain);
191 list_for_each_entry ( d_inf, queue, run_list )
192 {
193 printk("%3d: ",loop++);
194 rr_dump_domain(d_inf->domain);
195 }
196 }
/* Scheduler operations table, registered with the common scheduler core. */
struct scheduler sched_rrobin_def = {
    .name = "Round-Robin Scheduler",
    .opt_name = "rrobin",    /* selected via boot-time scheduler option */
    .sched_id = SCHED_RROBIN,

    .init_idle_task = rr_init_idle_task,
    .alloc_task = rr_alloc_task,
    .add_task = rr_add_task,
    .free_task = rr_free_task,
    .init_scheduler = rr_init_scheduler,
    .do_schedule = rr_do_schedule,
    .control = rr_ctl,
    .dump_settings = rr_dump_settings,
    .dump_cpu_state = rr_dump_cpu_state,
    .sleep = rr_sleep,
    .wake = rr_wake,
};