
view xen/common/sched_sedf.c @ 3621:3c2ccb0cf6c0

bitkeeper revision 1.1159.170.103 (41fa76f7AyfHPooBzZbsn0ctSduSJg)

Bugfixing in SEDF
author sd386@font.cl.cam.ac.uk
date Fri Jan 28 17:31:35 2005 +0000 (2005-01-28)
parents 3dc193a9786a
children c712c4935a13
line source
/****************************************************************************
 * Simple EDF scheduler for xen
 *
 * by Stephan Diestelhorst (C) 2004 Cambridge University
 * based on code by Mark Williamson (C) 2004 Intel Research Cambridge
 */
#include <xen/sched.h>
#include <xen/sched-if.h>
#include <public/sched_ctl.h>
#include <xen/ac_timer.h>
#include <xen/softirq.h>
#include <xen/time.h>
#include <xen/slab.h>
#define SEDFLEVEL 2
#define PRINT(_f, _a...) \
    do { if ( (_f) <= SEDFLEVEL ) printk(_a); } while ( 0 )
/*
  TODO:
  TESTING!
  implement stylish features!
  tracing instead of PRINTs
*/
#define TRC_SEDF      0xBEEF0000
#define EXTRA_NONE    (0)
#define EXTRA_AWARE   (1)
#define EXTRA_RUNNING (2)
#define EXTRA_QUANTUM (MICROSECS(1000))
#define WEIGHT_PERIOD (MILLISECS(100))
#define WEIGHT_SAFETY (MILLISECS(5))
struct sedf_dom_info
{
    struct domain    *owner;
    struct list_head list;
    struct list_head extralist;

    //Parameters for EDF
    s_time_t period;  //=(relative deadline)
    s_time_t slice;   //=worst case execution time

    //Advanced Parameters
    //Latency Scaling
    s_time_t period_orig;
    s_time_t slice_orig;
    s_time_t latency;
    //extra-time status of domain
    short    extra;
    //weights for "Scheduling for Beginners/ Lazy/ etc."
    short    weight;

    //Bookkeeping
    s_time_t absdead;
    s_time_t sched_start;
    s_time_t cputime;
    s_time_t absblock;

    //Statistics
    s_time_t block_time_tot;
    s_time_t penalty_time_tot;
    s_time_t extra_time_tot;
    int      block_tot;
    int      short_block_tot;
    int      long_block_tot;
    int      short_cont;
};
struct sedf_cpu_info {
    struct list_head runnableq;
    struct list_head waitq;
    struct list_head extraq;
};
#define DOM_INFO(d)   ((struct sedf_dom_info *)((d)->sched_priv))
#define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
#define LIST(d)       (&DOM_INFO(d)->list)
#define EXTRALIST(d)  (&DOM_INFO(d)->extralist)
#define RUNQ(cpu)     (&CPU_INFO(cpu)->runnableq)
#define WAITQ(cpu)    (&CPU_INFO(cpu)->waitq)
#define EXTRAQ(cpu)   (&CPU_INFO(cpu)->extraq)
#define IDLETASK(cpu) ((struct domain *)schedule_data[cpu].idle)

#define PERIOD_BEGIN(inf) ((inf)->absdead - (inf)->period)
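/* Worked example (editor's sketch, not in the original source): with
 * absdead = 30ms and period = 10ms, PERIOD_BEGIN(inf) = 20ms, i.e. the
 * current period spans [20ms, 30ms). The waitqueue below is sorted by
 * exactly this value. */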
static xmem_cache_t *dom_info_cache;

static inline void extraq_add_head(struct domain *d)
{
    list_add(EXTRALIST(d), EXTRAQ(d->processor));
}

static inline void extraq_add_tail(struct domain *d)
{
    list_add_tail(EXTRALIST(d), EXTRAQ(d->processor));
}

static inline void extraq_del(struct domain *d)
{
    struct list_head *list = EXTRALIST(d);
    list_del(list);
    list->next = NULL;
}
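/* Editor's note: extraq_del() deliberately resets ->next to NULL after
 * list_del(); extraq_on() below (and likewise __task_on_queue()) uses this
 * sentinel to test queue membership without a separate flag. */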
static inline int extraq_on(struct domain *d) {
    return (((EXTRALIST(d))->next != NULL) &&
            (EXTRALIST(d)->next != EXTRALIST(d)));
}
static inline void extraq_check(struct domain *d) {
    if (extraq_on(d)) {
        PRINT(2,"Dom %i is on extraQ\n",d->id);
        if (DOM_INFO(d)->extra == EXTRA_NONE) {
            extraq_del(d);
            PRINT(2,"Removed dom %i from extraQ\n",d->id);
        }
    } else {
        PRINT(2,"Dom %i is NOT on extraQ\n",d->id);
        if (DOM_INFO(d)->extra != EXTRA_NONE) {
            PRINT(2,"Added dom %i to extraQ\n",d->id);
            extraq_add_tail(d);
        }
    }
}
static inline void __del_from_queue(struct domain *d)
{
    struct list_head *list = LIST(d);
    list_del(list);
    list->next = NULL;
}
/* Adds a domain to the queue of processes which wait for the beginning of the
 * next period; this list is therefore sorted by this time, which is simply
 * absolute deadline - period.
 */
static inline void __add_to_waitqueue_sort(struct domain *d) {
    struct list_head *cur;
    struct sedf_dom_info *curinf;

    PRINT(3,"Adding domain %i (bop= %llu) to waitq\n",
          d->id, PERIOD_BEGIN(DOM_INFO(d)));
    //iterate through all elements to find our "hole"
    list_for_each(cur, WAITQ(d->processor)) {
        curinf = list_entry(cur, struct sedf_dom_info, list);
        if (PERIOD_BEGIN(DOM_INFO(d)) < PERIOD_BEGIN(curinf))
            break;
        else
            PRINT(4,"\tbehind domain %i (bop= %llu)\n",
                  curinf->owner->id, PERIOD_BEGIN(curinf));
    }
    //cur now contains the element before which we'll enqueue
    PRINT(3,"\tlist_add to %p\n",cur->prev);
    list_add(LIST(d), cur->prev);
}
/* Adds a domain to the queue of processes which have started their current
 * period and are runnable (i.e. not blocked, dying,...). The first element on
 * this list is running on the processor; if the list is empty the idle task
 * will run. As we are implementing EDF, this list is sorted by deadlines.
 */
static inline void __add_to_runqueue_sort(struct domain *d) {
    struct list_head *cur;
    struct sedf_dom_info *curinf;

    PRINT(3,"Adding domain %i (deadl= %llu) to runq\n",
          d->id, DOM_INFO(d)->absdead);
    //iterate through all elements to find our "hole"
    list_for_each(cur, RUNQ(d->processor)) {
        curinf = list_entry(cur, struct sedf_dom_info, list);
        if (DOM_INFO(d)->absdead < curinf->absdead)
            break;
        else
            PRINT(4,"\tbehind domain %i (deadl= %llu)\n",
                  curinf->owner->id, curinf->absdead);
    }
    //cur now contains the element before which we'll enqueue
    PRINT(3,"\tlist_add to %p\n",cur->prev);
    list_add(LIST(d), cur->prev);
}
static inline int __task_on_queue(struct domain *d) {
    return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
}
/* Initialises the queues and creates the domain info cache */
static int sedf_init_scheduler(void) {
    int i;
    PRINT(2,"sedf_init_scheduler was called\n");

    for ( i = 0; i < NR_CPUS; i++ ) {
        schedule_data[i].sched_priv = xmalloc(sizeof(struct sedf_cpu_info));
        if ( schedule_data[i].sched_priv == NULL )
            return -1;
        INIT_LIST_HEAD(WAITQ(i));
        INIT_LIST_HEAD(RUNQ(i));
        INIT_LIST_HEAD(EXTRAQ(i));
    }

    dom_info_cache = xmem_cache_create(
        "SEDF dom info", sizeof(struct sedf_dom_info), 0, 0, 0, NULL);
    if ( dom_info_cache == NULL )
    {
        printk("Could not allocate SLAB cache.\n");
        return -1;
    }

    return 0;
}
/* Allocates memory for per domain private scheduling data */
static int sedf_alloc_task(struct domain *d) {
    PRINT(2,"sedf_alloc_task was called, domain-id %i\n",d->id);
    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
        return -1;
    memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
    return 0;
}
/* Setup the sedf_dom_info */
static void sedf_add_task(struct domain *d)
{
    struct sedf_dom_info *inf = DOM_INFO(d);
    inf->owner = d;

    PRINT(2,"sedf_add_task was called, domain-id %i\n",d->id);
    if (d->id == 0) {
        //set dom0 to something useful to boot the machine
        inf->period  = MILLISECS(20);
        inf->slice   = MILLISECS(15);
        inf->latency = 0;
        inf->absdead = 0;
        inf->extra   = EXTRA_NONE;  //EXTRA_AWARE;
    }
    else {
        //other domains don't get any execution time at all in the beginning!
        inf->period  = MILLISECS(20);
        inf->slice   = 0;
        inf->absdead = 0;
        inf->latency = 0;
        inf->extra   = EXTRA_NONE;  //EXTRA_AWARE;
    }
    inf->period_orig = inf->period;
    inf->slice_orig  = inf->slice;
    INIT_LIST_HEAD(&(inf->list));
    INIT_LIST_HEAD(&(inf->extralist));
}
/* Frees memory used by domain info */
static void sedf_free_task(struct domain *d)
{
    PRINT(2,"sedf_free_task was called, domain-id %i\n",d->id);
    ASSERT(d->sched_priv != NULL);
    xmem_cache_free(dom_info_cache, d->sched_priv);
}
/* Initialises idle task */
static int sedf_init_idle_task(struct domain *d) {
    PRINT(2,"sedf_init_idle_task was called, domain-id %i\n",d->id);
    if ( sedf_alloc_task(d) < 0 )
        return -1;

    sedf_add_task(d);
    DOM_INFO(d)->absdead = 0;
    set_bit(DF_RUNNING, &d->flags);
    //the idle task doesn't have to turn up on any list...
    return 0;
}
#define MIN(x,y) (((x)<(y))?(x):(y))

/* Main scheduling function
 * Reasons for calling this function are:
 * -timeslice for the current period used up
 * -domain on waitqueue has started its period
 * -and various others ;) in general: determine which domain to run next
 */
static task_slice_t sedf_do_schedule(s_time_t now)
{
    struct sedf_dom_info *inf    = DOM_INFO(current);
    int                  cpu     = current->processor;
    struct list_head     *runq   = RUNQ(cpu);
    struct list_head     *waitq  = WAITQ(cpu);
    struct list_head     *extraq = EXTRAQ(cpu);
    struct list_head     *cur, *tmp;
    struct sedf_dom_info *curinf;
    task_slice_t         ret;

    //idle tasks don't need any of the following stuff
    if (is_idle_task(inf->owner))
        goto check_waitq;  //idle task doesn't get scheduled on the runq
    if (unlikely(inf->extra == EXTRA_RUNNING)) {
        //special treatment of domains running in extra time
        inf->extra = EXTRA_AWARE;
        inf->cputime = 0;
        inf->extra_time_tot += now - inf->sched_start;

        extraq_del(current);  //remove extradomain from head of the queue
        if (domain_runnable(inf->owner))
            extraq_add_tail(current);  //and add to the tail if it is runnable => round-robin
        else
            __del_from_queue(inf->owner);  //if domain blocked in extratime remove it from waitq(/runq) as well
    }
    else {
        //current domain is running in real time mode
        //update the domain's cputime
        inf->cputime += now - inf->sched_start;
        //scheduling decisions which don't move the running domain to any queues
        if ((inf->cputime < inf->slice) && domain_runnable(inf->owner))
            goto check_waitq;  //there is nothing to do with the running task

        //remove tasks that can't run
        __del_from_queue(inf->owner);

        //manage bookkeeping (i.e. calculate next deadline, memorise overrun time of slice) of finished domains
        if (inf->cputime >= inf->slice) {
            inf->cputime -= inf->slice;

            if (inf->period < inf->period_orig) {
                //this domain runs in latency scaling mode
                inf->period *= 2;
                inf->slice  *= 2;
                if ((inf->period > inf->period_orig) ||
                    (inf->slice > inf->slice_orig)) {
                    //now switch back to standard timing
                    inf->period = inf->period_orig;
                    inf->slice  = inf->slice_orig;
                }
            }
            inf->absdead += inf->period;  //set next deadline
        }
        //if (inf->absdead < now)
        //    printk("Domain %i exceeded its deadline!!!! (now: %llu ddl: %llu)\n",current->id,now,inf->absdead);
        //add a runnable domain to the waitqueue
        if (domain_runnable(inf->owner))
            __add_to_waitqueue_sort(inf->owner);
        else {
            //we have a blocked realtime task
            inf->absblock = now;
            if (inf->extra == EXTRA_AWARE)
                extraq_del(inf->owner);  //remove a blocked domain from the extraq as well
        }
    }
 check_waitq:
    //check whether the first elements of the waitqueue have already started their next period
    list_for_each_safe(cur, tmp, waitq) {
        curinf = list_entry(cur, struct sedf_dom_info, list);
        if (PERIOD_BEGIN(curinf) <= now) {
            __del_from_queue(curinf->owner);
            __add_to_runqueue_sort(curinf->owner);
        }
        else
            break;
    }

    //process the runq
    list_for_each_safe(cur, tmp, runq) {
        curinf = list_entry(cur, struct sedf_dom_info, list);
        if (unlikely(curinf->slice == 0)) {
            //special treatment of elements with empty slice
            __del_from_queue(curinf->owner);
            curinf->absdead += curinf->period;
            __add_to_waitqueue_sort(curinf->owner);
        }
        else {
            if (unlikely((curinf->absdead < now) ||
                         (curinf->cputime > curinf->slice))) {
                //we missed the deadline or the slice was already finished...
                //might happen because of dom_adj.
                //printk("Ouch! Domain %i missed deadline %llu\n",curinf->owner->id,curinf->absdead);
                __del_from_queue(curinf->owner);
                curinf->absdead +=
                    ((now - curinf->absdead + (curinf->period-1)) /
                     curinf->period) * curinf->period;
                //force start of period to be in future and aligned to period borders!
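                /* Worked example (editor's sketch): with period = 10ms,
                 * absdead = 30ms and now = 47ms, the quotient is
                 * (47 - 30 + 9) / 10 = 2 (integer division), so the deadline
                 * advances by 2 * 10ms to 50ms, the first period border
                 * strictly after 'now'. */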
                curinf->cputime = 0;
                __add_to_runqueue_sort(curinf->owner);
            }
            else
                break;
        }
    }
    //now simply pick the first domain from the runqueue
    struct sedf_dom_info *runinf, *waitinf;

    if (!list_empty(runq)) {
        runinf   = list_entry(runq->next, struct sedf_dom_info, list);
        ret.task = runinf->owner;
        if (!list_empty(waitq)) {
            //rerun scheduler when the scheduled domain reaches the end of its
            //slice or the first domain from the waitqueue gets ready
            waitinf  = list_entry(waitq->next, struct sedf_dom_info, list);
            ret.time = MIN(now + runinf->slice - runinf->cputime,
                           PERIOD_BEGIN(waitinf)) - now;
        }
        else {
            ret.time = runinf->slice - runinf->cputime;
        }
    }
    else {
        if (!list_empty(waitq)) {
            waitinf = list_entry(waitq->next, struct sedf_dom_info, list);
            //we could not find any suitable domain => look for domains that are aware of extratime
            if (!list_empty(extraq) &&
                (PERIOD_BEGIN(waitinf) - now >= EXTRA_QUANTUM)) {
                runinf = list_entry(extraq->next, struct sedf_dom_info, extralist);
                runinf->extra = EXTRA_RUNNING;
                ret.task = runinf->owner;
                ret.time = EXTRA_QUANTUM;
            }
            else {
                //we have an empty run- and extraqueue or too little time => idle task!
                ret.task = IDLETASK(cpu);
                ret.time = PERIOD_BEGIN(waitinf) - now;
            }
        }
        else {
            //this could probably never happen, but one never knows...
            //it can... imagine a second CPU, which is pure sci-fi at the moment, but one never knows ;)
            ret.task = IDLETASK(cpu);
            ret.time = SECONDS(1);
        }
    }
    if (ret.time < 0)
        printk("Ouch! We are seriously BEHIND schedule! %lli\n", ret.time);
    DOM_INFO(ret.task)->sched_start = now;
    return ret;
}
static void sedf_sleep(struct domain *d) {
    PRINT(2,"sedf_sleep was called, domain-id %i\n",d->id);
    if ( test_bit(DF_RUNNING, &d->flags) )
        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
    else {
        if ( __task_on_queue(d) )
            __del_from_queue(d);
        if ( extraq_on(d) )
            extraq_del(d);
    }
}
/* This function wakes up a domain, i.e. moves it onto the waitqueue.
 * Things to mention: admission control is taking place nowhere at the moment,
 * so we can't be sure whether it is safe to wake the domain up at all.
 * Anyway, even if it is safe (total cpu usage <= 100%) there are some
 * considerations on when to allow the domain to wake up and have its first
 * deadline...
 * I identified 3 cases which could describe the possible behaviour of the
 * scheduler, and I'll try to make them more clear:
 *
 * 1. Very conservative
 *    -when a blocked domain unblocks, it is allowed to start execution at
 *     the beginning of the next complete period
 *     (D..deadline, R..running, B..blocking/sleeping, U..unblocking/waking up)
 *
 *     DRRB_____D__U_____DRRRRR___D________ ...
 *
 *    -this causes the domain to miss a period (and a deadline)
 *    -doesn't disturb the schedule at all
 *    -deadlines keep occurring isochronously
 *
 * 2. Conservative Part 1: Short Unblocking
 *    -when a domain unblocks in the same period as it was blocked, it
 *     unblocks and may consume the rest of its original time-slice minus
 *     the time it was blocked
 *     (assume period=9, slice=5)
 *
 *     DRB_UR___DRRRRR___D...
 *
 *    -this also doesn't disturb scheduling, but may mean that the domain
 *     can't finish its workload in the period
 *    -in addition, the domain can be treated with priority when extra time
 *     is available
 *
 *    Part 2: Long Unblocking
 *    Part 2a
 *    -it is obvious that such behaviour, applied when the unblocking happens
 *     in later periods, works fine as well
 *    -the domain is treated as if it had been running since the start of its
 *     new period
 *
 *     DRB______D___UR___D...
 *
 *    Part 2b
 *    -if one needs the full slice in the next period, it is necessary to
 *     treat the unblocking time as the start of the new period, i.e. move
 *     the deadline further back (later)
 *    -this doesn't disturb scheduling either, because for EDF periods can be
 *     treated as minimal inter-release times and scheduling stays correct
 *     when deadlines are kept relative to the time the process unblocks
 *
 *     DRB______D___URRRR___D...
 *                          (D)
 *    -problem: deadlines don't occur isochronously anymore
 *
 *    Part 2c (Improved Atropos design)
 *    -when a domain unblocks, it is given a very short period (=latency
 *     hint) and a slice length scaled accordingly
 *    -both rise again to the original value (e.g. get doubled every period)
 *
 * 3. Unconservative (i.e. incorrect)
 *    -to boost the performance of I/O dependent domains it would be possible
 *     to put the domain onto the runnable queue immediately and let it run
 *     for the remainder of the slice of the current period
 *     (or even worse: allocate a new full slice for the domain)
 *    -either behaviour can lead to missed deadlines in other domains, as
 *     opposed to approaches 1, 2a and 2b
 */
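/* Editor's note: the helpers below implement these cases: 1 ->
 * unblock_{short,long}_vcons(), 2/2a -> unblock_short_cons() and
 * unblock_long_cons_a(), 2b -> unblock_long_cons_b(), 2c ->
 * unblock_long_cons_c(). sedf_wake() further down currently uses
 * unblock_short_cons() for short blocking and unblock_long_cons_c() for
 * long blocking. */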
static inline void unblock_short_vcons(struct sedf_dom_info* inf, s_time_t now) {
    inf->absdead += inf->period;
}

static inline void unblock_long_vcons(struct sedf_dom_info* inf, s_time_t now) {
    inf->absdead += ((now - inf->absdead) / inf->period + 1) * inf->period;  //very conservative
    inf->cputime = 0;
}
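/* Worked example (editor's sketch) for unblock_long_vcons(): with
 * period = 10ms, absdead = 30ms and now = 47ms, (47 - 30) / 10 + 1 = 2,
 * so absdead becomes 50ms, the deadline of the next complete period. */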
static inline void unblock_short_cons(struct sedf_dom_info* inf, s_time_t now) {
    inf->cputime += now - inf->absblock;  //treat blocked time as consumed by the domain
    if (inf->cputime + EXTRA_QUANTUM > inf->slice) {
        //we don't have a reasonable amount of time in our slice left :(
        inf->cputime = 0;
        inf->absdead += inf->period;  //start in next period!
    }
    else
        inf->short_cont++;
}
static inline void unblock_long_cons_a(struct sedf_dom_info* inf, s_time_t now) {
    //treat the time the domain was blocked in the CURRENT period as consumed by the domain
    inf->cputime = (now - inf->absdead) % inf->period;
    if (inf->cputime + EXTRA_QUANTUM > inf->slice) {
        //we don't have a reasonable amount of time in our slice left :(
        inf->cputime = 0;
        inf->absdead += inf->period;  //start in next period!
    }
}
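/* Editor's sketch: with absdead = 30ms, period = 10ms and now = 47ms,
 * unblock_long_cons_a() charges (47 - 30) % 10 = 7ms of the current period
 * as already consumed. */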
static inline void unblock_long_cons_b(struct sedf_dom_info* inf, s_time_t now) {
    inf->absdead = now + inf->period;  //Conservative 2b
    inf->cputime = 0;
}
static inline void unblock_long_cons_c(struct sedf_dom_info* inf, s_time_t now) {
    if (likely(inf->latency)) {
        //scale the slice and period according to the latency hint
        inf->period = inf->latency;  //reduce period temporarily to the latency hint
        //this results in max. 4s slice/period length
        ASSERT((inf->latency < ULONG_MAX) && (inf->slice_orig < ULONG_MAX));
        //scale slice accordingly, so that utilisation stays the same
        inf->slice = (inf->latency * inf->slice_orig) / inf->period_orig;
    }
    else {
        //we don't have a latency hint... use some other technique
        inf->absdead = now + inf->period;  //Conservative 2b...
        inf->cputime = 0;
    }
}
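/* Worked example (editor's sketch) for the latency hint: with
 * period_orig = 20ms, slice_orig = 10ms and latency = 5ms, the domain
 * temporarily runs with period = 5ms and slice = (5 * 10) / 20 = 2.5ms;
 * utilisation stays at 50% while the response time shrinks. In
 * sedf_do_schedule() both values are doubled each period until they reach
 * their original size again. */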
void sedf_wake(struct domain *d) {
    //for the first try just implement the "very conservative" way of waking domains up
    s_time_t              now = NOW();
    struct sedf_dom_info* inf = DOM_INFO(d);

    PRINT(3,"sedf_wake was called, domain-id %i\n",d->id);

    if (unlikely(is_idle_task(d)))
        return;

    if ( unlikely(__task_on_queue(d)) ) {
        PRINT(3,"\tdomain %i is already in some queue\n",d->id);
        return;
    }
    if ( unlikely(extraq_on(d)) ) {
        PRINT(3,"\tdomain %i is already in the extraQ\n",d->id);
    }
    if (unlikely(inf->absdead == 0))
        inf->absdead = now + inf->slice;  //initial setup of the deadline

    //very conservative way of unblocking
    //make sure that the start of the period for this
    //domain is happening in the future
    PRINT(3,"waking up domain %i (deadl= %llu period= %llu now= %llu)\n",
          d->id, inf->absdead, inf->period, now);

    inf->block_tot++;
    if (unlikely(now < PERIOD_BEGIN(inf))) {
        //this might happen, imagine unblocking in extra-time!
        if (likely(inf->extra == EXTRA_AWARE))
            extraq_add_tail(d);  //SD: Could extraq_add_head be better?
        //else
        //this is very very unlikely, i.e. might even be an error?!
    }
    else {
        if (now < inf->absdead) {
            //short blocking
            inf->short_block_tot++;
            //unblock_short_vcons(inf, now);
            unblock_short_cons(inf, now);
            if (inf->extra == EXTRA_AWARE)
                extraq_add_head(d);
        }
        else {
            //long blocking
            inf->long_block_tot++;
            //PRINT(3,"old=%llu ",inf->absdead);
            //unblock_long_vcons(inf, now);
            unblock_long_cons_c(inf, now);
            if (inf->extra == EXTRA_AWARE)
                extraq_add_tail(d);
        }
    }
    PRINT(3,"waking up domain %i (deadl= %llu period= %llu now= %llu)\n",
          d->id, inf->absdead, inf->period, now);
    __add_to_waitqueue_sort(d);
    PRINT(3,"added to waitq\n");

    //do some statistics here...
    if (inf->absblock != 0) {
        inf->block_time_tot   += now - inf->absblock;
        inf->penalty_time_tot += PERIOD_BEGIN(inf) + inf->cputime - inf->absblock;
        /*if (DOM_INFO(d)->block_time_tot)
              PRINT(3,"penalty: %lu\n",(DOM_INFO(d)->penalty_time_tot*100)/DOM_INFO(d)->block_time_tot);*/
    }
    /*if ( is_idle_task(schedule_data[d->processor].curr)) {
          cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
          return;
      }*/

    //check whether the awakened task needs to be scheduled before the next
    //scheduling decision, and check whether we are idling and this domain is
    //extratime aware
    if ((PERIOD_BEGIN(inf) < schedule_data[d->processor].s_timer.expires) ||
        (is_idle_task(schedule_data[d->processor].curr) &&
         (now + EXTRA_QUANTUM < schedule_data[d->processor].s_timer.expires) &&
         (inf->extra == EXTRA_AWARE)))
        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
}
/* This could probably be a bit more specific! */
static void sedf_dump_domain(struct domain *d) {
    printk("%u has=%c ", d->id,
           test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
    printk("p=%llu sl=%llu ddl=%llu w=%u c=%llu xtr(%s)=%llu",
           DOM_INFO(d)->period, DOM_INFO(d)->slice, DOM_INFO(d)->absdead,
           DOM_INFO(d)->weight, d->cpu_time,
           DOM_INFO(d)->extra ? "yes" : "no", DOM_INFO(d)->extra_time_tot);
    if (d->cpu_time != 0)
        printf(" (%llu%%)", (DOM_INFO(d)->extra_time_tot * 100) / d->cpu_time);
    if (DOM_INFO(d)->block_time_tot != 0)
        printf(" pen=%llu%%", (DOM_INFO(d)->penalty_time_tot * 100) /
               DOM_INFO(d)->block_time_tot);
    if (DOM_INFO(d)->block_tot != 0)
        printf("\n   blks=%i sh=%i (%i%%) (shc=%i (%i%%)) l=%i (%i%%)"
               " avg: b=%llu p=%llu",
               DOM_INFO(d)->block_tot,
               DOM_INFO(d)->short_block_tot,
               (DOM_INFO(d)->short_block_tot * 100) / DOM_INFO(d)->block_tot,
               DOM_INFO(d)->short_cont,
               (DOM_INFO(d)->short_cont * 100) / DOM_INFO(d)->block_tot,
               DOM_INFO(d)->long_block_tot,
               (DOM_INFO(d)->long_block_tot * 100) / DOM_INFO(d)->block_tot,
               (DOM_INFO(d)->block_time_tot) / DOM_INFO(d)->block_tot,
               (DOM_INFO(d)->penalty_time_tot) / DOM_INFO(d)->block_tot);
    printf("\n");
}
static void sedf_dump_cpu_state(int i)
{
    struct list_head     *list, *queue, *tmp;
    int                  loop = 0;
    struct sedf_dom_info *d_inf;
    struct domain        *d;

    printk("now=%llu\n",NOW());
    queue = RUNQ(i);
    printk("RUNQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
           (unsigned long) queue->next, (unsigned long) queue->prev);
    list_for_each_safe ( list, tmp, queue )
    {
        printk("%3d: ",loop++);
        d_inf = list_entry(list, struct sedf_dom_info, list);
        sedf_dump_domain(d_inf->owner);
    }

    queue = WAITQ(i); loop = 0;
    printk("\nWAITQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
           (unsigned long) queue->next, (unsigned long) queue->prev);
    list_for_each_safe ( list, tmp, queue )
    {
        printk("%3d: ",loop++);
        d_inf = list_entry(list, struct sedf_dom_info, list);
        sedf_dump_domain(d_inf->owner);
    }

    queue = EXTRAQ(i); loop = 0;
    printk("\nEXTRAQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
           (unsigned long) queue->next, (unsigned long) queue->prev);
    list_for_each_safe ( list, tmp, queue )
    {
        d_inf = list_entry(list, struct sedf_dom_info, extralist);
        printk("%3d: ",loop++);
        sedf_dump_domain(d_inf->owner);
    }

    loop = 0;
    printk("\nnot on Q\n");
    for_each_domain(d) {
        if (!extraq_on(d) && !__task_on_queue(d)) {
            printk("%3d: ",loop++);
            sedf_dump_domain(d);
        }
    }
}
//Adjusts periods and slices of the domains according to their weights
static inline int sedf_adjust_weights(struct domain *p, struct sched_adjdom_cmd *cmd) {
    int      sumw = 0;
    s_time_t sumt = 0;

    //sum up all weights
    for_each_domain(p) {
        if (DOM_INFO(p)->weight)
            sumw += DOM_INFO(p)->weight;
        else {
            //don't modify domains which don't have a weight, but sum up
            //the time they need, projected to a WEIGHT_PERIOD, so that
            //this time is not given to the weight-driven domains
            //this results in max. 4s slice/period length
            ASSERT((WEIGHT_PERIOD < ULONG_MAX) &&
                   (DOM_INFO(p)->slice_orig < ULONG_MAX));
            sumt += (WEIGHT_PERIOD * DOM_INFO(p)->slice_orig) /
                    DOM_INFO(p)->period_orig;
        }
    }
    //adjust all slices (and periods) to the new weight
    for_each_domain(p) {
        if (DOM_INFO(p)->weight) {
            DOM_INFO(p)->period_orig =
                DOM_INFO(p)->period = WEIGHT_PERIOD;
            DOM_INFO(p)->slice_orig =
                DOM_INFO(p)->slice  =
                (DOM_INFO(p)->weight *
                 (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt)) / sumw;
        }
    }
    return 0;
}
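/* Worked example (editor's sketch): two weight-driven domains with weights
 * 2 and 3 (sumw = 5) plus one time-driven domain with slice 5ms per 20ms
 * period. The time-driven domain reserves sumt = (100ms * 5) / 20 = 25ms of
 * each WEIGHT_PERIOD. With WEIGHT_SAFETY = 5ms, 70ms remain, split as
 * 2/5 * 70ms = 28ms and 3/5 * 70ms = 42ms per 100ms period. */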
/* set or fetch domain scheduling parameters */
static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) {
    PRINT(2,"sedf_adjdom was called, domain-id %i new period %llu new slice %llu\n"
            "latency %llu extra: %s\n",
          p->id, cmd->u.sedf.period, cmd->u.sedf.slice, cmd->u.sedf.latency,
          (cmd->u.sedf.extratime) ? "yes" : "no");

    if ( cmd->direction == SCHED_INFO_PUT )
    {
        if (!cmd->u.sedf.period && !cmd->u.sedf.weight)  //check for sane parameters
            return -EINVAL;
        if (cmd->u.sedf.weight) {
            DOM_INFO(p)->weight = cmd->u.sedf.weight;  //weight-driven domains
        }
        else {
            //time-driven domains
            DOM_INFO(p)->weight = 0;
            if (cmd->u.sedf.slice > cmd->u.sedf.period)  /* sanity checking! */
                return -EINVAL;
            DOM_INFO(p)->period_orig =
                DOM_INFO(p)->period = cmd->u.sedf.period;
            DOM_INFO(p)->slice_orig =
                DOM_INFO(p)->slice  = cmd->u.sedf.slice;
        }
        if (sedf_adjust_weights(p, cmd))
            return -EINVAL;
        DOM_INFO(p)->extra   = cmd->u.sedf.extratime;
        DOM_INFO(p)->latency = cmd->u.sedf.latency;
        extraq_check(p);
    }
    else if ( cmd->direction == SCHED_INFO_GET )
    {
        cmd->u.sedf.period    = DOM_INFO(p)->period;
        cmd->u.sedf.slice     = DOM_INFO(p)->slice;
        cmd->u.sedf.extratime = DOM_INFO(p)->extra;
        cmd->u.sedf.latency   = DOM_INFO(p)->latency;
        cmd->u.sedf.weight    = DOM_INFO(p)->weight;
    }
    PRINT(2,"sedf_adjdom_finished\n");
    return 0;
}
struct scheduler sched_sedf_def = {
    .name           = "Simple EDF Scheduler",
    .opt_name       = "sedf",
    .sched_id       = SCHED_SEDF,

    .init_idle_task = sedf_init_idle_task,
    .alloc_task     = sedf_alloc_task,
    .add_task       = sedf_add_task,
    .free_task      = sedf_free_task,
    .init_scheduler = sedf_init_scheduler,
    .do_schedule    = sedf_do_schedule,
    .dump_cpu_state = sedf_dump_cpu_state,
    .sleep          = sedf_sleep,
    .wake           = sedf_wake,
    .adjdom         = sedf_adjdom,
};