debuggers.hg

view xen/common/sched_sedf.c @ 4647:9c88ba91d330

bitkeeper revision 1.1346.1.1 (42670505dNhgnJm5dQD81pCalXMZgw)

manual merge
author iap10@freefall.cl.cam.ac.uk
date Thu Apr 21 01:42:29 2005 +0000 (2005-04-21)
parents af870801ba62
children 214ccc480724
line source
1 /****************************************************************************
2 * Simple EDF scheduler for xen
3 *
4 * by Stephan Diestelhorst (C) 2004 Cambridge University
5 * based on code by Mark Williamson (C) 2004 Intel Research Cambridge
6 */
8 #include <xen/sched.h>
9 #include <xen/sched-if.h>
10 #include <public/sched_ctl.h>
11 #include <xen/ac_timer.h>
12 #include <xen/softirq.h>
13 #include <xen/time.h>
14 #include <xen/slab.h>
16 /*#include <xen/adv_sched_hist.h>*/
/* Verbosity settings: a PRINT() with level <= SEDFLEVEL is emitted. */
#define SEDFLEVEL 0
/* Wrapped in do { } while (0) so PRINT() acts as a single statement and is
   safe inside unbraced if/else bodies (the original bare `if' could capture
   a following `else'). */
#define PRINT(_f, _a...) \
    do { if ((_f) <= SEDFLEVEL) printk(_a); } while (0)

#ifdef DEBUG
#define SEDF_STATS
#endif

/* Various ways of unblocking domains. */
#define UNBLOCK_ISOCHRONOUS_EDF 1
#define UNBLOCK_EDF 2
#define UNBLOCK_ATROPOS 3
#define UNBLOCK_SHORT_RESUME 4
#define UNBLOCK_BURST 5
#define UNBLOCK_EXTRA_SUPPORT 6
#define UNBLOCK UNBLOCK_EXTRA_SUPPORT

/* Various ways of treating extra-time. */
#define EXTRA_OFF 1
#define EXTRA_ROUNDR 2
#define EXTRA_SLICE_WEIGHT 3
#define EXTRA_BLOCK_WEIGHT 4

#define EXTRA EXTRA_BLOCK_WEIGHT

/* Flag bits kept in sedf_dom_info.extra. */
#define EXTRA_NONE (0)
#define EXTRA_AWARE (1)
#define EXTRA_RUN_PEN (2)
#define EXTRA_RUN_UTIL (4)
#define EXTRA_WANT_PEN_Q (8)
/* Indices of the two extratime queues (L0 penalty / L1 utilisation). */
#define EXTRA_PEN_Q (0)
#define EXTRA_UTIL_Q (1)

/* Macro argument fully parenthesised; the mask is spelt with the flag names
   instead of the magic constant 6. */
#define extra_runs(inf) (((inf)->extra) & (EXTRA_RUN_PEN | EXTRA_RUN_UTIL))
#define extra_get_cur_q(inf) (((((inf)->extra) & \
    (EXTRA_RUN_PEN | EXTRA_RUN_UTIL)) >> 1) - 1)

#define EXTRA_QUANTUM (MICROSECS(500))
#define WEIGHT_PERIOD (MILLISECS(100))
#define WEIGHT_SAFETY (MILLISECS(5))
/* Per-domain scheduling state for the simple EDF scheduler. */
struct sedf_dom_info
{
    /* Domain this info block belongs to. */
    struct domain *owner;
    /* Node on the per-cpu runqueue or waitqueue (poisoned when off-queue). */
    struct list_head list;
    /* Nodes on the two extratime queues (EXTRA_PEN_Q / EXTRA_UTIL_Q). */
    struct list_head extralist[2];

    /* Parameters for EDF */
    s_time_t period;  /* = (relative deadline) */
    s_time_t slice;   /* = worst case execution time */

    /* Advanced parameters */
    /* Latency scaling: user-set values; period/slice above may be scaled
       down temporarily and are doubled back towards these each period. */
    s_time_t period_orig;
    s_time_t slice_orig;
    s_time_t latency;  /* latency hint used by the cons_c/burst schemes */

    /* extra-time status of domain (EXTRA_* flag bits) */
    short extra;
    /* weights for "Scheduling for beginners/ lazy/ etc." ;) */
    short weight;

    /* Bookkeeping */
    s_time_t absdead;      /* absolute deadline of the current period */
    s_time_t sched_start;  /* time this domain was last scheduled in */
    s_time_t cputime;      /* cpu time consumed in the current period */
    s_time_t absblock;     /* time the domain last blocked */

    /* time the domain unblocked, used to determine unblocking intervals */
    s_time_t absunblock;

    /* scores for {util, block penalty}-weighted extratime distribution;
       lower score = higher extratime priority */
    int score[2];
    /* slice time lost to short blocks, repaid in penalty extratime */
    s_time_t short_block_lost_tot;

    /* Statistics */
    s_time_t extra_time_tot;  /* total time received as extratime */

#ifdef SEDF_STATS
    s_time_t block_time_tot;
    s_time_t penalty_time_tot;
    int block_tot;
    int short_block_tot;
    int long_block_tot;
    int short_cont;
    int pen_extra_blocks;
    int pen_extra_slices;
#endif
};
/* Per-cpu queues: domains inside their current period (runnableq, sorted by
   deadline), domains waiting for their next period (waitq, sorted by period
   begin), and the two extratime queues (sorted by score). */
struct sedf_cpu_info {
    struct list_head runnableq;
    struct list_head waitq;
    struct list_head extraq[2];
};
/* Accessors for the per-domain and per-cpu scheduler private data. */
#define DOM_INFO(d) ((struct sedf_dom_info *)((d)->sched_priv))
#define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
#define LIST(d) (&DOM_INFO(d)->list)
#define EXTRALIST(d,i) (&(DOM_INFO(d)->extralist[i]))
#define RUNQ(cpu) (&CPU_INFO(cpu)->runnableq)
#define WAITQ(cpu) (&CPU_INFO(cpu)->waitq)
#define EXTRAQ(cpu,i) (&(CPU_INFO(cpu)->extraq[i]))
#define IDLETASK(cpu) ((struct domain *)schedule_data[cpu].idle)

/* Start of the current period = absolute deadline minus one period. */
#define PERIOD_BEGIN(inf) ((inf)->absdead - (inf)->period)

#define MIN(x,y) (((x)<(y))?(x):(y))
/* BUGFIX: `y' must be parenthesised — without it DIV_UP(a, b + c) expands
   to ((a + b + c - 1) / b + c), which is wrong by precedence. */
#define DIV_UP(x,y) (((x) + (y) - 1) / (y))
/* Slab cache backing struct sedf_dom_info allocations. */
static xmem_cache_t *dom_info_cache;

static void sedf_dump_cpu_state(int i);
133 static inline int extraq_on(struct domain *d, int i) {
134 return ((EXTRALIST(d,i)->next != NULL) &&
135 (EXTRALIST(d,i)->next != EXTRALIST(d,i)));
136 }
/* Links domain d at the head of its cpu's extraq number i. */
static inline void extraq_add_head(struct domain *d, int i)
{
    list_add(EXTRALIST(d,i), EXTRAQ(d->processor,i));
}
/* Links domain d at the tail of its cpu's extraq number i. */
static inline void extraq_add_tail(struct domain *d, int i)
{
    list_add_tail(EXTRALIST(d,i), EXTRAQ(d->processor,i));
}
/* Unlinks domain d from extraq i; next is poisoned to NULL so that
   extraq_on() reports the domain as off-queue afterwards. */
static inline void extraq_del(struct domain *d, int i)
{
    struct list_head *list = EXTRALIST(d,i);
    PRINT(3, "Removing domain %i from L%i extraq\n", d->id,i);
    list_del(list);
    list->next = NULL;
}
161 /* adds a domain to the queue of processes which are aware of extra time. List
162 is sorted by score, where a lower score means higher priority for an extra
163 slice. It also updates the score, by simply subtracting a fixed value from
164 each entry, in order to avoid overflow. The algorithm works by simply
165 charging each domain that recieved extratime with an inverse of its weight.
166 */
167 static inline void extraq_add_sort_update(struct domain *d, int i, int sub) {
168 struct list_head *cur;
169 struct sedf_dom_info *curinf;
171 /*if (extraq_on(d,i)) {
172 PRINT(0,"extraq_add_sort_update: domain %i is already on "\
173 "L%i extraq! HALTING\n",d->id,i);
174 sedf_dump_cpu_state(0);(*((int*)0))++;
175 }*/
176 PRINT(3, "Adding domain %i (score= %i, short_pen= %lli) to L%i "\
177 "extraq\n", d->id, DOM_INFO(d)->score[i],
178 DOM_INFO(d)->short_block_lost_tot, i);
179 /*iterate through all elements to find our "hole" and on our way
180 update all the other scores*/
181 list_for_each(cur,EXTRAQ(d->processor,i)){
182 curinf = list_entry(cur,struct sedf_dom_info,extralist[i]);
183 curinf->score[i] -= sub;
184 if (DOM_INFO(d)->score[i] < curinf->score[i])
185 break;
186 else
187 PRINT(4,"\tbehind domain %i (score= %i)\n",
188 curinf->owner->id, curinf->score[i]);
189 }
190 /*cur now contains the element, before which we'll enqueue*/
191 PRINT(3, "\tlist_add to %x\n", cur->prev);
192 list_add(EXTRALIST(d,i),cur->prev);
194 /*continue updating the extraq*/
195 if ((cur != EXTRAQ(d->processor,i)) && sub)
196 for (cur = cur->next; cur != EXTRAQ(d->processor,i);
197 cur = cur-> next) {
198 curinf = list_entry(cur,struct sedf_dom_info,
199 extralist[i]);
200 curinf->score[i] -= sub;
201 PRINT(4, "\tupdating domain %i (score= %llu)\n",
202 curinf->owner->id, curinf->score[i]);
203 }
204 }
205 static inline void extraq_check(struct domain *d) {
206 if (extraq_on(d, EXTRA_UTIL_Q)) {
207 PRINT(2,"Dom %i is on extraQ\n",d->id);
208 if (!(DOM_INFO(d)->extra & EXTRA_AWARE) &&
209 !extra_runs(DOM_INFO(d))) {
210 extraq_del(d, EXTRA_UTIL_Q);
211 PRINT(2,"Removed dom %i from L1 extraQ\n",d->id);
212 }
213 } else {
214 PRINT(2,"Dom %i is NOT on L1 extraQ\n",d->id);
215 if ((DOM_INFO(d)->extra & EXTRA_AWARE) && domain_runnable(d))
216 {
217 #if (EXTRA == EXTRA_ROUNDR)
218 /*Favour domains which got short unblocked*/
219 extraq_add_tail(d, EXTRA_UTIL_Q);
220 #elif (EXTRA == EXTRA_SLICE_WEIGHT || \
221 EXTRA == EXTRA_BLOCK_WEIGHT)
222 extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
223 #elif
224 ;
225 #endif
226 PRINT(2,"Added dom %i to L1 extraQ\n",d->id);
227 }
228 }
229 }
/* Unlinks domain d from its cpu's runqueue/waitqueue; next is poisoned to
   NULL so that __task_on_queue() reports it as off-queue afterwards. */
static inline void __del_from_queue(struct domain *d)
{
    struct list_head *list = LIST(d);
    PRINT(3,"Removing domain %i (bop= %llu) from runq/waitq\n", d->id,
          PERIOD_BEGIN(DOM_INFO(d)));
    list_del(list);
    list->next = NULL;
}
/* Adds a domain to the queue of processes which wait for the beginning of
   the next period; this list is therefore sorted by that time, which is
   simply absolute deadline - period.
 */
static inline void __add_to_waitqueue_sort(struct domain *d) {
    struct list_head *cur;
    struct sedf_dom_info *curinf;

    PRINT(3,"Adding domain %i (bop= %llu) to waitq\n", d->id,
          PERIOD_BEGIN(DOM_INFO(d)));

    /* Iterate through all elements to find our "hole" (insertion point). */
    list_for_each(cur,WAITQ(d->processor)){
        curinf = list_entry(cur,struct sedf_dom_info,list);
        if (PERIOD_BEGIN(DOM_INFO(d)) < PERIOD_BEGIN(curinf))
            break;
        else
            PRINT(4,"\tbehind domain %i (bop= %llu)\n",
                  curinf->owner->id, PERIOD_BEGIN(curinf));
    }
    /* cur now contains the element, before which we'll enqueue. */
    PRINT(3,"\tlist_add to %x\n",cur->prev);
    list_add(LIST(d),cur->prev);
}
/* Adds a domain to the queue of processes which have started their current
   period and are runnable (i.e. not blocked, dieing,...). The first element
   on this list is running on the processor; if the list is empty the idle
   task will run. As we are implementing EDF, this list is sorted by
   deadlines.
 */
static inline void __add_to_runqueue_sort(struct domain *d) {
    struct list_head *cur;
    struct sedf_dom_info *curinf;

    PRINT(3,"Adding domain %i (deadl= %llu) to runq\n", d->id,
          DOM_INFO(d)->absdead);

    /* Iterate through all elements to find our "hole" (insertion point). */
    list_for_each(cur, RUNQ(d->processor)) {
        curinf = list_entry(cur, struct sedf_dom_info, list);
        if (DOM_INFO(d)->absdead < curinf->absdead)
            break;
        else
            PRINT(4,"\tbehind domain %i (deadl= %llu)\n",
                  curinf->owner->id, curinf->absdead);
    }

    /* cur now contains the element, before which we'll enqueue. */
    PRINT(3,"\tlist_add to %x\n",cur->prev);
    list_add(LIST(d),cur->prev);
}
292 static inline int __task_on_queue(struct domain *d) {
293 return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
294 }
296 /* Initialises the queues and creates the domain info cache */
297 static int sedf_init_scheduler() {
298 int i;
299 PRINT(2,"sedf_init_scheduler was called\n");
301 for ( i = 0; i < NR_CPUS; i++ ) {
302 schedule_data[i].sched_priv =
303 xmalloc(sizeof(struct sedf_cpu_info));
304 if ( schedule_data[i].sched_priv == NULL )
305 return -1;
306 INIT_LIST_HEAD(WAITQ(i));
307 INIT_LIST_HEAD(RUNQ(i));
308 INIT_LIST_HEAD(EXTRAQ(i,EXTRA_PEN_Q));
309 INIT_LIST_HEAD(EXTRAQ(i,EXTRA_UTIL_Q));
310 }
311 dom_info_cache = xmem_cache_create("SEDF dom info",
312 sizeof(struct sedf_dom_info), 0, 0, 0, NULL);
313 if ( dom_info_cache == NULL )
314 {
315 printk("Could not allocate SLAB cache.\n");
316 return -1;
317 }
319 return 0;
320 }
/* Allocates memory for per domain private scheduling data.
   Returns 0 on success, -1 on allocation failure; the info block is
   zero-filled so all bookkeeping starts out clean. */
static int sedf_alloc_task(struct domain *d) {
    PRINT(2,"sedf_alloc_task was called, domain-id %i\n",d->id);
    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
        return -1;
    memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
    return 0;
}
/* Setup the sedf_dom_info: assign default EDF parameters and initialise
   the queue nodes. dom0 gets a real slice to boot the machine; all other
   domains start as pure best-effort (zero slice, extratime-aware). */
static void sedf_add_task(struct domain *d)
{
    struct sedf_dom_info *inf=DOM_INFO(d);
    inf->owner = d;

    PRINT(2,"sedf_add_task was called, domain-id %i\n",d->id);
    if (d->id==0) {
        /*set dom0 to something useful to boot the machine*/
        inf->period = MILLISECS(20);
        inf->slice = MILLISECS(15);
        inf->latency = 0;
        inf->absdead = 0;
        inf->extra = EXTRA_NONE;/*EXTRA_AWARE; */
    }
    else {
        /*other domains run in best effort mode*/
        inf->period = MILLISECS(20);
        inf->slice = 0;
        inf->absdead = 0;
        inf->latency = 0;
        inf->extra = EXTRA_AWARE;
    }
    /* remember the unscaled values for latency scaling / burst mode */
    inf->period_orig = inf->period; inf->slice_orig = inf->slice;
    INIT_LIST_HEAD(&(inf->list));
    INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
    INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
}
/* Frees memory used by domain info (returns it to the slab cache). */
static void sedf_free_task(struct domain *d)
{
    PRINT(2,"sedf_free_task was called, domain-id %i\n",d->id);
    ASSERT(d->sched_priv != NULL);
    xmem_cache_free(dom_info_cache, d->sched_priv);
}
/* Initialises the idle task: allocates and zeroes its info block and marks
   it running. Returns 0 on success, -1 on allocation failure. */
static int sedf_init_idle_task(struct domain *d) {
    PRINT(2,"sedf_init_idle_task was called, domain-id %i\n",d->id);
    if ( sedf_alloc_task(d) < 0 )
        return -1;

    sedf_add_task(d);
    DOM_INFO(d)->absdead = 0;
    set_bit(DF_RUNNING, &d->flags);
    /*the idle task doesn't have to turn up on any list...*/
    return 0;
}
/* Handles the rescheduling and bookkeeping of domains running in their
   realtime-time :) — charges consumed cpu time, advances the deadline of a
   finished slice and requeues (or dequeues) the domain. */
static inline void desched_edf_dom (s_time_t now, struct domain* d) {
    struct sedf_dom_info* inf = DOM_INFO(d);
    /*current domain is running in real time mode*/

    /*update the domain's cputime*/
    inf->cputime += now - inf->sched_start;

    /*scheduling decisions, which don't remove the running domain
      from the runq*/
    if ((inf->cputime < inf->slice) && domain_runnable(d))
        return;

    __del_from_queue(d);

    /*manage bookkeeping (i.e. calculate next deadline,
      memorize overrun-time of slice) of finished domains*/
    if (inf->cputime >= inf->slice) {
        /* carry any overrun into the next period */
        inf->cputime -= inf->slice;

        if (inf->period < inf->period_orig) {
            /*this domain runs in latency scaling or burst mode*/
#if (UNBLOCK == UNBLOCK_BURST)
            /* only relax the scaling after two blocking-free periods */
            if (now - inf->absunblock >= 2 * inf->period)
#endif
            {
                /* double slice and period back towards the originals,
                   capping at the user-set values */
                inf->period *= 2; inf->slice *= 2;
                if ((inf->period > inf->period_orig) ||
                    (inf->slice > inf->slice_orig)) {
                    /*reset slice & period*/
                    inf->period = inf->period_orig;
                    inf->slice = inf->slice_orig;
                }
            }
        }
        /*set next deadline*/
        inf->absdead += inf->period;
    }

    /*add a runnable domain to the waitqueue*/
    if (domain_runnable(d))
        __add_to_waitqueue_sort(d);
    else {
        /*we have a blocked realtime task: remember when it blocked and
          take it off both extratime queues*/
        inf->absblock = now;
#if (EXTRA > EXTRA_OFF)
#if (EXTRA == EXTRA_BLOCK_WEIGHT)
        if (extraq_on(d,EXTRA_PEN_Q)) extraq_del(d,EXTRA_PEN_Q);
#endif
        if (extraq_on(d,EXTRA_UTIL_Q)) extraq_del(d,EXTRA_UTIL_Q);
#endif
    }
}
/* Update all elements on the queues: promote waiting domains whose period
   has begun, and repair runqueue entries with exhausted slices or missed
   deadlines. */
static inline void update_queues(
s_time_t now, struct list_head* runq, struct list_head* waitq) {
    struct list_head *cur,*tmp;
    struct sedf_dom_info *curinf;

    PRINT(3,"Updating waitq..\n");
    /*check for the first elements of the waitqueue, whether their
      next period has already started (waitq is sorted by period begin,
      so we can stop at the first future entry)*/
    list_for_each_safe(cur, tmp, waitq) {
        curinf = list_entry(cur, struct sedf_dom_info, list);
        PRINT(4,"\tLooking @ dom %i\n", curinf->owner->id);
        if (PERIOD_BEGIN(curinf) <= now) {
            __del_from_queue(curinf->owner);
            __add_to_runqueue_sort(curinf->owner);
        }
        else
            break;
    }

    PRINT(3,"Updating runq..\n");
    /*process the runq, find domains that are on
      the runqueue which shouldn't be there*/
    list_for_each_safe(cur, tmp, runq) {
        curinf = list_entry(cur,struct sedf_dom_info,list);
        PRINT(4,"\tLooking @ dom %i\n", curinf->owner->id);
        if (unlikely(curinf->slice == 0)) {
            /*ignore domains with empty slice*/
            PRINT(4,"\tUpdating zero-slice domain %i\n",
                  curinf->owner->id);
            __del_from_queue(curinf->owner);

            /*move them to their next period*/
            curinf->absdead += curinf->period;
            /*and put them back into the queue*/
            __add_to_waitqueue_sort(curinf->owner);
        }
        else {
            if (unlikely((curinf->absdead < now) ||
                (curinf->cputime > curinf->slice))) {
                /*we missed the deadline or the slice was
                  already finished... might happen because
                  of dom_adj.*/
                PRINT(4,"\tDomain %i exceeded it's deadline/"\
                      "slice (%llu / %llu) now: %llu "\
                      "cputime: %llu\n", curinf->owner->id,
                      curinf->absdead, curinf->slice, now,
                      curinf->cputime);
                __del_from_queue(curinf->owner);
                /*common case: we miss one period!*/
                curinf->absdead += curinf->period;

                /*if we are still behind: modulo arithmetic,
                  force deadline to be in future and
                  aligned to period borders!*/
                if (unlikely(curinf->absdead < now))
                    curinf->absdead +=
                        DIV_UP(now - curinf->absdead,
                        curinf->period) * curinf->period;

                /*give a fresh slice*/
                curinf->cputime = 0;
                /* NOTE(review): PERIOD_BEGIN < now means the new period
                   has already started, yet the domain is put on the
                   waitq (picked up again on the next pass); verify the
                   condition is not inverted. */
                if (PERIOD_BEGIN(curinf) < now)
                    __add_to_waitqueue_sort(curinf->owner);
                else
                    __add_to_runqueue_sort(curinf->owner);
            }
            else
                break;
        }
    }
    PRINT(3,"done updating the queues\n");
}
#if (EXTRA > EXTRA_OFF)
/* Removes a domain from the head of the according extraQ and
   requeues it at a specified position:
     round-robin extratime: end of extraQ
     weighted ext.: insert in sorted list by score
   If the domain is blocked / has regained its short-block-loss
   time it is not put on any queue. */
static inline void desched_extra_dom(s_time_t now, struct domain* d) {
    struct sedf_dom_info *inf = DOM_INFO(d);
    int i = extra_get_cur_q(inf);

#if (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
    unsigned long oldscore;
#endif

    /*unset all running flags*/
    inf->extra &= ~(EXTRA_RUN_PEN | EXTRA_RUN_UTIL);
    /*fresh slice for the next run*/
    inf->cputime = 0;
    /*accumulate total extratime*/
    inf->extra_time_tot += now - inf->sched_start;
    /*remove extradomain from head of the queue*/
    extraq_del(d, i);

#if (EXTRA == EXTRA_ROUNDR)
    if (domain_runnable(d))
        /*add to the tail if it is runnable => round-robin*/
        extraq_add_tail(d, EXTRA_UTIL_Q);
#elif (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
    /*update the score*/
    oldscore = inf->score[i];
#if (EXTRA == EXTRA_BLOCK_WEIGHT)
    if (i == EXTRA_PEN_Q) {
        /*domain was running in L0 extraq*/
        /*reduce block lost, probably more sophistication here!*/
        inf->short_block_lost_tot -= now - inf->sched_start;
        PRINT(3,"Domain %i: Short_block_lost: %lli\n",
              inf->owner->id, inf->short_block_lost_tot);
        if (inf->short_block_lost_tot <= 0) {
            /* BUGFIX: the format string expects the domain id, but no
               argument was passed (undefined behaviour in printk). */
            PRINT(4,"Domain %i compensated short block loss!\n",
                  inf->owner->id);
            /*we have (over-)compensated our block penalty*/
            inf->short_block_lost_tot = 0;
            /*we don't want a place on the penalty queue anymore!*/
            inf->extra &= ~EXTRA_WANT_PEN_Q;
            /*do not add us on this block extraq again!*/
            return;
        }
        /*we have to go again for another try in the block-extraq,
          the score is not used incrementally here, as this is
          already done by recalculating the block_lost*/
        inf->score[EXTRA_PEN_Q] = (inf->period << 10) /
            inf->short_block_lost_tot;
        oldscore = 0;
    } else
#endif
    {
        /*domain was running in L1 extraq => score is inverse of
          utilization and is used somewhat incrementally!*/
        if (inf->slice)
            /*NB: use fixed point arithmetic with 10 bits*/
            inf->score[EXTRA_UTIL_Q] = (inf->period << 10) /
                inf->slice;
        else
            /* Best-effort domains (slice == 0) get the fixed-point unit
               value. BUGFIX: the original wrote "2^10", but `^' is XOR
               in C (2^10 == 8); the intended value 2**10 is 1 << 10,
               matching the 10-bit fixed point used above. */
            inf->score[EXTRA_UTIL_Q] = 1 << 10;
    }
    if (domain_runnable(d))
        /*add according to score: weighted round robin*/
        extraq_add_sort_update(d, i, oldscore);
    else {
        inf->absblock = now;
        /*remove this blocked domain from the waitq!*/
        __del_from_queue(d);
        /*make sure that we remove a blocked domain from the other
          extraq as well (this caused hours of debugging!)*/
#if (EXTRA == EXTRA_BLOCK_WEIGHT)
        if (i == EXTRA_PEN_Q) {
            if (extraq_on(d,EXTRA_UTIL_Q))
                extraq_del(d,EXTRA_UTIL_Q);
        }
        else {
            if (extraq_on(d,EXTRA_PEN_Q))
                extraq_del(d,EXTRA_PEN_Q);
        }
#endif
    }
#endif
}
#endif
/* Picks a domain to run in extratime (runqueue empty): the L0 penalty queue
   first, then the L1 utilisation queue; idles when both are empty or the
   window until end_xt is shorter than one EXTRA_QUANTUM. */
static inline task_slice_t sedf_do_extra_schedule
(s_time_t now, s_time_t end_xt, struct list_head *extraq[], int cpu) {
    task_slice_t ret;
    struct sedf_dom_info *runinf;

    if (end_xt - now < EXTRA_QUANTUM)
        goto return_idle;
#if (EXTRA == EXTRA_BLOCK_WEIGHT)
    if (!list_empty(extraq[EXTRA_PEN_Q])) {
        /*we still have elements on the level 0 extraq
          => let those run first!*/
        runinf = list_entry(extraq[EXTRA_PEN_Q]->next,
                            struct sedf_dom_info, extralist[EXTRA_PEN_Q]);
        runinf->extra |= EXTRA_RUN_PEN;
        ret.task = runinf->owner;
        ret.time = EXTRA_QUANTUM;
#ifdef SEDF_STATS
        runinf->pen_extra_slices++;
#endif
    } else
#endif
    if (!list_empty(extraq[EXTRA_UTIL_Q])) {
        /*use elements from the normal extraqueue*/
        runinf = list_entry(extraq[EXTRA_UTIL_Q]->next,
                            struct sedf_dom_info,extralist[EXTRA_UTIL_Q]);
        runinf->extra |= EXTRA_RUN_UTIL;
        ret.task = runinf->owner;
        ret.time = EXTRA_QUANTUM;
    }
    else
        goto return_idle;

    return ret;

return_idle:
    /* nothing eligible for extratime => idle until end_xt */
    ret.task = IDLETASK(cpu);
    ret.time = end_xt - now;
    return ret;
}
/* Main scheduling function
   Reasons for calling this function are:
   -timeslice for the current period used up
   -domain on waitqueue has started it's period
   -and various others ;) in general: determine which domain to run next*/
static task_slice_t sedf_do_schedule(s_time_t now)
{
    int cpu = current->processor;
    struct list_head *runq = RUNQ(cpu);
    struct list_head *waitq = WAITQ(cpu);
#if (EXTRA > EXTRA_OFF)
    struct sedf_dom_info *inf = DOM_INFO(current);
    struct list_head *extraq[] = {EXTRAQ(cpu,EXTRA_PEN_Q),
        EXTRAQ(cpu, EXTRA_UTIL_Q)};
#endif
    task_slice_t ret;

    /*idle tasks don't need any of the following stuff*/
    if (is_idle_task(current))
        goto check_waitq;

    /* charge and requeue/dequeue the domain that just ran */
#if (EXTRA > EXTRA_OFF)
    if (unlikely(extra_runs(inf))) {
        /*special treatment of domains running in extra time*/
        desched_extra_dom(now, current);
    }
    else
#endif
    {
        desched_edf_dom(now, current);
    }
check_waitq:
    update_queues(now, runq, waitq);

    /*now simply pick the first domain from the runqueue*/
    struct sedf_dom_info *runinf, *waitinf;

    if (!list_empty(runq)) {
        runinf = list_entry(runq->next,struct sedf_dom_info,list);
        ret.task = runinf->owner;
        if (!list_empty(waitq)) {
            waitinf = list_entry(waitq->next,
                struct sedf_dom_info,list);
            /*rerun scheduler, when scheduled domain reaches it's
              end of slice or the first domain from the waitqueue
              gets ready*/
            ret.time = MIN(now + runinf->slice - runinf->cputime,
                PERIOD_BEGIN(waitinf)) - now;
        }
        else {
            ret.time = runinf->slice - runinf->cputime;
        }
        goto sched_done;
    }

    if (!list_empty(waitq)) {
        waitinf = list_entry(waitq->next,struct sedf_dom_info,list);
        /*we could not find any suitable domain
          => look for domains that are aware of extratime*/
#if (EXTRA > EXTRA_OFF)
        ret = sedf_do_extra_schedule(now, PERIOD_BEGIN(waitinf),
            extraq, cpu);
#else
        ret.task = IDLETASK(cpu);
        ret.time = PERIOD_BEGIN(waitinf) - now;
#endif
    }
    else {
        /*this could probably never happen, but one never knows...*/
        /*it can... imagine a second CPU, which is pure scifi ATM,
          but one never knows ;)*/
        ret.task = IDLETASK(cpu);
        ret.time = SECONDS(1);
    }

sched_done:
    /*TODO: Do something USEFUL when this happens and find out, why it
      still can happen!!!*/
    if (ret.time<0) {
        printk("Ouch! We are seriously BEHIND schedule! %lli\n",
               ret.time);
        ret.time = EXTRA_QUANTUM;
    }
    /* stamp the start time so the next deschedule can charge cpu time */
    DOM_INFO(ret.task)->sched_start=now;
    return ret;
}
/* Takes a domain out of scheduling: if it is currently running, just kick
   the scheduler softirq (deschedule happens in sedf_do_schedule); otherwise
   remove it from every queue it may sit on. */
static void sedf_sleep(struct domain *d) {
    PRINT(2,"sedf_sleep was called, domain-id %i\n",d->id);
    if ( test_bit(DF_RUNNING, &d->flags) ) {
#ifdef ADV_SCHED_HISTO
        adv_sched_hist_start(d->processor);
#endif
        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
    }
    else {
        if ( __task_on_queue(d) )
            __del_from_queue(d);
#if (EXTRA > EXTRA_OFF)
        if (extraq_on(d, EXTRA_UTIL_Q))
            extraq_del(d, EXTRA_UTIL_Q);
#endif
#if (EXTRA == EXTRA_BLOCK_WEIGHT)
        if (extraq_on(d, EXTRA_PEN_Q))
            extraq_del(d, EXTRA_PEN_Q);
#endif
    }
}
787 /* This function wakes up a domain, i.e. moves them into the waitqueue
788 * things to mention are: admission control is taking place nowhere at
789 * the moment, so we can't be sure, whether it is safe to wake the domain
790 * up at all. Anyway, even if it is safe (total cpu usage <=100%) there are
791 * some considerations on when to allow the domain to wake up and have it's
792 * first deadline...
793 * I detected 3 cases, which could describe the possible behaviour of the
794 * scheduler,
795 * and I'll try to make them more clear:
796 *
797 * 1. Very conservative
798 * -when a blocked domain unblocks, it is allowed to start execution at
799 * the beginning of the next complete period
800 * (D..deadline, R..running, B..blocking/sleeping, U..unblocking/waking up
801 *
802 * DRRB_____D__U_____DRRRRR___D________ ...
803 *
804 * -this causes the domain to miss a period (and a deadline)
805 * -doesn't disturb the schedule at all
806 * -deadlines keep occurring isochronously
807 *
808 * 2. Conservative Part 1: Short Unblocking
809 * -when a domain unblocks in the same period as it was blocked it
810 * unblocks and may consume the rest of it's original time-slice minus
811 * the time it was blocked
812 * (assume period=9, slice=5)
813 *
814 * DRB_UR___DRRRRR___D...
815 *
816 * -this also doesn't disturb scheduling, but might lead to the fact, that
817 * the domain can't finish it's workload in the period
818 * -in addition to that the domain can be treated prioritised when
819 * extratime is available
820 * -addition: experiments have shown that this may have a HUGE impact on
821 * performance of other domains, because it can lead to excessive context
822 * switches
824 * Part2: Long Unblocking
825 * Part 2a
826 * -it is obvious that such accounting of block time, applied when
827 * unblocking is happening in later periods, works fine as well
828 * -the domain is treated as if it would have been running since the start
829 * of its new period
830 *
831 * DRB______D___UR___D...
832 *
833 * Part 2b
834 * -if one needs the full slice in the next period, it is necessary to
835 * treat the unblocking time as the start of the new period, i.e. move
836 * the deadline further back (later)
837 * -this doesn't disturb scheduling as well, because for EDF periods can
838 * be treated as minimal inter-release times and scheduling stays
839 * correct, when deadlines are kept relative to the time the process
840 * unblocks
841 *
842 * DRB______D___URRRR___D...
843 * (D) <- old deadline was here
844 * -problem: deadlines don't occur isochronously anymore
845 * Part 2c (Improved Atropos design)
846 * -when a domain unblocks it is given a very short period (=latency hint)
847 * and slice length scaled accordingly
848 * -both rise again to the original value (e.g. get doubled every period)
849 *
850 * 3. Unconservative (i.e. incorrect)
851 * -to boost the performance of I/O dependent domains it would be possible
852 * to put the domain into the runnable queue immediately, and let it run
853 * for the remainder of the slice of the current period
854 * (or even worse: allocate a new full slice for the domain)
855 * -either behaviour can lead to missed deadlines in other domains as
856 * opposed to approaches 1,2a,2b
857 */
858 static inline void unblock_short_vcons
859 (struct sedf_dom_info* inf, s_time_t now) {
860 inf->absdead += inf->period;
861 inf->cputime = 0;
862 }
/* Case 2, short unblock (conservative): charge the blocked time against the
   remaining slice of the current period; if too little slice remains, fall
   back to waiting for the next period. */
static inline void unblock_short_cons(struct sedf_dom_info* inf, s_time_t now)
{
    /*treat blocked time as consumed by the domain*/
    inf->cputime += now - inf->absblock;
    if (inf->cputime + EXTRA_QUANTUM > inf->slice) {
        /*we don't have a reasonable amount of time in
          our slice left :( => start in next period!*/
        unblock_short_vcons(inf, now);
    }
#ifdef SEDF_STATS
    else
        inf->short_cont++;
#endif
}
/* Short unblock with extratime support: the domain forfeits the rest of the
   current period but is given a priority in extratime distribution
   proportional to the slice time it lost by blocking. */
static inline void unblock_short_extra_support (struct sedf_dom_info* inf,
   s_time_t now) {
    /*this unblocking scheme tries to support the domain, by assigning it
      a priority in extratime distribution according to the loss of time
      in this slice due to blocking*/
    s_time_t pen;

    /*no more realtime execution in this period!*/
    inf->absdead += inf->period;
    if (likely(inf->absblock)) {
        /* penalty = unconsumed part of the slice, clamped at zero */
        pen = (inf->slice - inf->cputime);
        if (pen < 0) pen = 0;
        /*set penalty to the current value (rather than accumulating
          over periods); not sure which one is better, but this seems
          to work well...*/
        inf->short_block_lost_tot = pen;

        if (inf->short_block_lost_tot) {
            /* score = 10-bit fixed-point inverse of the penalty;
               index 0 is EXTRA_PEN_Q */
            inf->score[0] = (inf->period << 10) /
                inf->short_block_lost_tot;
#ifdef SEDF_STATS
            inf->pen_extra_blocks++;
#endif
            if (extraq_on(inf->owner, EXTRA_PEN_Q))
                /*remove domain for possible resorting!*/
                extraq_del(inf->owner, EXTRA_PEN_Q);
            else
                /*remember that we want to be on the penalty q
                  so that we can continue when we (un-)block
                  in penalty-extratime*/
                inf->extra |= EXTRA_WANT_PEN_Q;

            /*(re-)add domain to the penalty extraq*/
            extraq_add_sort_update(inf->owner,
                EXTRA_PEN_Q, 0);
        }
    }
    /*give it a fresh slice in the next period!*/
    inf->cputime = 0;
}
921 static inline void unblock_long_vcons(struct sedf_dom_info* inf, s_time_t now)
922 {
923 /* align to next future period */
924 inf->absdead += ((now - inf->absdead) / inf->period + 1)
925 * inf->period;
926 inf->cputime = 0;
927 }
/* Case 2a, long unblock: the domain is treated as if it had been running
   since the start of its current (new) period. */
static inline void unblock_long_cons_a (struct sedf_dom_info* inf,
   s_time_t now) {
    /*treat the time the domain was blocked in the
      CURRENT period as consumed by the domain*/
    inf->cputime = (now - inf->absdead) % inf->period;
    if (inf->cputime + EXTRA_QUANTUM > inf->slice) {
        /*we don't have a reasonable amount of time in our slice
          left :( => start in next period!*/
        unblock_long_vcons(inf, now);
    }
}
940 static inline void unblock_long_cons_b(struct sedf_dom_info* inf,s_time_t now) {
941 /*Conservative 2b*/
942 /*Treat the unblocking time as a start of a new period */
943 inf->absdead = now + inf->period;
944 inf->cputime = 0;
945 }
/* Case 2c (improved Atropos design): on unblock, shrink the period to the
   latency hint (slice scaled to keep utilisation constant); both grow back
   in desched_edf_dom. Falls back to 2b without a hint. */
static inline void unblock_long_cons_c(struct sedf_dom_info* inf,s_time_t now) {
    if (likely(inf->latency)) {
        /*scale the slice and period accordingly to the latency hint*/
        /*reduce period temporarily to the latency hint*/
        inf->period = inf->latency;
        /*this results in max. 4s slice/period length*/
        ASSERT((inf->period < ULONG_MAX)
            && (inf->slice_orig < ULONG_MAX));
        /*scale slice accordingly, so that utilisation stays the same*/
        inf->slice = (inf->period * inf->slice_orig)
            / inf->period_orig;
        inf->absdead = now + inf->period;
        inf->cputime = 0;
    }
    else {
        /*we don't have a latency hint.. use some other technique*/
        unblock_long_cons_b(inf, now);
    }
}
965 /*a new idea of dealing with short blocks: burst period scaling*/
966 static inline void unblock_short_burst(struct sedf_dom_info* inf, s_time_t now)
967 {
968 /*treat blocked time as consumed by the domain*/
969 inf->cputime += now - inf->absblock;
971 if (inf->cputime + EXTRA_QUANTUM <= inf->slice) {
972 /*if we can still use some time in the current slice
973 then use it!*/
974 #ifdef SEDF_STATS
975 /*we let the domain run in the current period*/
976 inf->short_cont++;
977 #endif
978 }
979 else {
980 /*we don't have a reasonable amount of time in
981 our slice left => switch to burst mode*/
982 if (likely(inf->absunblock)) {
983 /*set the period-length to the current blocking
984 interval, possible enhancements: average over last
985 blocking intervals, user-specified minimum,...*/
986 inf->period = now - inf->absunblock;
987 /*check for overflow on multiplication*/
988 ASSERT((inf->period < ULONG_MAX)
989 && (inf->slice_orig < ULONG_MAX));
990 /*scale slice accordingly, so that utilisation
991 stays the same*/
992 inf->slice = (inf->period * inf->slice_orig)
993 / inf->period_orig;
994 /*set new (shorter) deadline*/
995 inf->absdead += inf->period;
996 }
997 else {
998 /*in case we haven't unblocked before
999 start in next period!*/
1000 inf->cputime=0;
1001 inf->absdead += inf->period;
1004 inf->absunblock = now;
1006 static inline void unblock_long_burst(struct sedf_dom_info* inf,s_time_t now) {
1007 if (unlikely(inf->latency && (inf->period > inf->latency))) {
1008 /*scale the slice and period accordingly to the latency hint*/
1009 inf->period = inf->latency;
1010 /*check for overflows on multiplication*/
1011 ASSERT((inf->period < ULONG_MAX)
1012 && (inf->slice_orig < ULONG_MAX));
1013 /*scale slice accordingly, so that utilisation stays the same*/
1014 inf->slice = (inf->period * inf->slice_orig)
1015 / inf->period_orig;
1016 inf->absdead = now + inf->period;
1017 inf->cputime = 0;
1019 else {
1020 /*we don't have a latency hint.. or we are currently in
1021 "burst mode": use some other technique
1022 NB: this should be in fact the normal way of operation,
1023 when we are in sync with the device!*/
1024 unblock_long_cons_b(inf, now);
1026 inf->absunblock = now;
1029 #define DOMAIN_EDF 1
1030 #define DOMAIN_EXTRA_PEN 2
1031 #define DOMAIN_EXTRA_UTIL 3
1032 #define DOMAIN_IDLE 4
1033 static inline int get_run_type(struct domain* d) {
1034 struct sedf_dom_info* inf = DOM_INFO(d);
1035 if (is_idle_task(d))
1036 return DOMAIN_IDLE;
1037 if (inf->extra & EXTRA_RUN_PEN)
1038 return DOMAIN_EXTRA_PEN;
1039 if (inf->extra & EXTRA_RUN_UTIL)
1040 return DOMAIN_EXTRA_UTIL;
1041 return DOMAIN_EDF;
/*Compares two domains in the relation of whether the one is allowed to
  interrupt the other's execution.
  It returns true (!=0) if a switch to the other domain is good.
  Current Priority scheme is as follows:
   EDF > L0 (penalty based) extra-time >
   L1 (utilization) extra-time > idle-domain
  In the same class priorities are assigned as follows:
   EDF: early deadline > late deadline
   L0 extra-time: lower score > higher score*/
1052 static inline int should_switch(struct domain* cur, struct domain* other,
1053 s_time_t now) {
1054 struct sedf_dom_info *cur_inf, *other_inf;
1055 cur_inf = DOM_INFO(cur);
1056 other_inf = DOM_INFO(other);
1058 /*check whether we need to make an earlier sched-decision*/
1059 if ((PERIOD_BEGIN(other_inf) <
1060 schedule_data[other->processor].s_timer.expires))
1061 return 1;
1062 /*no timing-based switches need to be taken into account here*/
1063 switch (get_run_type(cur)) {
1064 case DOMAIN_EDF:
1065 /* do not interrupt a running EDF domain */
1066 return 0;
1067 case DOMAIN_EXTRA_PEN:
1068 /*check whether we also want
1069 the L0 ex-q with lower score*/
1070 if ((other_inf->extra & EXTRA_WANT_PEN_Q)
1071 && (other_inf->score[EXTRA_PEN_Q] <
1072 cur_inf->score[EXTRA_PEN_Q]))
1073 return 1;
1074 else return 0;
1075 case DOMAIN_EXTRA_UTIL:
1076 /*check whether we want the L0 extraq, don't
1077 switch if both domains want L1 extraq */
1078 if (other_inf->extra & EXTRA_WANT_PEN_Q)
1079 return 1;
1080 else return 0;
1081 case DOMAIN_IDLE:
1082 return 1;
1085 void sedf_wake(struct domain *d) {
1086 s_time_t now = NOW();
1087 struct sedf_dom_info* inf = DOM_INFO(d);
1089 PRINT(3,"sedf_wake was called, domain-id %i\n",d->id);
1091 if (unlikely(is_idle_task(d)))
1092 return;
1094 if ( unlikely(__task_on_queue(d)) ) {
1095 PRINT(3,"\tdomain %i is already in some queue\n",d->id);
1096 return;
1098 if ( unlikely(extraq_on(d,EXTRA_UTIL_Q) || extraq_on(d,EXTRA_PEN_Q)) ) {
1099 PRINT(3,"\tdomain %i is already in the extraQ\n",d->id);
1101 if (unlikely(inf->absdead == 0))
1102 /*initial setup of the deadline*/
1103 inf->absdead = now + inf->slice;
1105 PRINT(3,"waking up domain %i (deadl= %llu period= %llu "\
1106 "now= %llu)\n",d->id,inf->absdead,inf->period,now);
1107 #ifdef SEDF_STATS
1108 inf->block_tot++;
1109 #endif
1110 if (unlikely(now< PERIOD_BEGIN(inf))) {
1111 PRINT(4,"extratime unblock\n");
1112 /*this might happen, imagine unblocking in extra-time!*/
1113 #if (EXTRA == EXTRA_BLOCK_WEIGHT)
1114 if (inf->extra & EXTRA_WANT_PEN_Q) {
1115 /*we have a domain that wants compensation
1116 for block penalty and did just block in
1117 its compensation time. Give it another
1118 chance!*/
1119 extraq_add_sort_update(d, EXTRA_PEN_Q, 0);
1121 #endif
1122 if (inf->extra & EXTRA_AWARE)
1123 #if (EXTRA == EXTRA_ROUNDR)
1124 extraq_add_tail(d,EXTRA_UTIL_Q);
1125 #elif (EXTRA == EXTRA_SLICE_WEIGHT \
1126 || EXTRA == EXTRA_BLOCK_WEIGHT)
1127 /*put in on the weighted extraq,
1128 without updating any scores*/
1129 extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
1130 #else
1132 #endif
1133 /*else*/
1134 /*This is very very unlikely, ie. might even be an error?!*/
1136 else {
1137 if (now < inf->absdead) {
1138 PRINT(4,"short unblocking\n");
1139 /*short blocking*/
1140 #ifdef SEDF_STATS
1141 inf->short_block_tot++;
1142 #endif
1143 #if (UNBLOCK <= UNBLOCK_ATROPOS)
1144 unblock_short_vcons(inf, now);
1145 #elif (UNBLOCK == UNBLOCK_SHORT_RESUME)
1146 unblock_short_cons(inf, now);
1147 #elif (UNBLOCK == UNBLOCK_BURST)
1148 unblock_short_burst(inf, now);
1149 #elif (UNBLOCK == UNBLOCK_EXTRA_SUPPORT)
1150 unblock_short_extra_support(inf, now);
1151 #endif
1153 if (inf->extra & EXTRA_AWARE)
1154 #if (EXTRA == EXTRA_OFF)
1156 #elif (EXTRA == EXTRA_ROUNDR)
1157 /*Favour domains which got short unblocked*/
1158 extraq_add_head(d, EXTRA_UTIL_Q);
1159 #elif (EXTRA == EXTRA_SLICE_WEIGHT \
1160 || EXTRA == EXTRA_BLOCK_WEIGHT)
1161 extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
1162 #endif
1164 else {
1165 PRINT(4,"long unblocking\n");
1166 /*long unblocking*/
1167 #ifdef SEDF_STATS
1168 inf->long_block_tot++;
1169 #endif
1170 #if (UNBLOCK == UNBLOCK_ISOCHRONOUS_EDF)
1171 unblock_long_vcons(inf, now);
1172 #elif (UNBLOCK == UNBLOCK_EDF \
1173 || UNBLOCK == UNBLOCK_EXTRA_SUPPORT)
1174 unblock_long_cons_b(inf, now);
1175 #elif (UNBLOCK == UNBLOCK_ATROPOS)
1176 unblock_long_cons_c(inf, now);
1177 #elif (UNBLOCK == UNBLOCK_SHORT_RESUME)
1178 unblock_long_cons_b(inf, now);
1179 /*unblock_short_cons_c(inf, now);*/
1180 #elif (UNBLOCK == UNBLOCK_BURST)
1181 unblock_long_burst(inf, now);
1182 #endif
1184 if (inf->extra & EXTRA_AWARE) {
1185 #if (EXTRA == EXTRA_OFF)
1187 #elif (EXTRA == EXTRA_ROUNDR)
1188 extraq_add_head(d, EXTRA_UTIL_Q);
1189 #elif (EXTRA == EXTRA_SLICE_WEIGHT \
1190 || EXTRA == EXTRA_BLOCK_WEIGHT)
1191 extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
1192 #endif
1197 PRINT(3,"woke up domain %i (deadl= %llu period= %llu "\
1198 "now= %llu)\n",d->id,inf->absdead,inf->period,now);
1199 __add_to_waitqueue_sort(d);
1200 PRINT(3,"added to waitq\n");
1202 #ifdef SEDF_STATS
1203 /*do some statistics here...*/
1204 if (inf->absblock != 0) {
1205 inf->block_time_tot += now - inf->absblock;
1206 inf->penalty_time_tot +=
1207 PERIOD_BEGIN(inf) + inf->cputime - inf->absblock;
1209 #endif
1210 /*sanity check: make sure each extra-aware domain IS on the util-q!*/
1211 /*if (inf->extra & EXTRA_AWARE) {
1212 if (!extraq_on(d, EXTRA_UTIL_Q))
1213 printf("sedf_wake: domain %i is extra-aware, "\
1214 "but NOT on L1 extraq!\n",d->id);
1215 }*/
1217 /*check whether the awakened task needs to invoke the do_schedule
1218 routine. Try to avoid unnecessary runs but:
1219 Save approximation: Always switch to scheduler!*/
1220 if (should_switch(schedule_data[d->processor].curr, d, now)){
1221 #ifdef ADV_SCHED_HISTO
1222 adv_sched_hist_start(d->processor);
1223 #endif
1224 cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
1228 /*Print a lot of use-{full, less} information about a domains in the system*/
1229 static void sedf_dump_domain(struct domain *d) {
1230 printk("%u has=%c ", d->id,
1231 test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
1232 printk("p=%llu sl=%llu ddl=%llu w=%hu c=%llu sc=%i xtr(%s)=%llu",
1233 DOM_INFO(d)->period, DOM_INFO(d)->slice, DOM_INFO(d)->absdead,
1234 DOM_INFO(d)->weight, d->cpu_time, DOM_INFO(d)->score[EXTRA_UTIL_Q],
1235 (DOM_INFO(d)->extra & EXTRA_AWARE) ? "yes" : "no",
1236 DOM_INFO(d)->extra_time_tot);
1237 if (d->cpu_time !=0)
1238 printf(" (%lu%)", (DOM_INFO(d)->extra_time_tot * 100)
1239 / d->cpu_time);
1240 #ifdef SEDF_STATS
1241 if (DOM_INFO(d)->block_time_tot!=0)
1242 printf(" pen=%lu%", (DOM_INFO(d)->penalty_time_tot * 100) /
1243 DOM_INFO(d)->block_time_tot);
1244 if (DOM_INFO(d)->block_tot!=0)
1245 printf("\n blks=%lu sh=%lu (%lu%) (shc=%lu (%lu%) shex=%i "\
1246 "shexsl=%i) l=%lu (%lu%) avg: b=%llu p=%llu",
1247 DOM_INFO(d)->block_tot, DOM_INFO(d)->short_block_tot,
1248 (DOM_INFO(d)->short_block_tot * 100)
1249 / DOM_INFO(d)->block_tot, DOM_INFO(d)->short_cont,
1250 (DOM_INFO(d)->short_cont * 100) / DOM_INFO(d)->block_tot,
1251 DOM_INFO(d)->pen_extra_blocks,
1252 DOM_INFO(d)->pen_extra_slices,
1253 DOM_INFO(d)->long_block_tot,
1254 (DOM_INFO(d)->long_block_tot * 100) / DOM_INFO(d)->block_tot,
1255 (DOM_INFO(d)->block_time_tot) / DOM_INFO(d)->block_tot,
1256 (DOM_INFO(d)->penalty_time_tot) / DOM_INFO(d)->block_tot);
1257 #endif
1258 printf("\n");
/*dumps all domains on the specified cpu*/
1262 static void sedf_dump_cpu_state(int i)
1264 struct list_head *list, *queue, *tmp;
1265 int loop = 0;
1266 struct sedf_dom_info *d_inf;
1267 struct domain* d;
1269 printk("now=%llu\n",NOW());
1270 queue = RUNQ(i);
1271 printk("RUNQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
1272 (unsigned long) queue->next, (unsigned long) queue->prev);
1273 list_for_each_safe ( list, tmp, queue ) {
1274 printk("%3d: ",loop++);
1275 d_inf = list_entry(list, struct sedf_dom_info, list);
1276 sedf_dump_domain(d_inf->owner);
1279 queue = WAITQ(i); loop = 0;
1280 printk("\nWAITQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
1281 (unsigned long) queue->next, (unsigned long) queue->prev);
1282 list_for_each_safe ( list, tmp, queue ) {
1283 printk("%3d: ",loop++);
1284 d_inf = list_entry(list, struct sedf_dom_info, list);
1285 sedf_dump_domain(d_inf->owner);
1288 queue = EXTRAQ(i,EXTRA_PEN_Q); loop = 0;
1289 printk("\nEXTRAQ (penalty) rq %lx n: %lx, p: %lx\n",
1290 (unsigned long)queue, (unsigned long) queue->next,
1291 (unsigned long) queue->prev);
1292 list_for_each_safe ( list, tmp, queue ) {
1293 d_inf = list_entry(list, struct sedf_dom_info,
1294 extralist[EXTRA_PEN_Q]);
1295 printk("%3d: ",loop++);
1296 sedf_dump_domain(d_inf->owner);
1299 queue = EXTRAQ(i,EXTRA_UTIL_Q); loop = 0;
1300 printk("\nEXTRAQ (utilization) rq %lx n: %lx, p: %lx\n",
1301 (unsigned long)queue, (unsigned long) queue->next,
1302 (unsigned long) queue->prev);
1303 list_for_each_safe ( list, tmp, queue ) {
1304 d_inf = list_entry(list, struct sedf_dom_info,
1305 extralist[EXTRA_UTIL_Q]);
1306 printk("%3d: ",loop++);
1307 sedf_dump_domain(d_inf->owner);
1310 loop = 0;
1311 printk("\nnot on Q\n");
1312 for_each_domain(d) {
1313 if (!__task_on_queue(d) && (d->processor == i)) {
1314 printk("%3d: ",loop++);
1315 sedf_dump_domain(d);
1319 /*Adjusts periods and slices of the domains accordingly to their weights*/
1320 static inline int sedf_adjust_weights(struct domain *p,
1321 struct sched_adjdom_cmd *cmd) {
1322 int sumw[NR_CPUS];
1323 s_time_t sumt[NR_CPUS];
1324 int cpu;
1326 for (cpu=0; cpu < NR_CPUS; cpu++) {
1327 sumw[cpu] = 0;
1328 sumt[cpu] = 0;
1330 /*sum up all weights*/
1331 for_each_domain(p) {
1332 if (DOM_INFO(p)->weight)
1333 sumw[p->processor] += DOM_INFO(p)->weight;
1334 else {
1335 /*don't modify domains who don't have a weight, but sum
1336 up the time they need, projected to a WEIGHT_PERIOD,
1337 so that this time is not given to the weight-driven
1338 domains*/
1339 /*check for overflows*/
1340 ASSERT((WEIGHT_PERIOD < ULONG_MAX)
1341 && (DOM_INFO(p)->slice_orig < ULONG_MAX));
1342 sumt[p->processor] += (WEIGHT_PERIOD *
1343 DOM_INFO(p)->slice_orig) / DOM_INFO(p)->period_orig;
1346 /*adjust all slices (and periods) to the new weight*/
1347 for_each_domain(p) {
1348 if (DOM_INFO(p)->weight) {
1349 DOM_INFO(p)->period_orig =
1350 DOM_INFO(p)->period = WEIGHT_PERIOD;
1351 DOM_INFO(p)->slice_orig =
1352 DOM_INFO(p)->slice = (DOM_INFO(p)->weight *
1353 (WEIGHT_PERIOD -WEIGHT_SAFETY -
1354 sumt[p->processor])) / sumw[p->processor];
1357 return 0;
1360 /* set or fetch domain scheduling parameters */
1361 static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) {
1362 PRINT(2,"sedf_adjdom was called, domain-id %i new period %llu "\
1363 "new slice %llu\nlatency %llu extra:%s\n",
1364 p->id, cmd->u.sedf.period, cmd->u.sedf.slice,
1365 cmd->u.sedf.latency, (cmd->u.sedf.extratime)?"yes":"no");
1366 if ( cmd->direction == SCHED_INFO_PUT )
1368 /*check for sane parameters*/
1369 if (!cmd->u.sedf.period && !cmd->u.sedf.weight)
1370 return -EINVAL;
1371 /*weight driven domains*/
1372 if (cmd->u.sedf.weight) {
1373 DOM_INFO(p)->weight = cmd->u.sedf.weight;
1375 else {
1376 /*time driven domains*/
1377 DOM_INFO(p)->weight = 0;
1378 /* sanity checking! */
1379 if(cmd->u.sedf.slice > cmd->u.sedf.period )
1380 return -EINVAL;
1381 DOM_INFO(p)->period_orig =
1382 DOM_INFO(p)->period = cmd->u.sedf.period;
1383 DOM_INFO(p)->slice_orig =
1384 DOM_INFO(p)->slice = cmd->u.sedf.slice;
1386 if (sedf_adjust_weights(p,cmd))
1387 return -EINVAL;
1388 DOM_INFO(p)->extra = (DOM_INFO(p)-> extra & ~EXTRA_AWARE)
1389 | (cmd->u.sedf.extratime & EXTRA_AWARE);
1390 DOM_INFO(p)->latency = cmd->u.sedf.latency;
1391 extraq_check(p);
1393 else if ( cmd->direction == SCHED_INFO_GET )
1395 cmd->u.sedf.period = DOM_INFO(p)->period;
1396 cmd->u.sedf.slice = DOM_INFO(p)->slice;
1397 cmd->u.sedf.extratime = DOM_INFO(p)->extra & EXTRA_AWARE;
1398 cmd->u.sedf.latency = DOM_INFO(p)->latency;
1399 cmd->u.sedf.weight = DOM_INFO(p)->weight;
1401 PRINT(2,"sedf_adjdom_finished\n");
1402 return 0;
/*Interface table that registers the SEDF implementation (id SCHED_SEDF)
  with Xen's generic scheduler framework; each member points at the
  corresponding sedf_* hook defined earlier in this file.*/
struct scheduler sched_sedf_def = {
	.name     = "Simple EDF Scheduler",
	.opt_name = "sedf",
	.sched_id = SCHED_SEDF,

	.init_idle_task = sedf_init_idle_task,
	.alloc_task     = sedf_alloc_task,
	.add_task       = sedf_add_task,
	.free_task      = sedf_free_task,
	.init_scheduler = sedf_init_scheduler,
	.do_schedule    = sedf_do_schedule,
	.dump_cpu_state = sedf_dump_cpu_state,
	.sleep          = sedf_sleep,
	.wake           = sedf_wake,
	.adjdom         = sedf_adjdom,
};