debuggers.hg: view of xen/common/timer.c @ 22906:700ac6445812
changeset: "Now add KDB to the non-kdb tree"
author:    Mukesh Rathor
date:      Thu Feb 03 15:42:41 2011 -0800
/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <xen/cpu.h>
#include <xen/rcupdate.h>
#include <xen/symbols.h>
#include <asm/system.h>
#include <asm/desc.h>
#include <asm/atomic.h>
/* We program the time hardware this far behind the closest deadline. */
static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
integer_param("timer_slop", timer_slop);

struct timers {
    spinlock_t     lock;
    struct timer **heap;
    struct timer  *list;
    struct timer  *running;
    struct list_head inactive;
} __cacheline_aligned;

static DEFINE_PER_CPU(struct timers, timers);

/* Protects lock-free access to per-timer cpu field against cpu offlining. */
static DEFINE_RCU_READ_LOCK(timer_cpu_read_lock);

DEFINE_PER_CPU(s_time_t, timer_deadline);
/****************************************************************************
 * HEAP OPERATIONS.
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
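
/*
 * The heap is a 1-based array of timer pointers.  heap[0] is never a
 * timer: its first two u16 slots are overlaid with the heap's current
 * size and allocated limit, so a heap obtained via
 * xmalloc_array(struct timer *, limit + 1) carries its own bookkeeping,
 * live entries occupy heap[1] .. heap[size], and the earliest deadline
 * is always at heap[1].
 */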
/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}
/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}
/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_from_heap(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}
/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_to_heap(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);

    /* Fail if the heap is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
        return 0;

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);

    return (t->heap_offset == 1);
}
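
/*
 * The functions above maintain a standard binary min-heap on ->expires:
 * the parent of heap[pos] is heap[pos>>1] and its children are
 * heap[pos<<1] and heap[(pos<<1)+1].  Every move also refreshes the
 * timer's ->heap_offset, so a timer can later be deleted in O(log n)
 * without a search.  A TRUE return from add/remove means heap[1]
 * changed, i.e. the CPU's earliest deadline moved and the hardware
 * timer may need reprogramming.
 */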
/****************************************************************************
 * LINKED LIST OPERATIONS.
 */

static int remove_from_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( (curr = *_pprev) != t )
        _pprev = &curr->list_next;

    *_pprev = t->list_next;

    return (_pprev == pprev);
}

static int add_to_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( ((curr = *_pprev) != NULL) && (curr->expires <= t->expires) )
        _pprev = &curr->list_next;

    t->list_next = curr;
    *_pprev = t;

    return (_pprev == pprev);
}
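
/*
 * The list is the overflow path used when the heap is full: it is kept
 * sorted by ascending ->expires, so insertion is O(n) but the earliest
 * deadline is always at the head.  As with the heap helpers, a TRUE
 * return means the head (and hence the CPU's next deadline) changed.
 */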
/****************************************************************************
 * TIMER OPERATIONS.
 */
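
/*
 * A timer's ->status records where it currently lives: inactive (on the
 * per-CPU inactive list), in_heap or in_list (pending, in one of the two
 * queues), killed (being destroyed), or invalid (transient, while a
 * timer is moved between queues).  remove_entry()/add_entry() below
 * translate a status into the matching heap or list operation.
 */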
static int remove_entry(struct timer *t)
{
    struct timers *timers = &per_cpu(timers, t->cpu);
    int rc;

    switch ( t->status )
    {
    case TIMER_STATUS_in_heap:
        rc = remove_from_heap(timers->heap, t);
        break;
    case TIMER_STATUS_in_list:
        rc = remove_from_list(&timers->list, t);
        break;
    default:
        rc = 0;
        BUG();
    }

    t->status = TIMER_STATUS_invalid;
    return rc;
}
static int add_entry(struct timer *t)
{
    struct timers *timers = &per_cpu(timers, t->cpu);
    int rc;

    ASSERT(t->status == TIMER_STATUS_invalid);

    /* Try to add to heap. t->heap_offset indicates whether we succeed. */
    t->heap_offset = 0;
    t->status = TIMER_STATUS_in_heap;
    rc = add_to_heap(timers->heap, t);
    if ( t->heap_offset != 0 )
        return rc;

    /* Fall back to adding to the slower linked list. */
    t->status = TIMER_STATUS_in_list;
    return add_to_list(&timers->list, t);
}
static inline void activate_timer(struct timer *timer)
{
    ASSERT(timer->status == TIMER_STATUS_inactive);
    timer->status = TIMER_STATUS_invalid;
    list_del(&timer->inactive);

    if ( add_entry(timer) )
        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);
}

static inline void deactivate_timer(struct timer *timer)
{
    if ( remove_entry(timer) )
        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);

    timer->status = TIMER_STATUS_inactive;
    list_add(&timer->inactive, &per_cpu(timers, timer->cpu).inactive);
}
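
/*
 * In both helpers a TRUE return from add_entry()/remove_entry() means
 * the earliest deadline on the owning CPU changed, so TIMER_SOFTIRQ is
 * raised there; timer_softirq_action() then recomputes timer_deadline
 * and reprograms the hardware timer.
 */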
static inline bool_t timer_lock(struct timer *timer)
{
    unsigned int cpu;

    rcu_read_lock(&timer_cpu_read_lock);

    for ( ; ; )
    {
        cpu = atomic_read16(&timer->cpu);
        if ( unlikely(cpu == TIMER_CPU_status_killed) )
        {
            rcu_read_unlock(&timer_cpu_read_lock);
            return 0;
        }
        spin_lock(&per_cpu(timers, cpu).lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&per_cpu(timers, cpu).lock);
    }

    rcu_read_unlock(&timer_cpu_read_lock);
    return 1;
}
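
/*
 * timer_lock() must cope with the timer's owning CPU changing under its
 * feet: it samples ->cpu without a lock, takes that CPU's queue lock,
 * then re-checks ->cpu and retries if the timer migrated in between.
 * The RCU read lock protects that lock-free sample against the CPU
 * going offline (see timer_cpu_read_lock above).  A FALSE return means
 * the timer has been killed and must not be touched.
 */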
#define timer_lock_irqsave(t, flags) ({         \
    bool_t __x;                                 \
    local_irq_save(flags);                      \
    if ( !(__x = timer_lock(t)) )               \
        local_irq_restore(flags);               \
    __x;                                        \
})

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&per_cpu(timers, timer->cpu).lock);
}

#define timer_unlock_irqrestore(t, flags) ({    \
    timer_unlock(t);                            \
    local_irq_restore(flags);                   \
})
static bool_t active_timer(struct timer *timer)
{
    ASSERT(timer->status >= TIMER_STATUS_inactive);
    ASSERT(timer->status <= TIMER_STATUS_in_list);
    return (timer->status >= TIMER_STATUS_in_heap);
}
void init_timer(
    struct timer *timer,
    void        (*function)(void *),
    void         *data,
    unsigned int  cpu)
{
    unsigned long flags;
    memset(timer, 0, sizeof(*timer));
    timer->function = function;
    timer->data = data;
    atomic_write16(&timer->cpu, cpu);
    timer->status = TIMER_STATUS_inactive;
    if ( !timer_lock_irqsave(timer, flags) )
        BUG();
    list_add(&timer->inactive, &per_cpu(timers, cpu).inactive);
    timer_unlock_irqrestore(timer, flags);
}
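
/*
 * A minimal caller sketch (illustrative only; my_timer and my_handler
 * are hypothetical names, not part of this file):
 *
 *     static struct timer my_timer;
 *
 *     static void my_handler(void *data)
 *     {
 *         // Runs in TIMER_SOFTIRQ context on the owning CPU; a handler
 *         // may safely re-arm its own timer with set_timer().
 *         set_timer(&my_timer, NOW() + MILLISECS(10));
 *     }
 *
 *     init_timer(&my_timer, my_handler, NULL, smp_processor_id());
 *     set_timer(&my_timer, NOW() + MILLISECS(10));
 *     ...
 *     kill_timer(&my_timer);   // required before freeing the struct
 */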
void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    timer->expires = expires;

    activate_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}
void stop_timer(struct timer *timer)
{
    unsigned long flags;

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}
void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    unsigned int old_cpu;
    bool_t active;
    unsigned long flags;

    rcu_read_lock(&timer_cpu_read_lock);

    for ( ; ; )
    {
        old_cpu = atomic_read16(&timer->cpu);
        if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) )
        {
            rcu_read_unlock(&timer_cpu_read_lock);
            return;
        }

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
            spin_lock(&per_cpu(timers, new_cpu).lock);
        }
        else
        {
            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
            spin_lock(&per_cpu(timers, old_cpu).lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&per_cpu(timers, old_cpu).lock);
        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    }

    rcu_read_unlock(&timer_cpu_read_lock);

    active = active_timer(timer);
    if ( active )
        deactivate_timer(timer);

    list_del(&timer->inactive);
    atomic_write16(&timer->cpu, new_cpu);
    list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);

    if ( active )
        activate_timer(timer);

    spin_unlock(&per_cpu(timers, old_cpu).lock);
    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
}
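
/*
 * Note the lock ordering above: when two per-CPU queue locks must be
 * held at once, the lower-numbered CPU's lock is always taken first
 * (both here and in migrate_timers_from_cpu() below), which rules out
 * an AB-BA deadlock between concurrent migrations in opposite
 * directions.
 */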
void kill_timer(struct timer *timer)
{
    unsigned int old_cpu, cpu;
    unsigned long flags;

    BUG_ON(this_cpu(timers).running == timer);

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    list_del(&timer->inactive);
    timer->status = TIMER_STATUS_killed;
    old_cpu = timer->cpu;
    atomic_write16(&timer->cpu, TIMER_CPU_status_killed);

    spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags);

    for_each_online_cpu ( cpu )
        while ( per_cpu(timers, cpu).running == timer )
            cpu_relax();
}
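
/*
 * The final loop matters: execute_timer() drops the queue lock while a
 * handler runs, so dequeuing the timer does not guarantee it has
 * finished.  kill_timer() therefore spins until no CPU reports the
 * timer as ->running (checking every online CPU, since the timer may
 * have migrated), after which the caller may safely free the structure.
 */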
static void execute_timer(struct timers *ts, struct timer *t)
{
    void (*fn)(void *) = t->function;
    void *data = t->data;

    t->status = TIMER_STATUS_inactive;
    list_add(&t->inactive, &ts->inactive);

    ts->running = t;
    spin_unlock_irq(&ts->lock);
    (*fn)(data);
    spin_lock_irq(&ts->lock);
    ts->running = NULL;
}
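
/*
 * The queue lock is dropped around the callback so that a handler can
 * call set_timer()/stop_timer() on its own timer (or on other timers of
 * the same CPU) without self-deadlocking; ->running is what kill_timer()
 * polls to synchronise against a handler still in flight.
 */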
static void timer_softirq_action(void)
{
    struct timer  *t, **heap, *next;
    struct timers *ts;
    s_time_t       now, deadline;

    ts = &this_cpu(timers);
    heap = ts->heap;

    /* If we overflowed the heap, try to allocate a larger heap. */
    if ( unlikely(ts->list != NULL) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        struct timer **newheap = xmalloc_array(struct timer *, new_limit + 1);
        if ( newheap != NULL )
        {
            spin_lock_irq(&ts->lock);
            memcpy(newheap, heap, (old_limit + 1) * sizeof(*heap));
            SET_HEAP_LIMIT(newheap, new_limit);
            ts->heap = newheap;
            spin_unlock_irq(&ts->lock);
            if ( old_limit != 0 )
                xfree(heap);
            heap = newheap;
        }
    }
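
    /*
     * Each growth step multiplies the usable limit roughly 16-fold:
     * (2^n)-1 becomes (2^(n+4))-1, e.g. 0 -> 15 -> 255 -> 4095.  The old
     * heap is freed only when old_limit != 0, because a limit of 0 means
     * this CPU is still on the shared static dummy_heap, which must not
     * be xfree()d.
     */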
    spin_lock_irq(&ts->lock);

    now = NOW();

    /* Execute ready heap timers. */
    while ( (GET_HEAP_SIZE(heap) != 0) &&
            ((t = heap[1])->expires < now) )
    {
        remove_from_heap(heap, t);
        execute_timer(ts, t);
    }

    /* Execute ready list timers. */
    while ( ((t = ts->list) != NULL) && (t->expires < now) )
    {
        ts->list = t->list_next;
        execute_timer(ts, t);
    }

    /* Try to move timers from linked list to more efficient heap. */
    next = ts->list;
    ts->list = NULL;
    while ( unlikely((t = next) != NULL) )
    {
        next = t->list_next;
        t->status = TIMER_STATUS_invalid;
        add_entry(t);
    }

    /* Find earliest deadline from head of linked list and top of heap. */
    deadline = STIME_MAX;
    if ( GET_HEAP_SIZE(heap) != 0 )
        deadline = heap[1]->expires;
    if ( (ts->list != NULL) && (ts->list->expires < deadline) )
        deadline = ts->list->expires;
    this_cpu(timer_deadline) =
        (deadline == STIME_MAX) ? 0 : deadline + timer_slop;

    if ( !reprogram_timer(this_cpu(timer_deadline)) )
        raise_softirq(TIMER_SOFTIRQ);

    spin_unlock_irq(&ts->lock);
}
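
/*
 * A timer_deadline of 0 means "no pending timer"; otherwise the hardware
 * is programmed timer_slop (default 50us, tunable via the "timer_slop"
 * boot parameter) past the earliest deadline so that closely spaced
 * expiries coalesce into a single interrupt.  If reprogram_timer() fails
 * because the deadline is already too close or in the past, the softirq
 * is simply re-raised and the queues are scanned again.
 */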
s_time_t align_timer(s_time_t firsttick, uint64_t period)
{
    if ( !period )
        return firsttick;

    return firsttick + (period - 1) - ((firsttick - 1) % period);
}
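
/*
 * i.e. round firsttick up to the next multiple of period, leaving exact
 * multiples unchanged.  Worked example with period = 100: firsttick 100
 * stays 100 (100 + 99 - (99 % 100) = 100), while firsttick 101 becomes
 * 200 (101 + 99 - (100 % 100) = 200).
 */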
static void dump_timer(struct timer *t, s_time_t now)
{
    printk("  ex=%8"PRId64"us timer=%p cb=%p(%p)",
           (t->expires - now) / 1000, t, t->function, t->data);
    print_symbol(" %s\n", (unsigned long)t->function);
}
static void dump_timerq(unsigned char key)
{
    struct timer  *t;
    struct timers *ts;
    unsigned long  flags;
    s_time_t       now = NOW();
    int            i, j;

    printk("Dumping timer queues:\n");

    for_each_online_cpu( i )
    {
        ts = &per_cpu(timers, i);

        printk("CPU%02d:\n", i);
        spin_lock_irqsave(&ts->lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
            dump_timer(ts->heap[j], now);
        for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ )
            dump_timer(t, now);
        spin_unlock_irqrestore(&ts->lock, flags);
    }
}
static struct keyhandler dump_timerq_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_timerq,
    .desc = "dump timer queues"
};
static void migrate_timers_from_cpu(unsigned int old_cpu)
{
    unsigned int new_cpu = first_cpu(cpu_online_map);
    struct timers *old_ts, *new_ts;
    struct timer *t;
    bool_t notify = 0;

    ASSERT(!cpu_online(old_cpu) && cpu_online(new_cpu));

    old_ts = &per_cpu(timers, old_cpu);
    new_ts = &per_cpu(timers, new_cpu);

    if ( old_cpu < new_cpu )
    {
        spin_lock_irq(&old_ts->lock);
        spin_lock(&new_ts->lock);
    }
    else
    {
        spin_lock_irq(&new_ts->lock);
        spin_lock(&old_ts->lock);
    }

    while ( (t = GET_HEAP_SIZE(old_ts->heap)
             ? old_ts->heap[1] : old_ts->list) != NULL )
    {
        remove_entry(t);
        atomic_write16(&t->cpu, new_cpu);
        notify |= add_entry(t);
    }

    while ( !list_empty(&old_ts->inactive) )
    {
        t = list_entry(old_ts->inactive.next, struct timer, inactive);
        list_del(&t->inactive);
        atomic_write16(&t->cpu, new_cpu);
        list_add(&t->inactive, &new_ts->inactive);
    }

    spin_unlock(&old_ts->lock);
    spin_unlock_irq(&new_ts->lock);
    local_irq_enable();

    if ( notify )
        cpu_raise_softirq(new_cpu, TIMER_SOFTIRQ);
}
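
/*
 * Called from the CPU notifier below when a CPU dies (or fails to come
 * up): every timer still owned by that CPU, pending or inactive, is
 * handed to the first online CPU, which is then notified via
 * TIMER_SOFTIRQ if its earliest deadline changed as a result.
 */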
static struct timer *dummy_heap;

static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    struct timers *ts = &per_cpu(timers, cpu);

    switch ( action )
    {
    case CPU_UP_PREPARE:
        INIT_LIST_HEAD(&ts->inactive);
        spin_lock_init(&ts->lock);
        ts->heap = &dummy_heap;
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        migrate_timers_from_cpu(cpu);
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}
static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};

void __init timer_init(void)
{
    void *cpu = (void *)(long)smp_processor_id();

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
    register_cpu_notifier(&cpu_nfb);

    register_keyhandler('a', &dump_timerq_keyhandler);
}
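
/*
 * The dummy_heap trick: &dummy_heap points at a single pointer-sized
 * slot whose leading u16s read as size == 0 and limit == 0, so the first
 * add_to_heap() on any CPU fails, the timer lands on the overflow list,
 * and timer_softirq_action() then allocates that CPU a real heap (the
 * old_limit != 0 check there is what keeps dummy_heap from being freed).
 */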
#ifdef XEN_KDB_CONFIG
#include <xen/symbols.h>
void kdb_dump_timer_queues(void)
{
    struct timer *t;
    struct timers *ts;
    unsigned long sz, offs;
    char buf[KSYM_NAME_LEN+1];
    int cpu, j;
    s_time_t now = NOW();

    for_each_online_cpu( cpu )
    {
        ts = &per_cpu(timers, cpu);
        kdbp("CPU[%02d]: NOW:0x%08x%08x\n", cpu, (u32)(now>>32), (u32)now);

        /* timers in the heap */
        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ ) {
            t = ts->heap[j];
            kdbp("    %d: exp=0x%08x%08x fn:%s data:%p\n",
                 j, (u32)(t->expires>>32), (u32)t->expires,
                 symbols_lookup((unsigned long)t->function, &sz, &offs, buf),
                 t->data);
        }
        /* timers on the link list */
        for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ ) {
            kdbp("    L%d: exp=0x%08x%08x fn:%s data:%p\n",
                 j, (u32)(t->expires>>32), (u32)t->expires,
                 symbols_lookup((unsigned long)t->function, &sz, &offs, buf),
                 t->data);
        }
    }
}
#endif
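
/*
 * This KDB variant of dump_timerq() is the code added by this changeset.
 * Unlike dump_timerq() it takes no queue locks and prints 64-bit values
 * as two 32-bit halves via kdbp(), presumably because the debugger runs
 * with the other CPUs stopped, where spinning on a queue lock could hang
 * the debug session.
 */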
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */