/root/src/xen/xen/common/timer.c
Line | Count | Source |
1 | | /****************************************************************************** |
2 | | * timer.c |
3 | | * |
4 | | * Copyright (c) 2002-2003 Rolf Neugebauer |
5 | | * Copyright (c) 2002-2005 K A Fraser |
6 | | */ |
7 | | |
8 | | #include <xen/init.h> |
9 | | #include <xen/types.h> |
10 | | #include <xen/errno.h> |
11 | | #include <xen/sched.h> |
12 | | #include <xen/lib.h> |
13 | | #include <xen/smp.h> |
14 | | #include <xen/perfc.h> |
15 | | #include <xen/time.h> |
16 | | #include <xen/softirq.h> |
17 | | #include <xen/timer.h> |
18 | | #include <xen/keyhandler.h> |
19 | | #include <xen/percpu.h> |
20 | | #include <xen/cpu.h> |
21 | | #include <xen/rcupdate.h> |
22 | | #include <xen/symbols.h> |
23 | | #include <asm/system.h> |
24 | | #include <asm/desc.h> |
25 | | #include <asm/atomic.h> |
26 | | |
27 | | /* We program the time hardware this far behind the closest deadline. */ |
28 | | static unsigned int timer_slop __read_mostly = 50000; /* 50 us */ |
29 | | integer_param("timer_slop", timer_slop); |
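The slop only takes effect at the bottom of timer_softirq_action() (source line 510): the programmed deadline is clamped to no earlier than now + timer_slop, so expiries that fall within the same 50 us window share a single hardware interrupt, and booting with timer_slop=0 effectively disables the batching. A minimal sketch of that clamp, assuming NOW() and MICROSECS() from xen/time.h:

    s_time_t now = NOW();
    s_time_t deadline = now + MICROSECS(10);     /* earliest pending expiry */

    deadline = MAX(deadline, now + timer_slop);  /* programmed ~50us out    */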
30 | | |
31 | | struct timers { |
32 | | spinlock_t lock; |
33 | | struct timer **heap; |
34 | | struct timer *list; |
35 | | struct timer *running; |
36 | | struct list_head inactive; |
37 | | } __cacheline_aligned; |
38 | | |
39 | | static DEFINE_PER_CPU(struct timers, timers); |
40 | | |
41 | | /* Protects lock-free access to per-timer cpu field against cpu offlining. */ |
42 | | static DEFINE_RCU_READ_LOCK(timer_cpu_read_lock); |
43 | | |
44 | | DEFINE_PER_CPU(s_time_t, timer_deadline); |
45 | | |
46 | | /**************************************************************************** |
47 | | * HEAP OPERATIONS. |
48 | | */ |
49 | | |
50 | 22.3M | #define GET_HEAP_SIZE(_h) ((int)(((u16 *)(_h))[0])) |
51 | 13.4M | #define SET_HEAP_SIZE(_h,_v) (((u16 *)(_h))[0] = (u16)(_v)) |
52 | | |
53 | 12 | #define GET_HEAP_LIMIT(_h) ((int)(((u16 *)(_h))[1])) |
54 | 13 | #define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v)) |
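These macros overload element 0 of the heap array: instead of holding a timer pointer, slot 0 carries two u16 fields, the current occupancy and the allocated capacity, so real entries sit at indices 1..size and the array stays 1-indexed for the parent/child index arithmetic below. A standalone sketch of the same layout with a hypothetical alloc_heap() helper:

    #include <stdint.h>
    #include <stdlib.h>

    struct timer;   /* opaque for the purposes of the sketch */

    /* Allocate a 1-indexed heap of 'limit' slots plus the metadata slot 0.
     * Relies on sizeof(struct timer *) >= 2 * sizeof(uint16_t). */
    static struct timer **alloc_heap(uint16_t limit)
    {
        struct timer **h = calloc(limit + 1, sizeof(*h));

        if ( h != NULL )
        {
            ((uint16_t *)h)[0] = 0;       /* as SET_HEAP_SIZE(h, 0)      */
            ((uint16_t *)h)[1] = limit;   /* as SET_HEAP_LIMIT(h, limit) */
        }
        return h;
    }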
55 | | |
56 | | /* Sink down element @pos of @heap. */ |
57 | | static void down_heap(struct timer **heap, int pos) |
58 | 450k | { |
59 | 450k | int sz = GET_HEAP_SIZE(heap), nxt; |
60 | 450k | struct timer *t = heap[pos]; |
61 | 450k | |
62 | 621k | while ( (nxt = (pos << 1)) <= sz ) |
63 | 390k | { |
64 | 390k | if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) ) |
65 | 14.3k | nxt++; |
66 | 390k | if ( heap[nxt]->expires > t->expires ) |
67 | 220k | break; |
68 | 170k | heap[pos] = heap[nxt]; |
69 | 170k | heap[pos]->heap_offset = pos; |
70 | 170k | pos = nxt; |
71 | 170k | } |
72 | 450k | |
73 | 450k | heap[pos] = t; |
74 | 450k | t->heap_offset = pos; |
75 | 450k | } |
76 | | |
77 | | /* Float element @pos up @heap. */ |
78 | | static void up_heap(struct timer **heap, int pos) |
79 | 6.62M | { |
80 | 6.62M | struct timer *t = heap[pos]; |
81 | 6.62M | |
82 | 7.13M | while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) ) |
83 | 504k | { |
84 | 504k | heap[pos] = heap[pos>>1]; |
85 | 504k | heap[pos]->heap_offset = pos; |
86 | 504k | pos >>= 1; |
87 | 504k | } |
88 | 6.62M | |
89 | 6.62M | heap[pos] = t; |
90 | 6.62M | t->heap_offset = pos; |
91 | 6.62M | } |
92 | | |
93 | | |
94 | | /* Delete @t from @heap. Return TRUE if new top of heap. */ |
95 | | static int remove_from_heap(struct timer **heap, struct timer *t) |
96 | 6.79M | { |
97 | 6.79M | int sz = GET_HEAP_SIZE(heap); |
98 | 6.79M | int pos = t->heap_offset; |
99 | 6.79M | |
100 | 6.79M | if ( unlikely(pos == sz) ) |
101 | 6.41M | { |
102 | 6.41M | SET_HEAP_SIZE(heap, sz-1); |
103 | 6.41M | goto out; |
104 | 6.41M | } |
105 | 6.79M | |
106 | 378k | heap[pos] = heap[sz]; |
107 | 378k | heap[pos]->heap_offset = pos; |
108 | 378k | |
109 | 378k | SET_HEAP_SIZE(heap, --sz); |
110 | 378k | |
111 | 378k | if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) ) |
112 | 47 | up_heap(heap, pos); |
113 | 378k | else |
114 | 378k | down_heap(heap, pos); |
115 | 378k | |
116 | 6.81M | out: |
117 | 6.81M | return (pos == 1); |
118 | 378k | } |
119 | | |
120 | | |
121 | | /* Add new entry @t to @heap. Return TRUE if new top of heap. */ |
122 | | static int add_to_heap(struct timer **heap, struct timer *t) |
123 | 6.68M | { |
124 | 6.68M | int sz = GET_HEAP_SIZE(heap); |
125 | 6.68M | |
126 | 6.68M | /* Fail if the heap is full. */ |
127 | 6.68M | if ( unlikely(sz == GET_HEAP_LIMIT(heap)) ) |
128 | 16 | return 0; |
129 | 6.68M | |
130 | 6.68M | SET_HEAP_SIZE(heap, ++sz); |
131 | 6.68M | heap[sz] = t; |
132 | 6.68M | t->heap_offset = sz; |
133 | 6.68M | up_heap(heap, sz); |
134 | 6.68M | |
135 | 6.68M | return (t->heap_offset == 1); |
136 | 6.68M | } |
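Each timer caches its current index in t->heap_offset; both sift routines keep that back-pointer up to date, which is what lets remove_from_heap() start at the right slot without searching and lets add_entry() (below) inspect the offset to learn where, or whether, the entry landed. A hedged debugging sketch of the invariants this maintains (check_heap() is hypothetical, not part of this file):

    static void check_heap(struct timer **heap)
    {
        int i, sz = GET_HEAP_SIZE(heap);

        for ( i = 1; i <= sz; i++ )
        {
            /* The back-pointer agrees with the slot the timer occupies. */
            ASSERT(heap[i]->heap_offset == i);
            /* Min-heap property: a parent never expires after its child. */
            if ( i > 1 )
                ASSERT(heap[i >> 1]->expires <= heap[i]->expires);
        }
    }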
137 | | |
138 | | |
139 | | /**************************************************************************** |
140 | | * LINKED LIST OPERATIONS. |
141 | | */ |
142 | | |
143 | | static int remove_from_list(struct timer **pprev, struct timer *t) |
144 | 0 | { |
145 | 0 | struct timer *curr, **_pprev = pprev; |
146 | 0 | |
147 | 0 | while ( (curr = *_pprev) != t ) |
148 | 0 | _pprev = &curr->list_next; |
149 | 0 | |
150 | 0 | *_pprev = t->list_next; |
151 | 0 | |
152 | 0 | return (_pprev == pprev); |
153 | 0 | } |
154 | | |
155 | | static int add_to_list(struct timer **pprev, struct timer *t) |
156 | 16 | { |
157 | 16 | struct timer *curr, **_pprev = pprev; |
158 | 16 | |
159 | 22 | while ( ((curr = *_pprev) != NULL) && (curr->expires <= t->expires) ) |
160 | 6 | _pprev = &curr->list_next; |
161 | 16 | |
162 | 16 | t->list_next = curr; |
163 | 16 | *_pprev = t; |
164 | 16 | |
165 | 16 | return (_pprev == pprev); |
166 | 16 | } |
167 | | |
168 | | |
169 | | /**************************************************************************** |
170 | | * TIMER OPERATIONS. |
171 | | */ |
172 | | |
173 | | static int remove_entry(struct timer *t) |
174 | 6.67M | { |
175 | 6.67M | struct timers *timers = &per_cpu(timers, t->cpu); |
176 | 6.67M | int rc; |
177 | 6.67M | |
178 | 6.67M | switch ( t->status ) |
179 | 6.67M | { |
180 | 6.76M | case TIMER_STATUS_in_heap: |
181 | 6.76M | rc = remove_from_heap(timers->heap, t); |
182 | 6.76M | break; |
183 | 0 | case TIMER_STATUS_in_list: |
184 | 0 | rc = remove_from_list(&timers->list, t); |
185 | 0 | break; |
186 | 0 | default: |
187 | 0 | rc = 0; |
188 | 0 | BUG(); |
189 | 6.67M | } |
190 | 6.67M | |
191 | 6.81M | t->status = TIMER_STATUS_invalid; |
192 | 6.81M | return rc; |
193 | 6.67M | } |
194 | | |
195 | | static int add_entry(struct timer *t) |
196 | 6.64M | { |
197 | 6.64M | struct timers *timers = &per_cpu(timers, t->cpu); |
198 | 6.64M | int rc; |
199 | 6.64M | |
200 | 6.64M | ASSERT(t->status == TIMER_STATUS_invalid); |
201 | 6.64M | |
202 | 6.64M | /* Try to add to heap. t->heap_offset indicates whether we succeed. */ |
203 | 6.64M | t->heap_offset = 0; |
204 | 6.64M | t->status = TIMER_STATUS_in_heap; |
205 | 6.64M | rc = add_to_heap(timers->heap, t); |
206 | 6.64M | if ( t->heap_offset != 0 ) |
207 | 6.72M | return rc; |
208 | 6.64M | |
209 | 6.64M | /* Fall back to adding to the slower linked list. */ |
210 | 18.4E | t->status = TIMER_STATUS_in_list; |
211 | 18.4E | return add_to_list(&timers->list, t); |
212 | 6.64M | } |
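Note the overflow handshake: add_to_heap()'s return value already means "new head of heap", so it cannot also report failure; add_entry() therefore clears t->heap_offset beforehand and treats a still-zero offset as "the heap was full", falling back to the sorted linked list until the softirq handler can grow the heap.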
213 | | |
214 | | static inline void activate_timer(struct timer *timer) |
215 | 6.69M | { |
216 | 6.69M | ASSERT(timer->status == TIMER_STATUS_inactive); |
217 | 6.69M | timer->status = TIMER_STATUS_invalid; |
218 | 6.69M | list_del(&timer->inactive); |
219 | 6.69M | |
220 | 6.69M | if ( add_entry(timer) ) |
221 | 1.86M | cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ); |
222 | 6.69M | } |
223 | | |
224 | | static inline void deactivate_timer(struct timer *timer) |
225 | 6.68M | { |
226 | 6.68M | if ( remove_entry(timer) ) |
227 | 1.90M | cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ); |
228 | 6.68M | |
229 | 6.68M | timer->status = TIMER_STATUS_inactive; |
230 | 6.68M | list_add(&timer->inactive, &per_cpu(timers, timer->cpu).inactive); |
231 | 6.68M | } |
232 | | |
233 | | static inline bool_t timer_lock(struct timer *timer) |
234 | 13.3M | { |
235 | 13.3M | unsigned int cpu; |
236 | 13.3M | |
237 | 13.3M | rcu_read_lock(&timer_cpu_read_lock); |
238 | 13.3M | |
239 | 13.3M | for ( ; ; ) |
240 | 13.1M | { |
241 | 13.3M | cpu = read_atomic(&timer->cpu); |
242 | 13.3M | if ( unlikely(cpu == TIMER_CPU_status_killed) ) |
243 | 0 | { |
244 | 0 | rcu_read_unlock(&timer_cpu_read_lock); |
245 | 0 | return 0; |
246 | 0 | } |
247 | 13.3M | spin_lock(&per_cpu(timers, cpu).lock); |
248 | 13.3M | if ( likely(timer->cpu == cpu) ) |
249 | 13.5M | break; |
250 | 18.4E | spin_unlock(&per_cpu(timers, cpu).lock); |
251 | 18.4E | } |
252 | 13.3M | |
253 | 13.5M | rcu_read_unlock(&timer_cpu_read_lock); |
254 | 13.5M | return 1; |
255 | 13.3M | } |
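timer_lock() has to tolerate a concurrent migrate_timer(): it reads timer->cpu, takes that CPU's queue lock, and only trusts the result if the field still holds the same value, retrying otherwise. Because migrate_timer() rewrites the field while holding both the old and the new queue locks, a successful re-check proves the caller now owns the lock protecting this timer; the RCU read section, as the comment near the top of the file says, protects the lock-free read of the cpu field against CPU offlining. A generic sketch of the same "read owner, lock, re-check" pattern (obj and queues are hypothetical):

    for ( ; ; )
    {
        unsigned int cpu = read_atomic(&obj->owner);

        spin_lock(&per_cpu(queues, cpu).lock);
        if ( likely(read_atomic(&obj->owner) == cpu) )
            break;                                  /* locked the right queue */
        spin_unlock(&per_cpu(queues, cpu).lock);    /* raced with a migration */
    }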
256 | | |
257 | 13.5M | #define timer_lock_irqsave(t, flags) ({ \ |
258 | 13.5M | bool_t __x; \ |
259 | 13.5M | local_irq_save(flags); \ |
260 | 13.5M | if ( !(__x = timer_lock(t)) ) \ |
261 | 0 | local_irq_restore(flags); \ |
262 | 13.5M | __x; \ |
263 | 13.5M | }) |
264 | | |
265 | | static inline void timer_unlock(struct timer *timer) |
266 | 13.6M | { |
267 | 13.6M | spin_unlock(&per_cpu(timers, timer->cpu).lock); |
268 | 13.6M | } |
269 | | |
270 | 13.5M | #define timer_unlock_irqrestore(t, flags) ({ \ |
271 | 13.5M | timer_unlock(t); \ |
272 | 13.5M | local_irq_restore(flags); \ |
273 | 13.5M | }) |
274 | | |
275 | | |
276 | | static bool_t active_timer(struct timer *timer) |
277 | 13.4M | { |
278 | 13.4M | ASSERT(timer->status >= TIMER_STATUS_inactive); |
279 | 13.4M | ASSERT(timer->status <= TIMER_STATUS_in_list); |
280 | 13.4M | return (timer->status >= TIMER_STATUS_in_heap); |
281 | 13.4M | } |
282 | | |
283 | | |
284 | | void init_timer( |
285 | | struct timer *timer, |
286 | | void (*function)(void *), |
287 | | void *data, |
288 | | unsigned int cpu) |
289 | 179 | { |
290 | 179 | unsigned long flags; |
291 | 179 | memset(timer, 0, sizeof(*timer)); |
292 | 179 | timer->function = function; |
293 | 179 | timer->data = data; |
294 | 179 | write_atomic(&timer->cpu, cpu); |
295 | 179 | timer->status = TIMER_STATUS_inactive; |
296 | 179 | if ( !timer_lock_irqsave(timer, flags) ) |
297 | 0 | BUG(); |
298 | 179 | list_add(&timer->inactive, &per_cpu(timers, cpu).inactive); |
299 | 179 | timer_unlock_irqrestore(timer, flags); |
300 | 179 | } |
301 | | |
302 | | |
303 | | void set_timer(struct timer *timer, s_time_t expires) |
304 | 6.70M | { |
305 | 6.70M | unsigned long flags; |
306 | 6.70M | |
307 | 6.70M | if ( !timer_lock_irqsave(timer, flags) ) |
308 | 0 | return; |
309 | 6.70M | |
310 | 6.70M | if ( active_timer(timer) ) |
311 | 86.3k | deactivate_timer(timer); |
312 | 6.70M | |
313 | 6.70M | timer->expires = expires; |
314 | 6.70M | |
315 | 6.70M | activate_timer(timer); |
316 | 6.70M | |
317 | 6.70M | timer_unlock_irqrestore(timer, flags); |
318 | 6.70M | } |
319 | | |
320 | | |
321 | | void stop_timer(struct timer *timer) |
322 | 6.80M | { |
323 | 6.80M | unsigned long flags; |
324 | 6.80M | |
325 | 6.80M | if ( !timer_lock_irqsave(timer, flags) ) |
326 | 0 | return; |
327 | 6.80M | |
328 | 6.80M | if ( active_timer(timer) ) |
329 | 6.62M | deactivate_timer(timer); |
330 | 6.80M | |
331 | 6.80M | timer_unlock_irqrestore(timer, flags); |
332 | 6.80M | } |
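Taken together, the public interface is: initialise once, then arm and disarm as often as needed. A hedged usage sketch with a hypothetical callback:

    static struct timer example_timer;

    static void example_fn(void *data)
    {
        printk("timer fired on CPU%u\n", smp_processor_id());
    }

    static void example_start(void)
    {
        init_timer(&example_timer, example_fn, NULL, smp_processor_id());
        set_timer(&example_timer, NOW() + SECONDS(1));   /* fire ~1s from now */
    }

    static void example_stop(void)
    {
        stop_timer(&example_timer);   /* disarm; kill_timer() only once the
                                       * timer's memory is about to go away */
    }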
333 | | |
334 | | bool timer_expires_before(struct timer *timer, s_time_t t) |
335 | 0 | { |
336 | 0 | unsigned long flags; |
337 | 0 | bool ret; |
338 | 0 | |
339 | 0 | if ( !timer_lock_irqsave(timer, flags) ) |
340 | 0 | return false; |
341 | 0 | |
342 | 0 | ret = active_timer(timer) && timer->expires <= t; |
343 | 0 | |
344 | 0 | timer_unlock_irqrestore(timer, flags); |
345 | 0 | |
346 | 0 | return ret; |
347 | 0 | } |
348 | | |
349 | | void migrate_timer(struct timer *timer, unsigned int new_cpu) |
350 | 94.9k | { |
351 | 94.9k | unsigned int old_cpu; |
352 | 94.9k | bool_t active; |
353 | 94.9k | unsigned long flags; |
354 | 94.9k | |
355 | 94.9k | rcu_read_lock(&timer_cpu_read_lock); |
356 | 94.9k | |
357 | 94.9k | for ( ; ; ) |
358 | 94.9k | { |
359 | 95.0k | old_cpu = read_atomic(&timer->cpu); |
360 | 95.0k | if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) ) |
361 | 94.9k | { |
362 | 94.9k | rcu_read_unlock(&timer_cpu_read_lock); |
363 | 94.9k | return; |
364 | 94.9k | } |
365 | 95.0k | |
366 | 11 | if ( old_cpu < new_cpu ) |
367 | 40 | { |
368 | 40 | spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags); |
369 | 40 | spin_lock(&per_cpu(timers, new_cpu).lock); |
370 | 40 | } |
371 | 11 | else |
372 | 18.4E | { |
373 | 18.4E | spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags); |
374 | 18.4E | spin_lock(&per_cpu(timers, old_cpu).lock); |
375 | 18.4E | } |
376 | 11 | |
377 | 11 | if ( likely(timer->cpu == old_cpu) ) |
378 | 71 | break; |
379 | 11 | |
380 | 18.4E | spin_unlock(&per_cpu(timers, old_cpu).lock); |
381 | 18.4E | spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags); |
382 | 18.4E | } |
383 | 94.9k | |
384 | 46 | rcu_read_unlock(&timer_cpu_read_lock); |
385 | 46 | |
386 | 46 | active = active_timer(timer); |
387 | 46 | if ( active ) |
388 | 42 | deactivate_timer(timer); |
389 | 46 | |
390 | 46 | list_del(&timer->inactive); |
391 | 46 | write_atomic(&timer->cpu, new_cpu); |
392 | 71 | list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive); |
393 | 71 | |
394 | 71 | if ( active ) |
395 | 42 | activate_timer(timer); |
396 | 71 | |
397 | 71 | spin_unlock(&per_cpu(timers, old_cpu).lock); |
398 | 71 | spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags); |
399 | 71 | } |
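The two-lock dance above avoids ABBA deadlock by always acquiring the per-CPU queue locks in ascending CPU order, the same discipline used by migrate_timers_from_cpu() further down. A minimal sketch of the ordering rule (the helper name is hypothetical; callers must guarantee a != b, as migrate_timer() does by returning early when old_cpu == new_cpu):

    static void lock_both_queues(unsigned int a, unsigned int b)
    {
        unsigned int lo = (a < b) ? a : b;
        unsigned int hi = (a < b) ? b : a;

        /* Interrupts are disabled with the outer lock (migrate_timer() uses
         * the _irqsave variant); the inner lock is a plain spin_lock(). */
        spin_lock_irq(&per_cpu(timers, lo).lock);
        spin_lock(&per_cpu(timers, hi).lock);
    }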
400 | | |
401 | | |
402 | | void kill_timer(struct timer *timer) |
403 | 0 | { |
404 | 0 | unsigned int old_cpu, cpu; |
405 | 0 | unsigned long flags; |
406 | 0 | |
407 | 0 | BUG_ON(this_cpu(timers).running == timer); |
408 | 0 | |
409 | 0 | if ( !timer_lock_irqsave(timer, flags) ) |
410 | 0 | return; |
411 | 0 | |
412 | 0 | if ( active_timer(timer) ) |
413 | 0 | deactivate_timer(timer); |
414 | 0 | |
415 | 0 | list_del(&timer->inactive); |
416 | 0 | timer->status = TIMER_STATUS_killed; |
417 | 0 | old_cpu = timer->cpu; |
418 | 0 | write_atomic(&timer->cpu, TIMER_CPU_status_killed); |
419 | 0 | |
420 | 0 | spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags); |
421 | 0 | |
422 | 0 | for_each_online_cpu ( cpu ) |
423 | 0 | while ( per_cpu(timers, cpu).running == timer ) |
424 | 0 | cpu_relax(); |
425 | 0 | } |
426 | | |
427 | | |
428 | | static void execute_timer(struct timers *ts, struct timer *t) |
429 | 25.8k | { |
430 | 25.8k | void (*fn)(void *) = t->function; |
431 | 25.8k | void *data = t->data; |
432 | 25.8k | |
433 | 25.8k | t->status = TIMER_STATUS_inactive; |
434 | 25.8k | list_add(&t->inactive, &ts->inactive); |
435 | 25.8k | |
436 | 25.8k | ts->running = t; |
437 | 25.8k | spin_unlock_irq(&ts->lock); |
438 | 25.8k | (*fn)(data); |
439 | 25.8k | spin_lock_irq(&ts->lock); |
440 | 25.8k | ts->running = NULL; |
441 | 25.8k | } |
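Dropping ts->lock around the callback is what makes the ts->running field necessary: it is the only thing kill_timer() (above) can spin on to know the handler has finished on every CPU before the timer's memory may be reused, and it is also why a timer must never be killed from its own callback, which is what the BUG_ON() at the top of kill_timer() enforces.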
442 | | |
443 | | |
444 | | static void timer_softirq_action(void) |
445 | 4.17M | { |
446 | 4.17M | struct timer *t, **heap, *next; |
447 | 4.17M | struct timers *ts; |
448 | 4.17M | s_time_t now, deadline; |
449 | 4.17M | |
450 | 4.17M | ts = &this_cpu(timers); |
451 | 4.17M | heap = ts->heap; |
452 | 4.17M | |
453 | 4.17M | /* If we overflowed the heap, try to allocate a larger heap. */ |
454 | 4.17M | if ( unlikely(ts->list != NULL) ) |
455 | 12 | { |
456 | 12 | /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */ |
457 | 12 | int old_limit = GET_HEAP_LIMIT(heap); |
458 | 12 | int new_limit = ((old_limit + 1) << 4) - 1; |
459 | 12 | struct timer **newheap = xmalloc_array(struct timer *, new_limit + 1); |
460 | 12 | if ( newheap != NULL ) |
461 | 12 | { |
462 | 12 | spin_lock_irq(&ts->lock); |
463 | 12 | memcpy(newheap, heap, (old_limit + 1) * sizeof(*heap)); |
464 | 12 | SET_HEAP_LIMIT(newheap, new_limit); |
465 | 12 | ts->heap = newheap; |
466 | 12 | spin_unlock_irq(&ts->lock); |
467 | 12 | if ( old_limit != 0 ) |
468 | 0 | xfree(heap); |
469 | 12 | heap = newheap; |
470 | 12 | } |
471 | 12 | } |
472 | 4.17M | |
473 | 4.17M | spin_lock_irq(&ts->lock); |
474 | 4.17M | |
475 | 4.17M | now = NOW(); |
476 | 4.17M | |
477 | 4.17M | /* Execute ready heap timers. */ |
478 | 4.20M | while ( (GET_HEAP_SIZE(heap) != 0) && |
479 | 2.07M | ((t = heap[1])->expires < now) ) |
480 | 25.9k | { |
481 | 25.9k | remove_from_heap(heap, t); |
482 | 25.9k | execute_timer(ts, t); |
483 | 25.9k | } |
484 | 4.17M | |
485 | 4.17M | /* Execute ready list timers. */ |
486 | 4.17M | while ( ((t = ts->list) != NULL) && (t->expires < now) ) |
487 | 3 | { |
488 | 3 | ts->list = t->list_next; |
489 | 3 | execute_timer(ts, t); |
490 | 3 | } |
491 | 4.17M | |
492 | 4.17M | /* Try to move timers from linked list to more efficient heap. */ |
493 | 4.17M | next = ts->list; |
494 | 4.17M | ts->list = NULL; |
495 | 4.17M | while ( unlikely((t = next) != NULL) ) |
496 | 13 | { |
497 | 13 | next = t->list_next; |
498 | 13 | t->status = TIMER_STATUS_invalid; |
499 | 13 | add_entry(t); |
500 | 13 | } |
501 | 4.17M | |
502 | 4.17M | /* Find earliest deadline from head of linked list and top of heap. */ |
503 | 4.17M | deadline = STIME_MAX; |
504 | 4.17M | if ( GET_HEAP_SIZE(heap) != 0 ) |
505 | 2.04M | deadline = heap[1]->expires; |
506 | 4.17M | if ( (ts->list != NULL) && (ts->list->expires < deadline) ) |
507 | 0 | deadline = ts->list->expires; |
508 | 4.17M | now = NOW(); |
509 | 4.17M | this_cpu(timer_deadline) = |
510 | 4.17M | (deadline == STIME_MAX) ? 0 : MAX(deadline, now + timer_slop); |
511 | 4.17M | |
512 | 4.17M | if ( !reprogram_timer(this_cpu(timer_deadline)) ) |
513 | 0 | raise_softirq(TIMER_SOFTIRQ); |
514 | 4.17M | |
515 | 4.17M | spin_unlock_irq(&ts->lock); |
516 | 4.17M | } |
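The heap is only ever grown here, in softirq context, and only after an insert has spilled into the linked list. Following the (2^n)-1 rule in the comment, the shared dummy heap (limit 0) grows to 15, then 255, 4095, 65535 slots, the largest values the u16 metadata fields can describe; the old_limit != 0 test is what keeps the static dummy_heap from ever being handed to xfree(). A quick check of the growth arithmetic:

    int i, limit = 0;                    /* the initial shared dummy heap */

    for ( i = 0; i < 4; i++ )
        limit = ((limit + 1) << 4) - 1;  /* 15, 255, 4095, 65535 */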
517 | | |
518 | | s_time_t align_timer(s_time_t firsttick, uint64_t period) |
519 | 0 | { |
520 | 0 | if ( !period ) |
521 | 0 | return firsttick; |
522 | 0 | |
523 | 0 | return firsttick + (period - 1) - ((firsttick - 1) % period); |
524 | 0 | } |
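align_timer() rounds firsttick up to the next multiple of period while leaving exact multiples untouched, and a period of zero means "no alignment". Worked examples of the formula:

    ASSERT(align_timer(250, 100) == 300);   /* rounded up to the next period */
    ASSERT(align_timer(300, 100) == 300);   /* exact multiples are preserved */
    ASSERT(align_timer(301, 100) == 400);
    ASSERT(align_timer(250,   0) == 250);   /* period 0: returned unchanged  */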
525 | | |
526 | | static void dump_timer(struct timer *t, s_time_t now) |
527 | 0 | { |
528 | 0 | printk(" ex=%12"PRId64"us timer=%p cb=%ps(%p)\n", |
529 | 0 | (t->expires - now) / 1000, t, t->function, t->data); |
530 | 0 | } |
531 | | |
532 | | static void dump_timerq(unsigned char key) |
533 | 0 | { |
534 | 0 | struct timer *t; |
535 | 0 | struct timers *ts; |
536 | 0 | unsigned long flags; |
537 | 0 | s_time_t now = NOW(); |
538 | 0 | int i, j; |
539 | 0 | |
540 | 0 | printk("Dumping timer queues:\n"); |
541 | 0 | |
542 | 0 | for_each_online_cpu( i ) |
543 | 0 | { |
544 | 0 | ts = &per_cpu(timers, i); |
545 | 0 | |
546 | 0 | printk("CPU%02d:\n", i); |
547 | 0 | spin_lock_irqsave(&ts->lock, flags); |
548 | 0 | for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ ) |
549 | 0 | dump_timer(ts->heap[j], now); |
550 | 0 | for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ ) |
551 | 0 | dump_timer(t, now); |
552 | 0 | spin_unlock_irqrestore(&ts->lock, flags); |
553 | 0 | } |
554 | 0 | } |
555 | | |
556 | | static void migrate_timers_from_cpu(unsigned int old_cpu) |
557 | 0 | { |
558 | 0 | unsigned int new_cpu = cpumask_any(&cpu_online_map); |
559 | 0 | struct timers *old_ts, *new_ts; |
560 | 0 | struct timer *t; |
561 | 0 | bool_t notify = 0; |
562 | 0 | |
563 | 0 | ASSERT(!cpu_online(old_cpu) && cpu_online(new_cpu)); |
564 | 0 | |
565 | 0 | old_ts = &per_cpu(timers, old_cpu); |
566 | 0 | new_ts = &per_cpu(timers, new_cpu); |
567 | 0 | |
568 | 0 | if ( old_cpu < new_cpu ) |
569 | 0 | { |
570 | 0 | spin_lock_irq(&old_ts->lock); |
571 | 0 | spin_lock(&new_ts->lock); |
572 | 0 | } |
573 | 0 | else |
574 | 0 | { |
575 | 0 | spin_lock_irq(&new_ts->lock); |
576 | 0 | spin_lock(&old_ts->lock); |
577 | 0 | } |
578 | 0 | |
579 | 0 | while ( (t = GET_HEAP_SIZE(old_ts->heap) |
580 | 0 | ? old_ts->heap[1] : old_ts->list) != NULL ) |
581 | 0 | { |
582 | 0 | remove_entry(t); |
583 | 0 | write_atomic(&t->cpu, new_cpu); |
584 | 0 | notify |= add_entry(t); |
585 | 0 | } |
586 | 0 | |
587 | 0 | while ( !list_empty(&old_ts->inactive) ) |
588 | 0 | { |
589 | 0 | t = list_entry(old_ts->inactive.next, struct timer, inactive); |
590 | 0 | list_del(&t->inactive); |
591 | 0 | write_atomic(&t->cpu, new_cpu); |
592 | 0 | list_add(&t->inactive, &new_ts->inactive); |
593 | 0 | } |
594 | 0 | |
595 | 0 | spin_unlock(&old_ts->lock); |
596 | 0 | spin_unlock_irq(&new_ts->lock); |
597 | 0 | |
598 | 0 | if ( notify ) |
599 | 0 | cpu_raise_softirq(new_cpu, TIMER_SOFTIRQ); |
600 | 0 | } |
601 | | |
602 | | static struct timer *dummy_heap; |
603 | | |
604 | | static int cpu_callback( |
605 | | struct notifier_block *nfb, unsigned long action, void *hcpu) |
606 | 34 | { |
607 | 34 | unsigned int cpu = (unsigned long)hcpu; |
608 | 34 | struct timers *ts = &per_cpu(timers, cpu); |
609 | 34 | |
610 | 34 | switch ( action ) |
611 | 34 | { |
612 | 12 | case CPU_UP_PREPARE: |
613 | 12 | INIT_LIST_HEAD(&ts->inactive); |
614 | 12 | spin_lock_init(&ts->lock); |
615 | 12 | ts->heap = &dummy_heap; |
616 | 12 | break; |
617 | 0 | case CPU_UP_CANCELED: |
618 | 0 | case CPU_DEAD: |
619 | 0 | migrate_timers_from_cpu(cpu); |
620 | 0 | break; |
621 | 22 | default: |
622 | 22 | break; |
623 | 34 | } |
624 | 34 | |
625 | 34 | return NOTIFY_DONE; |
626 | 34 | } |
627 | | |
628 | | static struct notifier_block cpu_nfb = { |
629 | | .notifier_call = cpu_callback, |
630 | | .priority = 99 |
631 | | }; |
632 | | |
633 | | void __init timer_init(void) |
634 | 1 | { |
635 | 1 | void *cpu = (void *)(long)smp_processor_id(); |
636 | 1 | |
637 | 1 | open_softirq(TIMER_SOFTIRQ, timer_softirq_action); |
638 | 1 | |
639 | 1 | /* |
640 | 1 | * All CPUs initially share an empty dummy heap. Only those CPUs that |
641 | 1 | * are brought online will be dynamically allocated their own heap. |
642 | 1 | */ |
643 | 1 | SET_HEAP_SIZE(&dummy_heap, 0); |
644 | 1 | SET_HEAP_LIMIT(&dummy_heap, 0); |
645 | 1 | |
646 | 1 | cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
647 | 1 | register_cpu_notifier(&cpu_nfb); |
648 | 1 | |
649 | 1 | register_keyhandler('a', dump_timerq, "dump timer queues", 1); |
650 | 1 | } |
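Because the shared dummy heap advertises a limit of 0, the first set_timer() on each CPU overflows into the linked list and raises TIMER_SOFTIRQ, and it is that softirq which allocates the CPU's real heap. The counts in this run are consistent with that: the growth path in timer_softirq_action() executed 12 times, once per CPU prepared in cpu_callback(), and the xfree() of an old heap never ran because the heap being replaced was always the static dummy.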
651 | | |
652 | | /* |
653 | | * Local variables: |
654 | | * mode: C |
655 | | * c-file-style: "BSD" |
656 | | * c-basic-offset: 4 |
657 | | * tab-width: 4 |
658 | | * indent-tabs-mode: nil |
659 | | * End: |
660 | | */ |