debuggers.hg: annotate xen/common/timer.c @ 22848:6341fe0f4e5a

changeset:  Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author:     Keir Fraser <keir@xen.org>
date:       Tue Jan 25 14:06:55 2011 +0000
parents:    1a64415c959f
children:   700ac6445812
/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <xen/cpu.h>
#include <xen/rcupdate.h>
#include <xen/symbols.h>
#include <asm/system.h>
#include <asm/desc.h>
#include <asm/atomic.h>

/* We program the timer hardware this far behind the closest deadline. */
static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
integer_param("timer_slop", timer_slop);

struct timers {
    spinlock_t       lock;
    struct timer   **heap;
    struct timer    *list;
    struct timer    *running;
    struct list_head inactive;
} __cacheline_aligned;

static DEFINE_PER_CPU(struct timers, timers);

/* Protects lock-free access to per-timer cpu field against cpu offlining. */
static DEFINE_RCU_READ_LOCK(timer_cpu_read_lock);

DEFINE_PER_CPU(s_time_t, timer_deadline);

/****************************************************************************
 * HEAP OPERATIONS.
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
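
/*
 * The heap is a 1-based array of timer pointers: heap[1] is the root
 * (earliest expiry), and the unused heap[0] slot is overloaded to hold two
 * u16 fields -- the current size and the allocated limit -- via the macros
 * above.  Each timer also records its own array index in t->heap_offset,
 * so it can be located and removed in O(log n) without a search.
 */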

/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}


/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_from_heap(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}


/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_to_heap(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);

    /* Fail if the heap is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
        return 0;

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);

    return (t->heap_offset == 1);
}
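
/*
 * Both heap operations return TRUE exactly when heap[1] -- the earliest
 * deadline on this cpu -- has changed.  Callers use that to decide whether
 * the hardware timer needs reprogramming, by raising TIMER_SOFTIRQ (see
 * activate_timer()/deactivate_timer() below).
 */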

/****************************************************************************
 * LINKED LIST OPERATIONS.
 */

static int remove_from_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( (curr = *_pprev) != t )
        _pprev = &curr->list_next;

    *_pprev = t->list_next;

    return (_pprev == pprev);
}

static int add_to_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( ((curr = *_pprev) != NULL) && (curr->expires <= t->expires) )
        _pprev = &curr->list_next;

    t->list_next = curr;
    *_pprev = t;

    return (_pprev == pprev);
}
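
/*
 * The linked list is a fallback used only while the heap is full.  It is
 * kept sorted by expiry time, and add_to_list()/remove_from_list() return
 * TRUE when the list head -- the earliest list deadline -- changes,
 * mirroring the heap operations' return convention.
 */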

/****************************************************************************
 * TIMER OPERATIONS.
 */

static int remove_entry(struct timer *t)
{
    struct timers *timers = &per_cpu(timers, t->cpu);
    int rc;

    switch ( t->status )
    {
    case TIMER_STATUS_in_heap:
        rc = remove_from_heap(timers->heap, t);
        break;
    case TIMER_STATUS_in_list:
        rc = remove_from_list(&timers->list, t);
        break;
    default:
        rc = 0;
        BUG();
    }

    t->status = TIMER_STATUS_invalid;
    return rc;
}

static int add_entry(struct timer *t)
{
    struct timers *timers = &per_cpu(timers, t->cpu);
    int rc;

    ASSERT(t->status == TIMER_STATUS_invalid);

    /* Try to add to heap. t->heap_offset indicates whether we succeed. */
    t->heap_offset = 0;
    t->status = TIMER_STATUS_in_heap;
    rc = add_to_heap(timers->heap, t);
    if ( t->heap_offset != 0 )
        return rc;

    /* Fall back to adding to the slower linked list. */
    t->status = TIMER_STATUS_in_list;
    return add_to_list(&timers->list, t);
}

static inline void activate_timer(struct timer *timer)
{
    ASSERT(timer->status == TIMER_STATUS_inactive);
    timer->status = TIMER_STATUS_invalid;
    list_del(&timer->inactive);

    if ( add_entry(timer) )
        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);
}

static inline void deactivate_timer(struct timer *timer)
{
    if ( remove_entry(timer) )
        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);

    timer->status = TIMER_STATUS_inactive;
    list_add(&timer->inactive, &per_cpu(timers, timer->cpu).inactive);
}

static inline bool_t timer_lock(struct timer *timer)
{
    unsigned int cpu;

    rcu_read_lock(&timer_cpu_read_lock);

    for ( ; ; )
    {
        cpu = atomic_read16(&timer->cpu);
        if ( unlikely(cpu == TIMER_CPU_status_killed) )
        {
            rcu_read_unlock(&timer_cpu_read_lock);
            return 0;
        }
        spin_lock(&per_cpu(timers, cpu).lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&per_cpu(timers, cpu).lock);
    }

    rcu_read_unlock(&timer_cpu_read_lock);
    return 1;
}

#define timer_lock_irqsave(t, flags) ({         \
    bool_t __x;                                 \
    local_irq_save(flags);                      \
    if ( !(__x = timer_lock(t)) )               \
        local_irq_restore(flags);               \
    __x;                                        \
})

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&per_cpu(timers, timer->cpu).lock);
}

#define timer_unlock_irqrestore(t, flags) ({    \
    timer_unlock(t);                            \
    local_irq_restore(flags);                   \
})
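
/*
 * Locking protocol: a timer is protected by the lock of the per-cpu queue
 * named in its cpu field, but that field can only be trusted once the lock
 * is held.  timer_lock() therefore reads timer->cpu without the lock,
 * acquires that cpu's queue lock, and rechecks the field, retrying if the
 * timer migrated in between.  The RCU read lock brackets the lock-free
 * read against cpu offlining (see timer_cpu_read_lock above).  A return
 * value of 0 means the timer has been killed and must not be touched;
 * timer_lock_irqsave() restores the saved flags in that case.
 */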

static bool_t active_timer(struct timer *timer)
{
    ASSERT(timer->status >= TIMER_STATUS_inactive);
    ASSERT(timer->status <= TIMER_STATUS_in_list);
    return (timer->status >= TIMER_STATUS_in_heap);
}


void init_timer(
    struct timer *timer,
    void        (*function)(void *),
    void         *data,
    unsigned int  cpu)
{
    unsigned long flags;
    memset(timer, 0, sizeof(*timer));
    timer->function = function;
    timer->data = data;
    atomic_write16(&timer->cpu, cpu);
    timer->status = TIMER_STATUS_inactive;
    if ( !timer_lock_irqsave(timer, flags) )
        BUG();
    list_add(&timer->inactive, &per_cpu(timers, cpu).inactive);
    timer_unlock_irqrestore(timer, flags);
}
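
/*
 * Typical usage, as an illustrative sketch only ('my_timer' and
 * 'my_handler' are hypothetical names, not part of this file):
 *
 *     static struct timer my_timer;
 *
 *     static void my_handler(void *data)
 *     {
 *         set_timer(&my_timer, NOW() + MILLISECS(10));   re-arm
 *     }
 *
 *     init_timer(&my_timer, my_handler, NULL, smp_processor_id());
 *     set_timer(&my_timer, NOW() + MILLISECS(10));
 *
 * NOW() and MILLISECS() come from xen/time.h.  kill_timer() should be
 * called before the timer's memory is reused or freed.
 */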

void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    timer->expires = expires;

    activate_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}


void stop_timer(struct timer *timer)
{
    unsigned long flags;

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}


void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    unsigned int old_cpu;
    bool_t active;
    unsigned long flags;

    rcu_read_lock(&timer_cpu_read_lock);

    for ( ; ; )
    {
        old_cpu = atomic_read16(&timer->cpu);
        if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) )
        {
            rcu_read_unlock(&timer_cpu_read_lock);
            return;
        }

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
            spin_lock(&per_cpu(timers, new_cpu).lock);
        }
        else
        {
            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
            spin_lock(&per_cpu(timers, old_cpu).lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&per_cpu(timers, old_cpu).lock);
        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    }

    rcu_read_unlock(&timer_cpu_read_lock);

    active = active_timer(timer);
    if ( active )
        deactivate_timer(timer);

    list_del(&timer->inactive);
    atomic_write16(&timer->cpu, new_cpu);
    list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);

    if ( active )
        activate_timer(timer);

    spin_unlock(&per_cpu(timers, old_cpu).lock);
    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
}
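
/*
 * Note the lock ordering above: when two queue locks must be held at once,
 * the lower-numbered cpu's lock is always taken first.  This gives all
 * lockers a single global order and so prevents an AB/BA deadlock between
 * two concurrent migrations in opposite directions.
 */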

void kill_timer(struct timer *timer)
{
    unsigned int old_cpu, cpu;
    unsigned long flags;

    BUG_ON(this_cpu(timers).running == timer);

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    list_del(&timer->inactive);
    timer->status = TIMER_STATUS_killed;
    old_cpu = timer->cpu;
    atomic_write16(&timer->cpu, TIMER_CPU_status_killed);

    spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags);

    for_each_online_cpu ( cpu )
        while ( per_cpu(timers, cpu).running == timer )
            cpu_relax();
}
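
/*
 * After the timer is marked killed and the lock is dropped, its callback
 * may still be mid-execution on some cpu, so kill_timer() spins until no
 * cpu's 'running' field points at it.  This is also why a handler must
 * never kill its own timer: the BUG_ON() above catches the self-deadlock
 * on the local cpu.
 */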

static void execute_timer(struct timers *ts, struct timer *t)
{
    void (*fn)(void *) = t->function;
    void *data = t->data;

    t->status = TIMER_STATUS_inactive;
    list_add(&t->inactive, &ts->inactive);

    ts->running = t;
    spin_unlock_irq(&ts->lock);
    (*fn)(data);
    spin_lock_irq(&ts->lock);
    ts->running = NULL;
}
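
/*
 * The callback runs with the queue lock dropped, so a handler may freely
 * call set_timer()/stop_timer() on its own timer.  ts->running is what
 * kill_timer() polls to wait for in-flight handlers to complete.
 */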

static void timer_softirq_action(void)
{
    struct timer  *t, **heap, *next;
    struct timers *ts;
    s_time_t       now, deadline;

    ts = &this_cpu(timers);
    heap = ts->heap;

    /* If we overflowed the heap, try to allocate a larger heap. */
    if ( unlikely(ts->list != NULL) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        struct timer **newheap = xmalloc_array(struct timer *, new_limit + 1);
        if ( newheap != NULL )
        {
            spin_lock_irq(&ts->lock);
            memcpy(newheap, heap, (old_limit + 1) * sizeof(*heap));
            SET_HEAP_LIMIT(newheap, new_limit);
            ts->heap = newheap;
            spin_unlock_irq(&ts->lock);
            if ( old_limit != 0 )
                xfree(heap);
            heap = newheap;
        }
    }

    spin_lock_irq(&ts->lock);

    now = NOW();

    /* Execute ready heap timers. */
    while ( (GET_HEAP_SIZE(heap) != 0) &&
            ((t = heap[1])->expires < now) )
    {
        remove_from_heap(heap, t);
        execute_timer(ts, t);
    }

    /* Execute ready list timers. */
    while ( ((t = ts->list) != NULL) && (t->expires < now) )
    {
        ts->list = t->list_next;
        execute_timer(ts, t);
    }

    /* Try to move timers from linked list to more efficient heap. */
    next = ts->list;
    ts->list = NULL;
    while ( unlikely((t = next) != NULL) )
    {
        next = t->list_next;
        t->status = TIMER_STATUS_invalid;
        add_entry(t);
    }

    /* Find earliest deadline from head of linked list and top of heap. */
    deadline = STIME_MAX;
    if ( GET_HEAP_SIZE(heap) != 0 )
        deadline = heap[1]->expires;
    if ( (ts->list != NULL) && (ts->list->expires < deadline) )
        deadline = ts->list->expires;
    this_cpu(timer_deadline) =
        (deadline == STIME_MAX) ? 0 : deadline + timer_slop;

    if ( !reprogram_timer(this_cpu(timer_deadline)) )
        raise_softirq(TIMER_SOFTIRQ);

    spin_unlock_irq(&ts->lock);
}
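
/*
 * Heap growth strategy: a non-empty overflow list at softirq time means
 * the heap filled up, so its limit is grown from (2^n)-1 to (2^(n+4))-1,
 * i.e. 16-fold, before expired timers are run.  Doing the allocation here,
 * in softirq context, presumably avoids having to call xmalloc() from the
 * irq contexts in which set_timer() may run.
 */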

s_time_t align_timer(s_time_t firsttick, uint64_t period)
{
    if ( !period )
        return firsttick;

    return firsttick + (period - 1) - ((firsttick - 1) % period);
}
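
/*
 * align_timer() rounds @firsttick up to the next multiple of @period,
 * leaving an exact multiple unchanged.  For example, with period = 5:
 *     align_timer(7, 5)  == 7 + 4 - (6 % 5)  == 10
 *     align_timer(10, 5) == 10 + 4 - (9 % 5) == 10
 */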

static void dump_timer(struct timer *t, s_time_t now)
{
    printk("  ex=%8"PRId64"us timer=%p cb=%p(%p)",
           (t->expires - now) / 1000, t, t->function, t->data);
    print_symbol(" %s\n", (unsigned long)t->function);
}

static void dump_timerq(unsigned char key)
{
    struct timer  *t;
    struct timers *ts;
    unsigned long  flags;
    s_time_t       now = NOW();
    int            i, j;

    printk("Dumping timer queues:\n");

    for_each_online_cpu( i )
    {
        ts = &per_cpu(timers, i);

        printk("CPU%02d:\n", i);
        spin_lock_irqsave(&ts->lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
            dump_timer(ts->heap[j], now);
        for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ )
            dump_timer(t, now);
        spin_unlock_irqrestore(&ts->lock, flags);
    }
}

static struct keyhandler dump_timerq_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_timerq,
    .desc = "dump timer queues"
};

static void migrate_timers_from_cpu(unsigned int old_cpu)
{
    unsigned int new_cpu = first_cpu(cpu_online_map);
    struct timers *old_ts, *new_ts;
    struct timer *t;
    bool_t notify = 0;

    ASSERT(!cpu_online(old_cpu) && cpu_online(new_cpu));

    old_ts = &per_cpu(timers, old_cpu);
    new_ts = &per_cpu(timers, new_cpu);

    if ( old_cpu < new_cpu )
    {
        spin_lock_irq(&old_ts->lock);
        spin_lock(&new_ts->lock);
    }
    else
    {
        spin_lock_irq(&new_ts->lock);
        spin_lock(&old_ts->lock);
    }

    while ( (t = GET_HEAP_SIZE(old_ts->heap)
             ? old_ts->heap[1] : old_ts->list) != NULL )
    {
        remove_entry(t);
        atomic_write16(&t->cpu, new_cpu);
        notify |= add_entry(t);
    }

    while ( !list_empty(&old_ts->inactive) )
    {
        t = list_entry(old_ts->inactive.next, struct timer, inactive);
        list_del(&t->inactive);
        atomic_write16(&t->cpu, new_cpu);
        list_add(&t->inactive, &new_ts->inactive);
    }

    spin_unlock(&old_ts->lock);
    spin_unlock_irq(&new_ts->lock);
    local_irq_enable();

    if ( notify )
        cpu_raise_softirq(new_cpu, TIMER_SOFTIRQ);
}
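
/*
 * This runs when a cpu goes offline (see the CPU_DEAD/CPU_UP_CANCELED
 * notifier below): every timer -- active or inactive -- is rehomed onto
 * the first online cpu, using the same lower-cpu-first lock ordering as
 * migrate_timer().  The new cpu is kicked with TIMER_SOFTIRQ only if one
 * of the moved timers became its new earliest deadline.
 */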

static struct timer *dummy_heap;

static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    struct timers *ts = &per_cpu(timers, cpu);

    switch ( action )
    {
    case CPU_UP_PREPARE:
        INIT_LIST_HEAD(&ts->inactive);
        spin_lock_init(&ts->lock);
        ts->heap = &dummy_heap;
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        migrate_timers_from_cpu(cpu);
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};

void __init timer_init(void)
{
    void *cpu = (void *)(long)smp_processor_id();

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
    register_cpu_notifier(&cpu_nfb);

    register_keyhandler('a', &dump_timerq_keyhandler);
}
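
/*
 * Note that each cpu starts out pointing at dummy_heap, whose limit is
 * zero: the very first add_to_heap() on a cpu therefore fails, the timer
 * lands on the overflow list, and timer_softirq_action() then allocates
 * that cpu a real heap (limit 15 on the first growth step).
 */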

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */