
changeset 22731:1a64415c959f

timer: Don't hardcode cpu0 in migrate_timers_from_cpu().

Although we don't allow cpu0 to be offlined, there's no need to
hardcode that assumption in the timer subsystem.

Signed-off-by: Keir Fraser <keir@xen.org>
author Keir Fraser <keir@xen.org>
date Sat Jan 08 10:43:01 2011 +0000
parents 0e49e2590462
children aec06605e125
files xen/common/timer.c
--- a/xen/common/timer.c	Sat Jan 08 10:09:44 2011 +0000
+++ b/xen/common/timer.c	Sat Jan 08 10:43:01 2011 +0000
@@ -546,39 +546,50 @@ static struct keyhandler dump_timerq_key
     .desc = "dump timer queues"
 };
 
-static void migrate_timers_from_cpu(unsigned int cpu)
+static void migrate_timers_from_cpu(unsigned int old_cpu)
 {
-    struct timers *ts;
+    unsigned int new_cpu = first_cpu(cpu_online_map);
+    struct timers *old_ts, *new_ts;
     struct timer *t;
     bool_t notify = 0;
 
-    ASSERT((cpu != 0) && cpu_online(0));
+    ASSERT(!cpu_online(old_cpu) && cpu_online(new_cpu));
 
-    ts = &per_cpu(timers, cpu);
+    old_ts = &per_cpu(timers, old_cpu);
+    new_ts = &per_cpu(timers, new_cpu);
 
-    spin_lock_irq(&per_cpu(timers, 0).lock);
-    spin_lock(&ts->lock);
+    if ( old_cpu < new_cpu )
+    {
+        spin_lock_irq(&old_ts->lock);
+        spin_lock(&new_ts->lock);
+    }
+    else
+    {
+        spin_lock_irq(&new_ts->lock);
+        spin_lock(&old_ts->lock);
+    }
 
-    while ( (t = GET_HEAP_SIZE(ts->heap) ? ts->heap[1] : ts->list) != NULL )
+    while ( (t = GET_HEAP_SIZE(old_ts->heap)
+             ? old_ts->heap[1] : old_ts->list) != NULL )
     {
         remove_entry(t);
-        atomic_write16(&t->cpu, 0);
+        atomic_write16(&t->cpu, new_cpu);
         notify |= add_entry(t);
     }
 
-    while ( !list_empty(&ts->inactive) )
+    while ( !list_empty(&old_ts->inactive) )
     {
-        t = list_entry(ts->inactive.next, struct timer, inactive);
+        t = list_entry(old_ts->inactive.next, struct timer, inactive);
         list_del(&t->inactive);
-        atomic_write16(&t->cpu, 0);
-        list_add(&t->inactive, &per_cpu(timers, 0).inactive);
+        atomic_write16(&t->cpu, new_cpu);
+        list_add(&t->inactive, &new_ts->inactive);
     }
 
-    spin_unlock(&ts->lock);
-    spin_unlock_irq(&per_cpu(timers, 0).lock);
+    spin_unlock(&old_ts->lock);
+    spin_unlock_irq(&new_ts->lock);
 
     if ( notify )
-        cpu_raise_softirq(0, TIMER_SOFTIRQ);
+        cpu_raise_softirq(new_cpu, TIMER_SOFTIRQ);
 }
 
 static struct timer *dummy_heap;
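
A note on the locking change: migrate_timers_from_cpu() now takes two per-CPU timer locks whose identities are no longer fixed in advance, so it imposes a global acquisition order (lower CPU index first) to avoid an ABBA deadlock against a concurrent migration running in the opposite direction. Below is a minimal standalone sketch of the same pattern, assuming hypothetical names (queue_lock, lock_pair) and using pthread mutexes in place of Xen's IRQ-disabling spinlocks:

/* Deadlock-free acquisition of two locks from the same family:
 * always lock the lower-indexed one first, mirroring the
 * old_cpu < new_cpu test in the patch above. */
#include <pthread.h>
#include <stdio.h>

#define NR_QUEUES 4

static pthread_mutex_t queue_lock[NR_QUEUES] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Assumes a != b (in the patch, old_cpu is offline, new_cpu online). */
static void lock_pair(unsigned int a, unsigned int b)
{
    /* Global order: lowest index first. */
    if ( a < b )
    {
        pthread_mutex_lock(&queue_lock[a]);
        pthread_mutex_lock(&queue_lock[b]);
    }
    else
    {
        pthread_mutex_lock(&queue_lock[b]);
        pthread_mutex_lock(&queue_lock[a]);
    }
}

static void unlock_pair(unsigned int a, unsigned int b)
{
    /* Release order is irrelevant to deadlock avoidance. */
    pthread_mutex_unlock(&queue_lock[a]);
    pthread_mutex_unlock(&queue_lock[b]);
}

int main(void)
{
    /* Concurrent 0->1 and 1->0 migrations both take lock 0 first,
     * so neither can hold one lock while waiting on the other. */
    lock_pair(0, 1);
    printf("migrated entries from queue 0 to queue 1\n");
    unlock_pair(0, 1);
    return 0;
}

This is also why the patch's unlock sequence can be unconditional: once both locks are held, old_ts may always be released first, with interrupts re-enabled by the final spin_unlock_irq() regardless of which branch disabled them.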