debuggers.hg
changeset 22728:c7c1ab13d08e
timer: Fix up timer-state teardown on CPU offline / online-failure.
The lock-free access to timer->cpu in timer_lock() is problematic, as
the per-cpu data that is then dereferenced could disappear under our
feet. Now that per-cpu data is freed via RCU, we simply add an RCU
read-side critical section to timer_lock(). It is then also
unnecessary to migrate timers on CPU_DYING (we can defer it to the
nicer CPU_DEAD state) and we can also safely migrate timers on
CPU_UP_CANCELED.
Signed-off-by: Keir Fraser <keir@xen.org>
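
The idiom this change depends on is worth spelling out: timer_lock() reads timer->cpu with no lock held, takes that CPU's queue lock, and then re-checks that the timer did not migrate in the window between the read and the lock. The new RCU read-side critical section adds one guarantee on top of that retry loop: per_cpu(timers, cpu) stays valid memory for its duration, even if the CPU goes offline underneath it. Below is a minimal user-space sketch of the retry loop only; the names (percpu_timers, NR_CPUS, timer_lock_sketch) are invented for illustration, pthread mutexes stand in for Xen's spinlocks, and the RCU part is elided because the array here is static and can never be freed.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS 4
#define TIMER_STATUS_killed 1

struct timers { pthread_mutex_t lock; };
static struct timers percpu_timers[NR_CPUS] = {
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
};

struct timer {
    _Atomic unsigned int cpu;   /* may change concurrently via migration */
    _Atomic int status;
};

/* Returns true with the owning CPU's queue lock held, false if the
 * timer has been killed.  In Xen this whole loop now runs inside
 * rcu_read_lock(&timer_cpu_read_lock), which is what keeps
 * per_cpu(timers, cpu) from being freed between the lock-free read
 * of t->cpu and the lock acquisition on that queue. */
bool timer_lock_sketch(struct timer *t)
{
    for ( ; ; )
    {
        unsigned int cpu = atomic_load(&t->cpu);
        if ( atomic_load(&t->status) == TIMER_STATUS_killed )
            return false;
        pthread_mutex_lock(&percpu_timers[cpu].lock);
        /* Re-check: the timer may have migrated to another CPU's
         * queue before we acquired this lock. */
        if ( atomic_load(&t->cpu) == cpu &&
             atomic_load(&t->status) != TIMER_STATUS_killed )
            return true;
        pthread_mutex_unlock(&percpu_timers[cpu].lock);
    }
}
```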
| author   | Keir Fraser <keir@xen.org>                  |
|----------|---------------------------------------------|
| date     | Sat Jan 08 09:29:11 2011 +0000 (2011-01-08) |
| parents  | 2dd233e88838                                |
| children | 533d6e5c0099                                |
| files    | xen/common/timer.c                          |
line diff
```diff
--- a/xen/common/timer.c	Sat Jan 08 09:14:23 2011 +0000
+++ b/xen/common/timer.c	Sat Jan 08 09:29:11 2011 +0000
@@ -19,6 +19,7 @@
 #include <xen/keyhandler.h>
 #include <xen/percpu.h>
 #include <xen/cpu.h>
+#include <xen/rcupdate.h>
 #include <xen/symbols.h>
 #include <asm/system.h>
 #include <asm/desc.h>
@@ -37,7 +38,8 @@ struct timers {
 
 static DEFINE_PER_CPU(struct timers, timers);
 
-static cpumask_t timer_valid_cpumask;
+/* Protects lock-free access to per-timer cpu field against cpu offlining. */
+static DEFINE_RCU_READ_LOCK(timer_cpu_read_lock);
 
 DEFINE_PER_CPU(s_time_t, timer_deadline);
 
@@ -232,12 +234,16 @@ static inline bool_t timer_lock(struct t
 {
     unsigned int cpu;
 
+    rcu_read_lock(&timer_cpu_read_lock);
+
     for ( ; ; )
     {
         cpu = timer->cpu;
         if ( unlikely(timer->status == TIMER_STATUS_killed) )
+        {
+            rcu_read_unlock(&timer_cpu_read_lock);
             return 0;
-        ASSERT(cpu_isset(cpu, timer_valid_cpumask));
+        }
         spin_lock(&per_cpu(timers, cpu).lock);
         if ( likely(timer->cpu == cpu) &&
              likely(timer->status != TIMER_STATUS_killed) )
@@ -245,6 +251,7 @@ static inline bool_t timer_lock(struct t
         spin_unlock(&per_cpu(timers, cpu).lock);
     }
 
+    rcu_read_unlock(&timer_cpu_read_lock);
     return 1;
 }
 
@@ -332,14 +339,16 @@ void migrate_timer(struct timer *timer,
     bool_t active;
     unsigned long flags;
 
+    rcu_read_lock(&timer_cpu_read_lock);
+
     for ( ; ; )
     {
         if ( ((old_cpu = timer->cpu) == new_cpu) ||
              unlikely(timer->status == TIMER_STATUS_killed) )
+        {
+            rcu_read_unlock(&timer_cpu_read_lock);
             return;
-
-        ASSERT(cpu_isset(old_cpu, timer_valid_cpumask));
-        ASSERT(cpu_isset(new_cpu, timer_valid_cpumask));
+        }
 
         if ( old_cpu < new_cpu )
         {
@@ -360,6 +369,8 @@ void migrate_timer(struct timer *timer,
         spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
     }
 
+    rcu_read_unlock(&timer_cpu_read_lock);
+
     active = active_timer(timer);
     if ( active )
         deactivate_timer(timer);
@@ -534,19 +545,17 @@ static struct keyhandler dump_timerq_key
     .desc = "dump timer queues"
 };
 
-static unsigned int migrate_timers_from_cpu(unsigned int cpu)
+static void migrate_timers_from_cpu(unsigned int cpu)
 {
     struct timers *ts;
     struct timer *t;
     bool_t notify = 0;
-    unsigned int nr_migrated = 0;
-    unsigned long flags;
 
     ASSERT((cpu != 0) && cpu_online(0));
 
     ts = &per_cpu(timers, cpu);
 
-    spin_lock_irqsave(&per_cpu(timers, 0).lock, flags);
+    spin_lock_irq(&per_cpu(timers, 0).lock);
     spin_lock(&ts->lock);
 
     while ( (t = GET_HEAP_SIZE(ts->heap) ? ts->heap[1] : ts->list) != NULL )
@@ -554,7 +563,6 @@ static unsigned int migrate_timers_from_
         remove_entry(t);
         t->cpu = 0;
         notify |= add_entry(t);
-        nr_migrated++;
     }
 
     while ( !list_empty(&ts->inactive) )
@@ -563,16 +571,13 @@ static unsigned int migrate_timers_from_
         list_del(&t->inactive);
         t->cpu = 0;
         list_add(&t->inactive, &per_cpu(timers, 0).inactive);
-        nr_migrated++;
     }
 
     spin_unlock(&ts->lock);
-    spin_unlock_irqrestore(&per_cpu(timers, 0).lock, flags);
+    spin_unlock_irq(&per_cpu(timers, 0).lock);
 
     if ( notify )
         cpu_raise_softirq(0, TIMER_SOFTIRQ);
-
-    return nr_migrated;
 }
 
 static struct timer *dummy_heap;
@@ -589,17 +594,10 @@ static int cpu_callback(
         INIT_LIST_HEAD(&ts->inactive);
         spin_lock_init(&ts->lock);
         ts->heap = &dummy_heap;
-        cpu_set(cpu, timer_valid_cpumask);
-        break;
-    case CPU_DYING:
-        cpu_clear(cpu, timer_valid_cpumask);
-        migrate_timers_from_cpu(cpu);
         break;
     case CPU_UP_CANCELED:
     case CPU_DEAD:
-        cpu_clear(cpu, timer_valid_cpumask);
-        if ( migrate_timers_from_cpu(cpu) )
-            BUG();
+        migrate_timers_from_cpu(cpu);
         break;
     default:
         break;
```
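
The deleted timer_valid_cpumask asserts were the old, weaker defence: they could flag a stale timer->cpu, but could not stop the per-cpu data from being freed mid-dereference. What makes the new scheme sound is the property the commit message points at: per-cpu areas are freed via RCU. The sketch below illustrates that contract in user space with liburcu rather than Xen's rcupdate.h (whose primitives differ in detail); percpu_area and both function names are invented for the example. The only point is the ordering guarantee: synchronize_rcu() cannot return while any reader is inside a read-side critical section, so the offline path frees the area only after every in-flight timer_lock()/migrate_timer() analogue has dropped out.

```c
/* Build with: gcc -D_LGPL_SOURCE rcu_free.c -lurcu
 * Each thread must call rcu_register_thread() before first use. */
#include <urcu.h>
#include <stdlib.h>

#define NR_CPUS 4

struct timers { int dummy; /* stands in for the real queue state */ };
static struct timers *percpu_area[NR_CPUS];   /* invented name */

/* Reader: the analogue of timer_lock()'s new critical section. */
void reader(unsigned int cpu)
{
    rcu_read_lock();
    struct timers *ts = rcu_dereference(percpu_area[cpu]);
    if ( ts != NULL )
    {
        /* ts stays valid until rcu_read_unlock(): the freer below is
         * blocked in synchronize_rcu() until we leave this section. */
    }
    rcu_read_unlock();
}

/* Offline path: unpublish, wait out pre-existing readers, then free. */
void offline_cpu(unsigned int cpu)
{
    struct timers *ts = percpu_area[cpu];
    rcu_assign_pointer(percpu_area[cpu], NULL);
    synchronize_rcu();   /* returns only after all such readers finish */
    free(ts);
}
```

The same reasoning explains why migration can move from CPU_DYING to the later CPU_DEAD notification: once lock-free readers can no longer touch a dead CPU's queues unsafely, there is no need to drain them inside the stop-machine window, and the CPU_UP_CANCELED path can reuse the same migration helper.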