changeset 22067:28546f5ec0eb
timers: Simplify implementation logic.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author    Keir Fraser <keir.fraser@citrix.com>
date      Wed Aug 18 14:56:01 2010 +0100 (2010-08-18)
parents   d20cbccb6fea
children  49e17551ef90
files     xen/arch/x86/acpi/cpu_idle.c xen/arch/x86/acpi/cpuidle_menu.c xen/arch/x86/hpet.c xen/arch/x86/time.c xen/common/timer.c xen/include/xen/timer.h
line diff
--- a/xen/arch/x86/acpi/cpu_idle.c  Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c  Wed Aug 18 14:56:01 2010 +0100
@@ -252,7 +252,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mas
 static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
     unsigned int cpu = smp_processor_id();
-    s_time_t expires = per_cpu(timer_deadline_start, cpu);
+    s_time_t expires = per_cpu(timer_deadline, cpu);
 
     __monitor((void *)&mwait_wakeup(cpu), 0, 0);
     smp_mb();
--- a/xen/arch/x86/acpi/cpuidle_menu.c  Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/acpi/cpuidle_menu.c  Wed Aug 18 14:56:01 2010 +0100
@@ -173,7 +173,7 @@ static inline s_time_t avg_intr_interval
 
 static unsigned int get_sleep_length_us(void)
 {
-    s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000;
+    s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000;
     /*
      * while us < 0 or us > (u32)-1, return a large u32,
      * choose (unsigned int)-2000 to avoid wrapping while added with exit
--- a/xen/arch/x86/hpet.c  Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/hpet.c  Wed Aug 18 14:56:01 2010 +0100
@@ -36,14 +36,14 @@ struct hpet_event_channel
     cpumask_t cpumask;
     /*
      * cpumask_lock is used to prevent hpet intr handler from accessing other
-     * cpu's timer_deadline_start/end after the other cpu's mask was cleared --
-     * mask cleared means cpu waken up, then accessing timer_deadline_xxx from
+     * cpu's timer_deadline after the other cpu's mask was cleared --
+     * mask cleared means cpu waken up, then accessing timer_deadline from
      * other cpu is not safe.
      * It is not used for protecting cpumask, so set ops needn't take it.
      * Multiple cpus clear cpumask simultaneously is ok due to the atomic
      * feature of cpu_clear, so hpet_broadcast_exit() can take read lock for
      * clearing cpumask, and handle_hpet_broadcast() have to take write lock
-     * for read cpumask & access timer_deadline_xxx.
+     * for read cpumask & access timer_deadline.
      */
     rwlock_t cpumask_lock;
     spinlock_t lock;
@@ -212,10 +212,10 @@ again:
 
         if ( cpu_isset(cpu, ch->cpumask) )
         {
-            if ( per_cpu(timer_deadline_start, cpu) <= now )
+            if ( per_cpu(timer_deadline, cpu) <= now )
                 cpu_set(cpu, mask);
-            else if ( per_cpu(timer_deadline_end, cpu) < next_event )
-                next_event = per_cpu(timer_deadline_end, cpu);
+            else if ( per_cpu(timer_deadline, cpu) < next_event )
+                next_event = per_cpu(timer_deadline, cpu);
         }
 
         write_unlock_irq(&ch->cpumask_lock);
@@ -661,7 +661,7 @@ void hpet_broadcast_enter(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline_start) == 0 )
+    if ( this_cpu(timer_deadline) == 0 )
         return;
 
     if ( !ch )
@@ -682,8 +682,8 @@ void hpet_broadcast_enter(void)
 
     spin_lock(&ch->lock);
     /* reprogram if current cpu expire time is nearer */
-    if ( this_cpu(timer_deadline_end) < ch->next_event )
-        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
+    if ( this_cpu(timer_deadline) < ch->next_event )
+        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);
     spin_unlock(&ch->lock);
 }
 
@@ -692,7 +692,7 @@ void hpet_broadcast_exit(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline_start) == 0 )
+    if ( this_cpu(timer_deadline) == 0 )
         return;
 
     if ( !ch )
@@ -700,7 +700,7 @@ void hpet_broadcast_exit(void)
 
     /* Reprogram the deadline; trigger timer work now if it has passed. */
     enable_APIC_timer();
-    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+    if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     read_lock_irq(&ch->cpumask_lock);
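Aside (not part of the changeset): with a single per-CPU timer_deadline, the broadcast-exit paths above and in time.c below all follow the same pattern. The sketch restates it in isolation; this_cpu, timer_deadline, reprogram_timer, raise_softirq and TIMER_SOFTIRQ are names from the patch, while resume_local_timer() is a hypothetical helper used only for illustration.

    /*
     * Illustrative sketch only -- resume_local_timer() is not a function
     * added by this patch. It mirrors hpet_broadcast_exit(): reprogram the
     * local timer for the per-CPU deadline, and fall back to the timer
     * softirq if that deadline has already passed.
     */
    static void resume_local_timer(void)
    {
        if ( this_cpu(timer_deadline) == 0 )
            return;                        /* no timer pending on this CPU */

        if ( !reprogram_timer(this_cpu(timer_deadline)) )
            raise_softirq(TIMER_SOFTIRQ); /* deadline already in the past */
    }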
--- a/xen/arch/x86/time.c  Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/time.c  Wed Aug 18 14:56:01 2010 +0100
@@ -1488,7 +1488,7 @@ void pit_broadcast_exit(void)
     int cpu = smp_processor_id();
 
     if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
-        reprogram_timer(per_cpu(timer_deadline_start, cpu));
+        reprogram_timer(this_cpu(timer_deadline));
 }
 
 int pit_broadcast_is_available(void)
--- a/xen/common/timer.c  Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/common/timer.c  Wed Aug 18 14:56:01 2010 +0100
@@ -23,16 +23,12 @@
 #include <asm/system.h>
 #include <asm/desc.h>
 
-/*
- * We pull handlers off the timer list this far in future,
- * rather than reprogramming the time hardware.
- */
+/* We program the time hardware this far behind the closest deadline. */
 static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
 integer_param("timer_slop", timer_slop);
 
 struct timers {
     spinlock_t lock;
-    bool_t overflow;
     struct timer **heap;
     struct timer *list;
     struct timer *running;
@@ -43,8 +39,7 @@ static DEFINE_PER_CPU(struct timers, tim
 
 static cpumask_t timer_valid_cpumask;
 
-DEFINE_PER_CPU(s_time_t, timer_deadline_start);
-DEFINE_PER_CPU(s_time_t, timer_deadline_end);
+DEFINE_PER_CPU(s_time_t, timer_deadline);
 
 /****************************************************************************
  * HEAP OPERATIONS.
@@ -210,7 +205,6 @@ static int add_entry(struct timer *t)
         return rc;
 
     /* Fall back to adding to the slower linked list. */
-    timers->overflow = 1;
     t->status = TIMER_STATUS_in_list;
     return add_to_list(&timers->list, t);
 }
@@ -311,7 +305,6 @@ void set_timer(struct timer *timer, s_ti
         deactivate_timer(timer);
 
     timer->expires = expires;
-    timer->expires_end = expires + timer_slop;
 
     activate_timer(timer);
 
@@ -427,13 +420,13 @@ static void timer_softirq_action(void)
 {
     struct timer *t, **heap, *next;
     struct timers *ts;
-    s_time_t now;
+    s_time_t now, deadline;
 
     ts = &this_cpu(timers);
     heap = ts->heap;
 
     /* If we overflowed the heap, try to allocate a larger heap. */
-    if ( unlikely(ts->overflow) )
+    if ( unlikely(ts->list != NULL) )
     {
         /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
         int old_limit = GET_HEAP_LIMIT(heap);
@@ -481,46 +474,16 @@ static void timer_softirq_action(void)
             add_entry(t);
     }
 
-    ts->overflow = (ts->list != NULL);
-    if ( unlikely(ts->overflow) )
-    {
-        /* Find earliest deadline at head of list or top of heap. */
-        this_cpu(timer_deadline_start) = ts->list->expires;
-        if ( (GET_HEAP_SIZE(heap) != 0) &&
-             ((t = heap[1])->expires < this_cpu(timer_deadline_start)) )
-            this_cpu(timer_deadline_start) = t->expires;
-        this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start);
-    }
-    else
-    {
-        /*
-         * Find the earliest deadline that encompasses largest number of timers
-         * on the heap. To do this we take timers from the heap while their
-         * valid deadline ranges continue to intersect.
-         */
-        s_time_t start = 0, end = STIME_MAX;
-        struct timer **list_tail = &ts->list;
+    /* Find earliest deadline from head of linked list and top of heap. */
+    deadline = STIME_MAX;
+    if ( GET_HEAP_SIZE(heap) != 0 )
+        deadline = heap[1]->expires;
+    if ( (ts->list != NULL) && (ts->list->expires < deadline) )
+        deadline = ts->list->expires;
+    this_cpu(timer_deadline) =
+        (deadline == STIME_MAX) ? 0 : deadline + timer_slop;
 
-        while ( (GET_HEAP_SIZE(heap) != 0) &&
-                ((t = heap[1])->expires <= end) )
-        {
-            remove_entry(t);
-
-            t->status = TIMER_STATUS_in_list;
-            t->list_next = NULL;
-            *list_tail = t;
-            list_tail = &t->list_next;
-
-            start = t->expires;
-            if ( end > t->expires_end )
-                end = t->expires_end;
-        }
-
-        this_cpu(timer_deadline_start) = start;
-        this_cpu(timer_deadline_end) = end;
-    }
-
-    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+    if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     spin_unlock_irq(&ts->lock);
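Aside (not part of the changeset): the core of the simplification is the deadline computation at the end of timer_softirq_action() above, which replaces the old start/end range coalescing. Read in isolation it amounts to the sketch below; ts, heap, GET_HEAP_SIZE, timer_slop and STIME_MAX are the names used in the patch, while next_deadline() is a hypothetical helper introduced only for illustration.

    /*
     * Illustrative sketch only -- next_deadline() is not a function added by
     * this patch. It mirrors the new logic in timer_softirq_action(): take
     * the earlier of the heap top and the overflow-list head, then allow
     * timer_slop of lateness. A return value of 0 means no timer is pending
     * on this CPU.
     */
    static s_time_t next_deadline(struct timers *ts, struct timer **heap)
    {
        s_time_t deadline = STIME_MAX;

        if ( GET_HEAP_SIZE(heap) != 0 )
            deadline = heap[1]->expires;          /* earliest heap entry */

        if ( (ts->list != NULL) && (ts->list->expires < deadline) )
            deadline = ts->list->expires;         /* earlier list entry  */

        return (deadline == STIME_MAX) ? 0 : deadline + timer_slop;
    }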
--- a/xen/include/xen/timer.h  Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/include/xen/timer.h  Wed Aug 18 14:56:01 2010 +0100
@@ -16,7 +16,6 @@
 struct timer {
     /* System time expiry value (nanoseconds since boot). */
     s_time_t expires;
-    s_time_t expires_end;
 
     /* Position in active-timer data structure. */
     union {
@@ -82,12 +81,8 @@ void kill_timer(struct timer *timer);
 /* Bootstrap initialisation. Must be called before any other timer function. */
 void timer_init(void);
 
-/*
- * Next timer deadline for each CPU.
- * Modified only by the local CPU and never in interrupt context.
- */
-DECLARE_PER_CPU(s_time_t, timer_deadline_start);
-DECLARE_PER_CPU(s_time_t, timer_deadline_end);
+/* Next timer deadline for each CPU. */
+DECLARE_PER_CPU(s_time_t, timer_deadline);
 
 /* Arch-defined function to reprogram timer hardware for new deadline. */
 int reprogram_timer(s_time_t timeout);