
changeset 20657:1396dfb8d6ba

x86: Allow HPET to set timers more sloppily by seeing each CPU's
acceptable deadline range, rather than just deadline start.

Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 11 08:50:13 2009 +0000 (2009-12-11)
parents b2ccd48f2f9b
children 1f5f36e11114
files xen/arch/x86/acpi/cpuidle_menu.c xen/arch/x86/hpet.c xen/arch/x86/time.c xen/common/timer.c xen/include/xen/timer.h
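Editorial note: the idea, in brief, is that each CPU now publishes an acceptable wakeup window [timer_deadline_start, timer_deadline_end] instead of a single deadline, and the HPET broadcast code programs its next interrupt against the window ends of the still-sleeping CPUs, so wakeups can be deferred and batched. The following is a simplified, self-contained sketch of the selection loop in the hpet.c hunk below; the cpu_deadline struct, scan_deadlines() and wake_mask are illustrative names only, not Xen APIs, and the real code additionally handles the channel lock, cpumask and IPIs.

#include <stdint.h>

typedef int64_t s_time_t;
#define STIME_MAX ((s_time_t)((uint64_t)~0ULL >> 1))

/* Per-CPU wakeup window, mirroring timer_deadline_start/timer_deadline_end. */
struct cpu_deadline {
    s_time_t start;   /* earliest time the CPU wants to be woken */
    s_time_t end;     /* latest time it can tolerate being woken */
};

/*
 * Simplified model of the expired-event scan in hpet.c: CPUs whose window
 * has already opened are marked for immediate wakeup; the next HPET
 * interrupt is placed at the minimum of the remaining CPUs' window ends,
 * i.e. as late as every still-sleeping CPU can tolerate.
 */
static s_time_t scan_deadlines(const struct cpu_deadline *dl, unsigned int ncpus,
                               s_time_t now, uint8_t *wake_mask)
{
    s_time_t next_event = STIME_MAX;
    unsigned int cpu;

    for ( cpu = 0; cpu < ncpus; cpu++ )
    {
        if ( dl[cpu].start <= now )
            wake_mask[cpu] = 1;          /* window open: wake this CPU now */
        else if ( dl[cpu].end < next_event )
            next_event = dl[cpu].end;    /* latest point still acceptable to all */
    }

    return next_event;                   /* time to program the HPET for */
}
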
line diff
--- a/xen/arch/x86/acpi/cpuidle_menu.c	Fri Dec 11 08:47:51 2009 +0000
+++ b/xen/arch/x86/acpi/cpuidle_menu.c	Fri Dec 11 08:50:13 2009 +0000
@@ -49,7 +49,7 @@ static DEFINE_PER_CPU(struct menu_device
 
 static unsigned int get_sleep_length_us(void)
 {
-    s_time_t us = (per_cpu(timer_deadline, smp_processor_id()) - NOW()) / 1000;
+    s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000;
     /*
      * while us < 0 or us > (u32)-1, return a large u32,
      * choose (unsigned int)-2000 to avoid wrapping while added with exit
--- a/xen/arch/x86/hpet.c	Fri Dec 11 08:47:51 2009 +0000
+++ b/xen/arch/x86/hpet.c	Fri Dec 11 08:50:13 2009 +0000
@@ -190,10 +190,10 @@ again:
     /* find all expired events */
     for_each_cpu_mask(cpu, ch->cpumask)
     {
-        if ( per_cpu(timer_deadline, cpu) <= now )
+        if ( per_cpu(timer_deadline_start, cpu) <= now )
             cpu_set(cpu, mask);
-        else if ( per_cpu(timer_deadline, cpu) < next_event )
-            next_event = per_cpu(timer_deadline, cpu);
+        else if ( per_cpu(timer_deadline_end, cpu) < next_event )
+            next_event = per_cpu(timer_deadline_end, cpu);
     }
 
     /* wakeup the cpus which have an expired event. */
@@ -629,7 +629,7 @@ void hpet_broadcast_enter(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline) == 0 )
+    if ( this_cpu(timer_deadline_start) == 0 )
         return;
 
     if ( !ch )
@@ -649,8 +649,8 @@ void hpet_broadcast_enter(void)
     cpu_set(cpu, ch->cpumask);
 
     /* reprogram if current cpu expire time is nearer */
-    if ( this_cpu(timer_deadline) < ch->next_event )
-        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);
+    if ( this_cpu(timer_deadline_end) < ch->next_event )
+        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
 
     spin_unlock(&ch->lock);
 }
@@ -660,7 +660,7 @@ void hpet_broadcast_exit(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline) == 0 )
+    if ( this_cpu(timer_deadline_start) == 0 )
         return;
 
     BUG_ON( !ch );
@@ -671,7 +671,7 @@ void hpet_broadcast_exit(void)
     {
         /* Reprogram the deadline; trigger timer work now if it has passed. */
        enable_APIC_timer();
-        if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
+        if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
             raise_softirq(TIMER_SOFTIRQ);
 
         if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
--- a/xen/arch/x86/time.c	Fri Dec 11 08:47:51 2009 +0000
+++ b/xen/arch/x86/time.c	Fri Dec 11 08:50:13 2009 +0000
@@ -1367,7 +1367,7 @@ void pit_broadcast_exit(void)
     int cpu = smp_processor_id();
 
     if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
-        reprogram_timer(per_cpu(timer_deadline, cpu));
+        reprogram_timer(per_cpu(timer_deadline_start, cpu));
 }
 
 int pit_broadcast_is_available(void)
--- a/xen/common/timer.c	Fri Dec 11 08:47:51 2009 +0000
+++ b/xen/common/timer.c	Fri Dec 11 08:50:13 2009 +0000
@@ -38,7 +38,8 @@ struct timers {
 
 static DEFINE_PER_CPU(struct timers, timers);
 
-DEFINE_PER_CPU(s_time_t, timer_deadline);
+DEFINE_PER_CPU(s_time_t, timer_deadline_start);
+DEFINE_PER_CPU(s_time_t, timer_deadline_end);
 
 /****************************************************************************
  * HEAP OPERATIONS.
@@ -425,10 +426,11 @@ static void timer_softirq_action(void)
     if ( unlikely(ts->overflow) )
     {
         /* Find earliest deadline at head of list or top of heap. */
-        this_cpu(timer_deadline) = ts->list->expires;
+        this_cpu(timer_deadline_start) = ts->list->expires;
         if ( (GET_HEAP_SIZE(heap) != 0) &&
-             ((t = heap[1])->expires < this_cpu(timer_deadline)) )
-            this_cpu(timer_deadline) = t->expires;
+             ((t = heap[1])->expires < this_cpu(timer_deadline_start)) )
+            this_cpu(timer_deadline_start) = t->expires;
+        this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start);
     }
     else
     {
@@ -455,10 +457,11 @@ static void timer_softirq_action(void)
                 end = t->expires_end;
         }
 
-        this_cpu(timer_deadline) = start;
+        this_cpu(timer_deadline_start) = start;
+        this_cpu(timer_deadline_end) = end;
     }
 
-    if ( !reprogram_timer(this_cpu(timer_deadline)) )
+    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     spin_unlock_irq(&ts->lock);
--- a/xen/include/xen/timer.h	Fri Dec 11 08:47:51 2009 +0000
+++ b/xen/include/xen/timer.h	Fri Dec 11 08:50:13 2009 +0000
@@ -117,7 +117,8 @@ extern void timer_init(void);
  * Next timer deadline for each CPU.
  * Modified only by the local CPU and never in interrupt context.
  */
-DECLARE_PER_CPU(s_time_t, timer_deadline);
+DECLARE_PER_CPU(s_time_t, timer_deadline_start);
+DECLARE_PER_CPU(s_time_t, timer_deadline_end);
 
 /* Arch-defined function to reprogram timer hardware for new deadline. */
 extern int reprogram_timer(s_time_t timeout);
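
A closing note on where the window comes from. In the timer.c hunk above, the non-overflow path of timer_softirq_action() derives a combined [start, end] window from the batch of pending timers, each of which carries its own expires/expires_end pair, and stores it in timer_deadline_start/timer_deadline_end. The sketch below shows one simple way such a window can be derived; it illustrates the invariant only (start is the earliest expires, end is the earliest expires_end, so start <= end), it is not the exact coalescing loop Xen uses, and pending_timer/batch_window are made-up names.

#include <assert.h>
#include <stdint.h>

typedef int64_t s_time_t;

/* A pending timer with slack, mirroring the expires/expires_end fields. */
struct pending_timer {
    s_time_t expires;      /* must not fire before this */
    s_time_t expires_end;  /* must have fired by this */
};

/*
 * Derive one [start, end] window in which a single hardware interrupt can
 * be placed: nothing is due before start (the earliest expires), and
 * waiting past end (the earliest expires_end) would make some timer late.
 * Since expires <= expires_end for every timer, start <= end always holds.
 */
static void batch_window(const struct pending_timer *t, unsigned int n,
                         s_time_t *start, s_time_t *end)
{
    unsigned int i;

    assert(n != 0);
    *start = t[0].expires;
    *end   = t[0].expires_end;

    for ( i = 1; i < n; i++ )
    {
        if ( t[i].expires < *start )
            *start = t[i].expires;
        if ( t[i].expires_end < *end )
            *end = t[i].expires_end;
    }
}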