debuggers.hg

changeset 21211:5057604eeefc

Per-cpu tasklet lists.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Apr 14 10:44:29 2010 +0100 (2010-04-14)
parents c02cc832cb2d
children 07befd9cf6d3
files xen/arch/x86/smpboot.c xen/common/softirq.c xen/include/xen/softirq.h
line diff
     1.1 --- a/xen/arch/x86/smpboot.c	Tue Apr 13 18:19:33 2010 +0100
     1.2 +++ b/xen/arch/x86/smpboot.c	Wed Apr 14 10:44:29 2010 +0100
     1.3 @@ -1374,6 +1374,7 @@ int cpu_down(unsigned int cpu)
     1.4  
     1.5  	BUG_ON(cpu_online(cpu));
     1.6  
     1.7 +	migrate_tasklets_from_cpu(cpu);
     1.8  	cpu_mcheck_distribute_cmci();
     1.9  
    1.10  out:
     2.1 --- a/xen/common/softirq.c	Tue Apr 13 18:19:33 2010 +0100
     2.2 +++ b/xen/common/softirq.c	Wed Apr 14 10:44:29 2010 +0100
     2.3 @@ -78,7 +78,8 @@ void cpumask_raise_softirq(cpumask_t mas
     2.4  
     2.5  void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
     2.6  {
     2.7 -    if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
     2.8 +    if ( !test_and_set_bit(nr, &softirq_pending(cpu))
     2.9 +         && (cpu != smp_processor_id()) )
    2.10          smp_send_event_check_cpu(cpu);
    2.11  }
    2.12  
    2.13 @@ -87,46 +88,54 @@ void raise_softirq(unsigned int nr)
    2.14      set_bit(nr, &softirq_pending(smp_processor_id()));
    2.15  }
    2.16  
    2.17 -static LIST_HEAD(tasklet_list);
    2.18 +static bool_t tasklets_initialised;
    2.19 +static DEFINE_PER_CPU(struct list_head, tasklet_list);
    2.20  static DEFINE_SPINLOCK(tasklet_lock);
    2.21  
    2.22 -void tasklet_schedule(struct tasklet *t)
    2.23 +void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
    2.24  {
    2.25      unsigned long flags;
    2.26  
    2.27      spin_lock_irqsave(&tasklet_lock, flags);
    2.28  
    2.29 -    if ( !t->is_dead )
    2.30 +    if ( tasklets_initialised && !t->is_dead )
    2.31      {
    2.32 -        if ( !t->is_scheduled && !t->is_running )
    2.33 +        t->scheduled_on = cpu;
    2.34 +        if ( !t->is_running )
    2.35          {
    2.36 -            BUG_ON(!list_empty(&t->list));
    2.37 -            list_add_tail(&t->list, &tasklet_list);
    2.38 +            list_del(&t->list);
    2.39 +            list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
    2.40 +            cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
    2.41          }
    2.42 -        t->is_scheduled = 1;
    2.43 -        raise_softirq(TASKLET_SOFTIRQ);
    2.44      }
    2.45  
    2.46      spin_unlock_irqrestore(&tasklet_lock, flags);
    2.47  }
    2.48  
    2.49 +void tasklet_schedule(struct tasklet *t)
    2.50 +{
    2.51 +    tasklet_schedule_on_cpu(t, smp_processor_id());
    2.52 +}
    2.53 +
    2.54  static void tasklet_action(void)
    2.55  {
    2.56 +    unsigned int cpu = smp_processor_id();
    2.57 +    struct list_head *list = &per_cpu(tasklet_list, cpu);
    2.58      struct tasklet *t;
    2.59  
    2.60      spin_lock_irq(&tasklet_lock);
    2.61  
    2.62 -    if ( list_empty(&tasklet_list) )
    2.63 +    if ( list_empty(list) )
    2.64      {
    2.65          spin_unlock_irq(&tasklet_lock);
    2.66          return;
    2.67      }
    2.68  
    2.69 -    t = list_entry(tasklet_list.next, struct tasklet, list);
    2.70 +    t = list_entry(list->next, struct tasklet, list);
    2.71      list_del_init(&t->list);
    2.72  
    2.73 -    BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
    2.74 -    t->is_scheduled = 0;
    2.75 +    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
    2.76 +    t->scheduled_on = -1;
    2.77      t->is_running = 1;
    2.78  
    2.79      spin_unlock_irq(&tasklet_lock);
    2.80 @@ -135,17 +144,19 @@ static void tasklet_action(void)
    2.81  
    2.82      t->is_running = 0;
    2.83  
    2.84 -    if ( t->is_scheduled )
    2.85 +    if ( t->scheduled_on >= 0 )
    2.86      {
    2.87          BUG_ON(t->is_dead || !list_empty(&t->list));
    2.88 -        list_add_tail(&t->list, &tasklet_list);
    2.89 +        list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on));
    2.90 +        if ( t->scheduled_on != cpu )
    2.91 +            cpu_raise_softirq(t->scheduled_on, TASKLET_SOFTIRQ);
    2.92      }
    2.93  
    2.94      /*
    2.95       * If there is more work to do then reschedule. We don't grab more work
    2.96       * immediately as we want to allow other softirq work to happen first.
    2.97       */
    2.98 -    if ( !list_empty(&tasklet_list) )
    2.99 +    if ( !list_empty(list) )
   2.100          raise_softirq(TASKLET_SOFTIRQ);
   2.101  
   2.102      spin_unlock_irq(&tasklet_lock);
   2.103 @@ -159,10 +170,10 @@ void tasklet_kill(struct tasklet *t)
   2.104  
   2.105      if ( !list_empty(&t->list) )
   2.106      {
   2.107 -        BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
   2.108 +        BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
   2.109          list_del_init(&t->list);
   2.110      }
   2.111 -    t->is_scheduled = 0;
   2.112 +    t->scheduled_on = -1;
   2.113      t->is_dead = 1;
   2.114  
   2.115      while ( t->is_running )
   2.116 @@ -175,18 +186,48 @@ void tasklet_kill(struct tasklet *t)
   2.117      spin_unlock_irqrestore(&tasklet_lock, flags);
   2.118  }
   2.119  
   2.120 +void migrate_tasklets_from_cpu(unsigned int cpu)
   2.121 +{
   2.122 +    struct list_head *list = &per_cpu(tasklet_list, cpu);
   2.123 +    unsigned long flags;
   2.124 +    struct tasklet *t;
   2.125 +
   2.126 +    spin_lock_irqsave(&tasklet_lock, flags);
   2.127 +
   2.128 +    while ( !list_empty(list) )
   2.129 +    {
   2.130 +        t = list_entry(list->next, struct tasklet, list);
   2.131 +        BUG_ON(t->scheduled_on != cpu);
   2.132 +        t->scheduled_on = smp_processor_id();
   2.133 +        list_del(&t->list);
   2.134 +        list_add_tail(&t->list, &this_cpu(tasklet_list));
   2.135 +    }
   2.136 +
   2.137 +    raise_softirq(TASKLET_SOFTIRQ);
   2.138 +
   2.139 +    spin_unlock_irqrestore(&tasklet_lock, flags);
   2.140 +}
   2.141 +
   2.142  void tasklet_init(
   2.143      struct tasklet *t, void (*func)(unsigned long), unsigned long data)
   2.144  {
   2.145      memset(t, 0, sizeof(*t));
   2.146      INIT_LIST_HEAD(&t->list);
   2.147 +    t->scheduled_on = -1;
   2.148      t->func = func;
   2.149      t->data = data;
   2.150  }
   2.151  
   2.152  void __init softirq_init(void)
   2.153  {
   2.154 +    unsigned int cpu;
   2.155 +
   2.156 +    for_each_possible_cpu ( cpu )
   2.157 +        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
   2.158 +
   2.159      open_softirq(TASKLET_SOFTIRQ, tasklet_action);
   2.160 +
   2.161 +    tasklets_initialised = 1;
   2.162  }
   2.163  
   2.164  /*
     3.1 --- a/xen/include/xen/softirq.h	Tue Apr 13 18:19:33 2010 +0100
     3.2 +++ b/xen/include/xen/softirq.h	Wed Apr 14 10:44:29 2010 +0100
     3.3 @@ -47,7 +47,7 @@ void process_pending_softirqs(void);
     3.4  struct tasklet
     3.5  {
     3.6      struct list_head list;
     3.7 -    bool_t is_scheduled;
     3.8 +    int scheduled_on;
     3.9      bool_t is_running;
    3.10      bool_t is_dead;
    3.11      void (*func)(unsigned long);
    3.12 @@ -55,10 +55,12 @@ struct tasklet
    3.13  };
    3.14  
    3.15  #define DECLARE_TASKLET(name, func, data) \
    3.16 -    struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, 0, func, data }
    3.17 +    struct tasklet name = { LIST_HEAD_INIT(name.list), -1, 0, 0, func, data }
    3.18  
    3.19 +void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu);
    3.20  void tasklet_schedule(struct tasklet *t);
    3.21  void tasklet_kill(struct tasklet *t);
    3.22 +void migrate_tasklets_from_cpu(unsigned int cpu);
    3.23  void tasklet_init(
    3.24      struct tasklet *t, void (*func)(unsigned long), unsigned long data);
    3.25