debuggers.hg: annotate xen/common/tasklet.c @ 22848:6341fe0f4e5a
changeset:  Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author:     Keir Fraser <keir@xen.org>
date:       Tue Jan 25 14:06:55 2011 +0000 (2011-01-25)
parents:    0ab058a55c24

/******************************************************************************
 * tasklet.c
 *
 * Tasklets are dynamically-allocatable tasks run in VCPU context
 * (specifically, the idle VCPU's context) on at most one CPU at a time.
 *
 * Copyright (c) 2010, Citrix Systems, Inc.
 * Copyright (c) 1992, Linus Torvalds
 *
 * Authors:
 *    Keir Fraser <keir@xen.org>
 */
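
/*
 * Typical usage (an illustrative sketch only; my_work, my_tasklet,
 * my_subsys_init and my_event_handler are made-up names, not part of this
 * file): a subsystem embeds a struct tasklet, initialises it once with
 * tasklet_init(), and defers work to it with tasklet_schedule() or
 * tasklet_schedule_on_cpu().
 *
 *     static void my_work(unsigned long data);
 *     static struct tasklet my_tasklet;
 *
 *     void my_subsys_init(void)
 *     {
 *         tasklet_init(&my_tasklet, my_work, 0);
 *     }
 *
 *     void my_event_handler(void)
 *     {
 *         tasklet_schedule(&my_tasklet);
 *     }
 *
 * The handler then runs later, in the idle vCPU's context, on at most one
 * CPU at a time.  tasklet_kill() retires the tasklet and waits for any
 * in-flight invocation to finish.
 */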

#include <xen/config.h>
#include <xen/init.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/tasklet.h>
#include <xen/cpu.h>

/* Some subsystems call into us before we are initialised. We ignore them. */
static bool_t tasklets_initialised;

DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);

static DEFINE_PER_CPU(struct list_head, tasklet_list);

/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);

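/*
 * Caller must hold tasklet_lock.  Queue @t on the CPU recorded in
 * t->scheduled_on; if that CPU's _TASKLET_enqueued bit was not already set,
 * raise SCHEDULE_SOFTIRQ there so the scheduler runs and its idle vCPU can
 * pick up the work via do_tasklet().
 */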
static void tasklet_enqueue(struct tasklet *t)
{
    unsigned int cpu = t->scheduled_on;
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);

    list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
    if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}

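/*
 * Schedule @t to run on @cpu.  Silently ignored before subsystem
 * initialisation or after tasklet_kill().  If the tasklet is currently
 * running, only its target CPU is updated; do_tasklet() re-queues it once
 * the handler returns.
 */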
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( tasklets_initialised && !t->is_dead )
    {
        t->scheduled_on = cpu;
        if ( !t->is_running )
        {
            list_del(&t->list);
            tasklet_enqueue(t);
        }
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

void tasklet_schedule(struct tasklet *t)
{
    tasklet_schedule_on_cpu(t, smp_processor_id());
}

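/*
 * Run one pending tasklet, if any.  Called from the idle vCPU's loop;
 * returns immediately unless this CPU's tasklet work is both enqueued and
 * scheduled.  At most one tasklet runs per call, with tasklet_lock dropped
 * and interrupts enabled while its handler executes.
 */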
void do_tasklet(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
    struct list_head *list = &per_cpu(tasklet_list, cpu);
    struct tasklet *t;

    /*
     * Work must be enqueued *and* scheduled. Otherwise there is no work to
     * do, and/or scheduler needs to run to update idle vcpu priority.
     */
    if ( likely(*work_to_do != (TASKLET_enqueued|TASKLET_scheduled)) )
        return;

    spin_lock_irq(&tasklet_lock);

    if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
        goto out;

    t = list_entry(list->next, struct tasklet, list);
    list_del_init(&t->list);

    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
    t->scheduled_on = -1;
    t->is_running = 1;

    spin_unlock_irq(&tasklet_lock);
    sync_local_execstate();
    t->func(t->data);
    spin_lock_irq(&tasklet_lock);

    t->is_running = 0;

    if ( t->scheduled_on >= 0 )
    {
        BUG_ON(t->is_dead || !list_empty(&t->list));
        tasklet_enqueue(t);
    }

 out:
    if ( list_empty(list) )
    {
        clear_bit(_TASKLET_enqueued, work_to_do);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    spin_unlock_irq(&tasklet_lock);
}

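/*
 * Retire a tasklet: dequeue it if pending, mark it dead so it can no longer
 * be scheduled, and spin (dropping the lock) until any in-flight invocation
 * on another CPU has completed.
 */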
void tasklet_kill(struct tasklet *t)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( !list_empty(&t->list) )
    {
        BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
        list_del_init(&t->list);
    }

    t->scheduled_on = -1;
    t->is_dead = 1;

    while ( t->is_running )
    {
        spin_unlock_irqrestore(&tasklet_lock, flags);
        cpu_relax();
        spin_lock_irqsave(&tasklet_lock, flags);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

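/*
 * CPU hotplug support: re-home every tasklet still queued on @cpu (which is
 * going away or failed to come up) onto the CPU running this function, so
 * no scheduled work is lost.
 */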
static void migrate_tasklets_from_cpu(unsigned int cpu)
{
    struct list_head *list = &per_cpu(tasklet_list, cpu);
    unsigned long flags;
    struct tasklet *t;

    spin_lock_irqsave(&tasklet_lock, flags);

    while ( !list_empty(list) )
    {
        t = list_entry(list->next, struct tasklet, list);
        BUG_ON(t->scheduled_on != cpu);
        t->scheduled_on = smp_processor_id();
        list_del(&t->list);
        tasklet_enqueue(t);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

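/*
 * Prepare @t for use with tasklet_schedule*(): zero the structure, mark it
 * as not queued on any CPU, and record the handler @func and its @data
 * argument.
 */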
void tasklet_init(
    struct tasklet *t, void (*func)(unsigned long), unsigned long data)
{
    memset(t, 0, sizeof(*t));
    INIT_LIST_HEAD(&t->list);
    t->scheduled_on = -1;
    t->func = func;
    t->data = data;
}

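/*
 * CPU hotplug notifier: initialise the per-CPU tasklet list as a CPU is
 * brought up, and drain its queue back to this CPU if the bring-up is
 * cancelled or the CPU dies.
 */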
static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        migrate_tasklets_from_cpu(cpu);
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};

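/*
 * One-time setup at boot: initialise the boot CPU's tasklet list, register
 * the hotplug notifier for secondary CPUs, and mark the subsystem
 * initialised so tasklet_schedule*() calls are no longer ignored.
 */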
void __init tasklet_subsys_init(void)
{
    void *hcpu = (void *)(long)smp_processor_id();
    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    register_cpu_notifier(&cpu_nfb);
    tasklets_initialised = 1;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */