xen/common/tasklet.c @ 22721:0ab058a55c24 (debuggers.hg)

Update my email address to long-term stable address.

Signed-off-by: Keir Fraser <keir@xen.org>
Author: Keir Fraser <keir@xen.org>
Date:   Fri Jan 07 13:30:04 2011 +0000
/******************************************************************************
 * tasklet.c
 *
 * Tasklets are dynamically-allocatable tasks run in VCPU context
 * (specifically, the idle VCPU's context) on at most one CPU at a time.
 *
 * Copyright (c) 2010, Citrix Systems, Inc.
 * Copyright (c) 1992, Linus Torvalds
 *
 * Authors:
 *    Keir Fraser <keir@xen.org>
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/tasklet.h>
#include <xen/cpu.h>

/* Some subsystems call into us before we are initialised. We ignore them. */
static bool_t tasklets_initialised;

DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);

static DEFINE_PER_CPU(struct list_head, tasklet_list);

/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);

static void tasklet_enqueue(struct tasklet *t)
{
    unsigned int cpu = t->scheduled_on;
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);

    list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
    if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}

void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( tasklets_initialised && !t->is_dead )
    {
        t->scheduled_on = cpu;
        if ( !t->is_running )
        {
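            /* Unlink from any previous CPU's list before (re-)enqueueing. */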
            list_del(&t->list);
            tasklet_enqueue(t);
        }
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

void tasklet_schedule(struct tasklet *t)
{
    tasklet_schedule_on_cpu(t, smp_processor_id());
}

void do_tasklet(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
    struct list_head *list = &per_cpu(tasklet_list, cpu);
    struct tasklet *t;

    /*
     * Work must be enqueued *and* scheduled. Otherwise there is no work to
     * do, and/or scheduler needs to run to update idle vcpu priority.
     */
    if ( likely(*work_to_do != (TASKLET_enqueued|TASKLET_scheduled)) )
        return;

    spin_lock_irq(&tasklet_lock);

    if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
        goto out;

    t = list_entry(list->next, struct tasklet, list);
    list_del_init(&t->list);

    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
    t->scheduled_on = -1;
    t->is_running = 1;

    spin_unlock_irq(&tasklet_lock);
    sync_local_execstate();
    t->func(t->data);
    spin_lock_irq(&tasklet_lock);

    t->is_running = 0;

    if ( t->scheduled_on >= 0 )
    {
        BUG_ON(t->is_dead || !list_empty(&t->list));
        tasklet_enqueue(t);
    }
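
    /*
     * Once the list is drained, clear the enqueued bit and kick the
     * scheduler so it can re-evaluate the idle VCPU's priority.
     */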
 out:
    if ( list_empty(list) )
    {
        clear_bit(_TASKLET_enqueued, work_to_do);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    spin_unlock_irq(&tasklet_lock);
}

void tasklet_kill(struct tasklet *t)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( !list_empty(&t->list) )
    {
        BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
        list_del_init(&t->list);
    }

    t->scheduled_on = -1;
    t->is_dead = 1;
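
    /* Wait for any currently-running invocation to finish. */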
    while ( t->is_running )
    {
        spin_unlock_irqrestore(&tasklet_lock, flags);
        cpu_relax();
        spin_lock_irqsave(&tasklet_lock, flags);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

static void migrate_tasklets_from_cpu(unsigned int cpu)
{
    struct list_head *list = &per_cpu(tasklet_list, cpu);
    unsigned long flags;
    struct tasklet *t;

    spin_lock_irqsave(&tasklet_lock, flags);
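
    /* Drain the dead CPU's list, re-queueing each tasklet on this CPU. */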
    while ( !list_empty(list) )
    {
        t = list_entry(list->next, struct tasklet, list);
        BUG_ON(t->scheduled_on != cpu);
        t->scheduled_on = smp_processor_id();
        list_del(&t->list);
        tasklet_enqueue(t);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

void tasklet_init(
    struct tasklet *t, void (*func)(unsigned long), unsigned long data)
{
    memset(t, 0, sizeof(*t));
    INIT_LIST_HEAD(&t->list);
    t->scheduled_on = -1;
    t->func = func;
    t->data = data;
}

static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        migrate_tasklets_from_cpu(cpu);
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};

void __init tasklet_subsys_init(void)
{
    void *hcpu = (void *)(long)smp_processor_id();
    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    register_cpu_notifier(&cpu_nfb);
    tasklets_initialised = 1;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
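
For reference, here is a minimal usage sketch of the API defined above: initialise once, schedule from a hot path such as an interrupt handler, and kill on teardown. The my_* names are hypothetical and not taken from the Xen tree; only the tasklet_* calls come from this file.

/* Hypothetical caller of the tasklet API (illustrative only). */
#include <xen/tasklet.h>

static struct tasklet my_tasklet;

/* Runs in idle-VCPU context, on at most one CPU at a time. */
static void my_work(unsigned long data)
{
    /* ... deferred heavy lifting goes here ... */
}

static void my_subsys_setup(void)
{
    /* Zero the struct and record the handler and its argument. */
    tasklet_init(&my_tasklet, my_work, 0);
}

static void my_irq_handler(void)
{
    /* Queue on the current CPU; SCHEDULE_SOFTIRQ wakes the idle VCPU. */
    tasklet_schedule(&my_tasklet);
}

static void my_subsys_teardown(void)
{
    /* Mark the tasklet dead and wait for any in-flight run to finish. */
    tasklet_kill(&my_tasklet);
}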