/root/src/xen/xen/common/tasklet.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * tasklet.c |
3 | | * |
4 | | * Tasklets are dynamically-allocatable tasks run in either VCPU context |
5 | | * (specifically, the idle VCPU's context) or in softirq context, on at most |
6 | | * one CPU at a time. Softirq versus VCPU context execution is specified |
7 | | * during per-tasklet initialisation. |
8 | | * |
9 | | * Copyright (c) 2010, Citrix Systems, Inc. |
10 | | * Copyright (c) 1992, Linus Torvalds |
11 | | * |
12 | | * Authors: |
13 | | * Keir Fraser <keir@xen.org> |
14 | | */ |
15 | | |
16 | | #include <xen/init.h> |
17 | | #include <xen/sched.h> |
18 | | #include <xen/softirq.h> |
19 | | #include <xen/tasklet.h> |
20 | | #include <xen/cpu.h> |
21 | | |
22 | | /* Some subsystems call into us before we are initialised. We ignore them. */ |
23 | | static bool_t tasklets_initialised; |
24 | | |
25 | | DEFINE_PER_CPU(unsigned long, tasklet_work_to_do); |
26 | | |
27 | | static DEFINE_PER_CPU(struct list_head, tasklet_list); |
28 | | static DEFINE_PER_CPU(struct list_head, softirq_tasklet_list); |
29 | | |
30 | | /* Protects all lists and tasklet structures. */ |
31 | | static DEFINE_SPINLOCK(tasklet_lock); |
32 | | |
/*
 * Queue tasklet @t on the list of the CPU recorded in t->scheduled_on and,
 * if necessary, kick that CPU to process it.
 *
 * Caller must hold tasklet_lock (it protects both per-CPU lists), and must
 * already have set t->scheduled_on to the target CPU.
 */
static void tasklet_enqueue(struct tasklet *t)
{
    unsigned int cpu = t->scheduled_on;

    if ( t->is_softirq )
    {
        struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);
        /* Snapshot emptiness BEFORE insertion: only the first insertion
         * needs to raise the softirq; later ones find it already pending. */
        bool_t was_empty = list_empty(list);
        list_add_tail(&t->list, list);
        if ( was_empty )
            cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
    }
    else
    {
        unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
        list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
        /* Atomically mark work pending; only the transition 0->1 needs to
         * poke the scheduler (VCPU-context tasklets run via do_tasklet()
         * from the idle vCPU — see file header). */
        if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    }
}
53 | | |
/*
 * Schedule tasklet @t to run on @cpu.
 *
 * Silently ignored if the subsystem is not yet initialised or the tasklet
 * has been killed.  If the tasklet is currently executing, it is NOT
 * re-queued here: do_tasklet_work() re-enqueues it itself when it sees
 * scheduled_on >= 0 after the handler returns.
 */
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( tasklets_initialised && !t->is_dead )
    {
        t->scheduled_on = cpu;
        if ( !t->is_running )
        {
            /* Remove from any list it may already be on (list_del on an
             * initialised/linked node), then re-insert on @cpu's list. */
            list_del(&t->list);
            tasklet_enqueue(t);
        }
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}
72 | | |
/* Convenience wrapper: schedule @t on the CPU we are running on. */
void tasklet_schedule(struct tasklet *t)
{
    unsigned int this_cpu_id = smp_processor_id();

    tasklet_schedule_on_cpu(t, this_cpu_id);
}
77 | | |
/*
 * Pop and run at most ONE tasklet from @list for @cpu.
 *
 * Called with tasklet_lock held (IRQs disabled); the lock is dropped around
 * the handler invocation and re-taken afterwards, so list/tasklet state must
 * be fully consistent before the unlock.
 */
static void do_tasklet_work(unsigned int cpu, struct list_head *list)
{
    struct tasklet *t;

    /* Nothing to do, or this CPU is going away: bail. */
    if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
        return;

    t = list_entry(list->next, struct tasklet, list);
    /* list_del_init so a concurrent tasklet_kill() sees an empty node. */
    list_del_init(&t->list);

    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
    /* Mark "not scheduled anywhere" + "running" BEFORE dropping the lock:
     * a concurrent tasklet_schedule() will then only set scheduled_on and
     * leave the re-enqueue to us (see below). */
    t->scheduled_on = -1;
    t->is_running = 1;

    spin_unlock_irq(&tasklet_lock);
    sync_local_execstate();
    t->func(t->data);
    spin_lock_irq(&tasklet_lock);

    t->is_running = 0;

    /* Re-schedule requested while the handler was running? */
    if ( t->scheduled_on >= 0 )
    {
        BUG_ON(t->is_dead || !list_empty(&t->list));
        tasklet_enqueue(t);
    }
}
105 | | |
/* VCPU context work: run one pending (non-softirq) tasklet for this CPU. */
void do_tasklet(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
    struct list_head *list = &per_cpu(tasklet_list, cpu);

    /*
     * We want to be sure any caller has checked that a tasklet is both
     * enqueued and scheduled, before calling this. And, if the caller has
     * actually checked, it's not an issue that we are outside of the
     * critical region, in fact:
     * - TASKLET_enqueued is cleared only here,
     * - TASKLET_scheduled is only cleared when schedule() find it set,
     *   without TASKLET_enqueued being set as well.
     */
    ASSERT(tasklet_work_to_do(cpu));

    spin_lock_irq(&tasklet_lock);

    do_tasklet_work(cpu, list);

    /* Only one tasklet is run per call; when the list drains, clear the
     * pending bit and poke the scheduler so it can drop TASKLET_scheduled
     * (see comment above) and resume normal vCPU selection. */
    if ( list_empty(list) )
    {
        clear_bit(_TASKLET_enqueued, work_to_do);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    spin_unlock_irq(&tasklet_lock);
}
136 | | |
137 | | /* Softirq context work */ |
138 | | static void tasklet_softirq_action(void) |
139 | 5.83k | { |
140 | 5.83k | unsigned int cpu = smp_processor_id(); |
141 | 5.83k | struct list_head *list = &per_cpu(softirq_tasklet_list, cpu); |
142 | 5.83k | |
143 | 5.83k | spin_lock_irq(&tasklet_lock); |
144 | 5.83k | |
145 | 5.83k | do_tasklet_work(cpu, list); |
146 | 5.83k | |
147 | 5.83k | if ( !list_empty(list) && !cpu_is_offline(cpu) ) |
148 | 4 | raise_softirq(TASKLET_SOFTIRQ); |
149 | 5.83k | |
150 | 5.83k | spin_unlock_irq(&tasklet_lock); |
151 | 5.83k | } |
152 | | |
/*
 * Permanently disable tasklet @t and wait for any in-flight execution to
 * finish.  On return the tasklet is dead: further schedule attempts are
 * ignored (see tasklet_schedule_on_cpu()).
 *
 * Must not be called from the tasklet's own handler (the wait below would
 * never terminate).
 */
void tasklet_kill(struct tasklet *t)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    /* Unlink from its pending list, if queued anywhere. */
    if ( !list_empty(&t->list) )
    {
        BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
        list_del_init(&t->list);
    }

    t->scheduled_on = -1;
    t->is_dead = 1;

    /* Busy-wait for a concurrent handler invocation to complete, dropping
     * the lock each iteration so do_tasklet_work() can clear is_running. */
    while ( t->is_running )
    {
        spin_unlock_irqrestore(&tasklet_lock, flags);
        cpu_relax();
        spin_lock_irqsave(&tasklet_lock, flags);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}
177 | | |
178 | | static void migrate_tasklets_from_cpu(unsigned int cpu, struct list_head *list) |
179 | 0 | { |
180 | 0 | unsigned long flags; |
181 | 0 | struct tasklet *t; |
182 | 0 |
|
183 | 0 | spin_lock_irqsave(&tasklet_lock, flags); |
184 | 0 |
|
185 | 0 | while ( !list_empty(list) ) |
186 | 0 | { |
187 | 0 | t = list_entry(list->next, struct tasklet, list); |
188 | 0 | BUG_ON(t->scheduled_on != cpu); |
189 | 0 | t->scheduled_on = smp_processor_id(); |
190 | 0 | list_del(&t->list); |
191 | 0 | tasklet_enqueue(t); |
192 | 0 | } |
193 | 0 |
|
194 | 0 | spin_unlock_irqrestore(&tasklet_lock, flags); |
195 | 0 | } |
196 | | |
197 | | void tasklet_init( |
198 | | struct tasklet *t, void (*func)(unsigned long), unsigned long data) |
199 | 62 | { |
200 | 62 | memset(t, 0, sizeof(*t)); |
201 | 62 | INIT_LIST_HEAD(&t->list); |
202 | 62 | t->scheduled_on = -1; |
203 | 62 | t->func = func; |
204 | 62 | t->data = data; |
205 | 62 | } |
206 | | |
207 | | void softirq_tasklet_init( |
208 | | struct tasklet *t, void (*func)(unsigned long), unsigned long data) |
209 | 13 | { |
210 | 13 | tasklet_init(t, func, data); |
211 | 13 | t->is_softirq = 1; |
212 | 13 | } |
213 | | |
214 | | static int cpu_callback( |
215 | | struct notifier_block *nfb, unsigned long action, void *hcpu) |
216 | 34 | { |
217 | 34 | unsigned int cpu = (unsigned long)hcpu; |
218 | 34 | |
219 | 34 | switch ( action ) |
220 | 34 | { |
221 | 12 | case CPU_UP_PREPARE: |
222 | 12 | INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu)); |
223 | 12 | INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu)); |
224 | 12 | break; |
225 | 0 | case CPU_UP_CANCELED: |
226 | 0 | case CPU_DEAD: |
227 | 0 | migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_list, cpu)); |
228 | 0 | migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_tasklet_list, cpu)); |
229 | 0 | break; |
230 | 22 | default: |
231 | 22 | break; |
232 | 34 | } |
233 | 34 | |
234 | 34 | return NOTIFY_DONE; |
235 | 34 | } |
236 | | |
/* NOTE(review): priority 99 presumably orders this notifier ahead of most
 * others so the lists exist before dependants run — confirm against the
 * notifier-chain semantics in xen/common/notifier.c. */
static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};
241 | | |
242 | | void __init tasklet_subsys_init(void) |
243 | 1 | { |
244 | 1 | void *hcpu = (void *)(long)smp_processor_id(); |
245 | 1 | cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu); |
246 | 1 | register_cpu_notifier(&cpu_nfb); |
247 | 1 | open_softirq(TASKLET_SOFTIRQ, tasklet_softirq_action); |
248 | 1 | tasklets_initialised = 1; |
249 | 1 | } |
250 | | |
251 | | /* |
252 | | * Local variables: |
253 | | * mode: C |
254 | | * c-file-style: "BSD" |
255 | | * c-basic-offset: 4 |
256 | | * tab-width: 4 |
257 | | * indent-tabs-mode: nil |
258 | | * End: |
259 | | */ |