/root/src/xen/xen/common/event_channel.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * event_channel.c |
3 | | * |
4 | | * Event notifications from VIRQs, PIRQs, and other domains. |
5 | | * |
6 | | * Copyright (c) 2003-2006, K A Fraser. |
7 | | * |
8 | | * This program is distributed in the hope that it will be useful, |
9 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | | * GNU General Public License for more details. |
12 | | * |
13 | | * You should have received a copy of the GNU General Public License |
14 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
15 | | */ |
16 | | |
17 | | #include <xen/init.h> |
18 | | #include <xen/lib.h> |
19 | | #include <xen/errno.h> |
20 | | #include <xen/sched.h> |
21 | | #include <xen/event.h> |
22 | | #include <xen/irq.h> |
23 | | #include <xen/iocap.h> |
24 | | #include <xen/compat.h> |
25 | | #include <xen/guest_access.h> |
26 | | #include <xen/keyhandler.h> |
27 | | #include <xen/event_fifo.h> |
28 | | #include <asm/current.h> |
29 | | |
30 | | #include <public/xen.h> |
31 | | #include <public/event_channel.h> |
32 | | #include <xsm/xsm.h> |
33 | | |
/*
 * Log an EVTCHNOP failure, set the hypercall return value "rc" and bail
 * to the local "out" label.  Only usable inside functions that provide
 * both an "out" label and an "rc" variable.
 */
#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: error %d\n",                    \
                 (_errno));                                         \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
/* As ERROR_EXIT(), but also identify the (remote) domain involved. */
#define ERROR_EXIT_DOM(_errno, _dom)                                \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d\n",         \
                 (_dom)->domain_id, (_errno));                      \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

/* True iff the event channel is consumed by Xen itself rather than a guest. */
#define consumer_is_xen(e) (!!(e)->xen_consumer)
52 | | |
/*
 * The function alloc_unbound_xen_event_channel() allows an arbitrary
 * notifier function to be specified. However, very few unique functions
 * are specified in practice, so to prevent bloating the evtchn structure
 * with a pointer, we stash them dynamically in a small lookup array which
 * can be indexed by a small integer.
 */
/* Indexed by (evtchn->xen_consumer - 1); see get_xen_consumer(). */
static xen_event_channel_notification_t xen_consumers[NR_XEN_CONSUMERS];
61 | | |
62 | | /* Default notification action: wake up from wait_on_xen_event_channel(). */ |
63 | | static void default_xen_notification_fn(struct vcpu *v, unsigned int port) |
64 | 0 | { |
65 | 0 | /* Consumer needs notification only if blocked. */ |
66 | 0 | if ( test_and_clear_bit(_VPF_blocked_in_xen, &v->pause_flags) ) |
67 | 0 | vcpu_wake(v); |
68 | 0 | } |
69 | | |
70 | | /* |
71 | | * Given a notification function, return the value to stash in |
72 | | * the evtchn->xen_consumer field. |
73 | | */ |
74 | | static uint8_t get_xen_consumer(xen_event_channel_notification_t fn) |
75 | 0 | { |
76 | 0 | unsigned int i; |
77 | 0 |
|
78 | 0 | if ( fn == NULL ) |
79 | 0 | fn = default_xen_notification_fn; |
80 | 0 |
|
81 | 0 | for ( i = 0; i < ARRAY_SIZE(xen_consumers); i++ ) |
82 | 0 | { |
83 | 0 | if ( xen_consumers[i] == NULL ) |
84 | 0 | xen_consumers[i] = fn; |
85 | 0 | if ( xen_consumers[i] == fn ) |
86 | 0 | break; |
87 | 0 | } |
88 | 0 |
|
89 | 0 | BUG_ON(i >= ARRAY_SIZE(xen_consumers)); |
90 | 0 | return i+1; |
91 | 0 | } |
92 | | |
/*
 * Get the notification function for a given Xen-bound event channel.
 * xen_consumer is 1-based (see get_xen_consumer()), hence the -1 here.
 */
#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
95 | | |
96 | | static int virq_is_global(uint32_t virq) |
97 | 6.05k | { |
98 | 6.05k | int rc; |
99 | 6.05k | |
100 | 6.05k | ASSERT(virq < NR_VIRQS); |
101 | 6.05k | |
102 | 6.05k | switch ( virq ) |
103 | 6.05k | { |
104 | 5.76k | case VIRQ_TIMER: |
105 | 5.76k | case VIRQ_DEBUG: |
106 | 5.76k | case VIRQ_XENOPROF: |
107 | 5.76k | case VIRQ_XENPMU: |
108 | 5.76k | rc = 0; |
109 | 5.76k | break; |
110 | 0 | case VIRQ_ARCH_0 ... VIRQ_ARCH_7: |
111 | 0 | rc = arch_virq_is_global(virq); |
112 | 0 | break; |
113 | 286 | default: |
114 | 286 | rc = 1; |
115 | 286 | break; |
116 | 6.05k | } |
117 | 6.05k | |
118 | 6.05k | return rc; |
119 | 6.05k | } |
120 | | |
121 | | |
122 | | static struct evtchn *alloc_evtchn_bucket(struct domain *d, unsigned int port) |
123 | 3 | { |
124 | 3 | struct evtchn *chn; |
125 | 3 | unsigned int i; |
126 | 3 | |
127 | 3 | chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET); |
128 | 3 | if ( !chn ) |
129 | 0 | return NULL; |
130 | 3 | |
131 | 195 | for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ ) |
132 | 192 | { |
133 | 192 | if ( xsm_alloc_security_evtchn(&chn[i]) ) |
134 | 0 | { |
135 | 0 | while ( i-- ) |
136 | 0 | xsm_free_security_evtchn(&chn[i]); |
137 | 0 | xfree(chn); |
138 | 0 | return NULL; |
139 | 0 | } |
140 | 192 | chn[i].port = port + i; |
141 | 192 | spin_lock_init(&chn[i].lock); |
142 | 192 | } |
143 | 3 | return chn; |
144 | 3 | } |
145 | | |
146 | | static void free_evtchn_bucket(struct domain *d, struct evtchn *bucket) |
147 | 0 | { |
148 | 0 | unsigned int i; |
149 | 0 |
|
150 | 0 | if ( !bucket ) |
151 | 0 | return; |
152 | 0 |
|
153 | 0 | for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ ) |
154 | 0 | xsm_free_security_evtchn(bucket + i); |
155 | 0 |
|
156 | 0 | xfree(bucket); |
157 | 0 | } |
158 | | |
159 | | static int get_free_port(struct domain *d) |
160 | 137 | { |
161 | 137 | struct evtchn *chn; |
162 | 137 | struct evtchn **grp; |
163 | 137 | int port; |
164 | 137 | |
165 | 137 | if ( d->is_dying ) |
166 | 0 | return -EINVAL; |
167 | 137 | |
168 | 9.45k | for ( port = 0; port_is_valid(d, port); port++ ) |
169 | 9.45k | { |
170 | 9.45k | if ( port > d->max_evtchn_port ) |
171 | 0 | return -ENOSPC; |
172 | 9.45k | if ( evtchn_from_port(d, port)->state == ECS_FREE |
173 | 135 | && !evtchn_port_is_busy(d, port) ) |
174 | 135 | return port; |
175 | 9.45k | } |
176 | 137 | |
177 | 2 | if ( port == d->max_evtchns || port > d->max_evtchn_port ) |
178 | 0 | return -ENOSPC; |
179 | 2 | |
180 | 2 | if ( !group_from_port(d, port) ) |
181 | 1 | { |
182 | 1 | grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP); |
183 | 1 | if ( !grp ) |
184 | 0 | return -ENOMEM; |
185 | 1 | group_from_port(d, port) = grp; |
186 | 1 | } |
187 | 2 | |
188 | 2 | chn = alloc_evtchn_bucket(d, port); |
189 | 2 | if ( !chn ) |
190 | 0 | return -ENOMEM; |
191 | 2 | bucket_from_port(d, port) = chn; |
192 | 2 | |
193 | 2 | write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET); |
194 | 2 | |
195 | 2 | return port; |
196 | 2 | } |
197 | | |
/*
 * Return a channel to the ECS_FREE state.  Caller holds d->event_lock
 * (and chn->lock where the call sites require it).
 */
static void free_evtchn(struct domain *d, struct evtchn *chn)
{
    /* Clear pending event to avoid unexpected behavior on re-bind. */
    evtchn_port_clear_pending(d, chn);

    /* Reset binding to vcpu0 when the channel is freed. */
    chn->state = ECS_FREE;
    chn->notify_vcpu_id = 0;
    chn->xen_consumer = 0;

    xsm_evtchn_close_post(chn);
}
210 | | |
211 | | static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc) |
212 | 1 | { |
213 | 1 | struct evtchn *chn; |
214 | 1 | struct domain *d; |
215 | 1 | int port; |
216 | 1 | domid_t dom = alloc->dom; |
217 | 1 | long rc; |
218 | 1 | |
219 | 1 | d = rcu_lock_domain_by_any_id(dom); |
220 | 1 | if ( d == NULL ) |
221 | 0 | return -ESRCH; |
222 | 1 | |
223 | 1 | spin_lock(&d->event_lock); |
224 | 1 | |
225 | 1 | if ( (port = get_free_port(d)) < 0 ) |
226 | 0 | ERROR_EXIT_DOM(port, d); |
227 | 1 | chn = evtchn_from_port(d, port); |
228 | 1 | |
229 | 1 | rc = xsm_evtchn_unbound(XSM_TARGET, d, chn, alloc->remote_dom); |
230 | 1 | if ( rc ) |
231 | 0 | goto out; |
232 | 1 | |
233 | 1 | spin_lock(&chn->lock); |
234 | 1 | |
235 | 1 | chn->state = ECS_UNBOUND; |
236 | 1 | if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF ) |
237 | 1 | chn->u.unbound.remote_domid = current->domain->domain_id; |
238 | 1 | evtchn_port_init(d, chn); |
239 | 1 | |
240 | 1 | spin_unlock(&chn->lock); |
241 | 1 | |
242 | 1 | alloc->port = port; |
243 | 1 | |
244 | 1 | out: |
245 | 1 | spin_unlock(&d->event_lock); |
246 | 1 | rcu_unlock_domain(d); |
247 | 1 | |
248 | 1 | return rc; |
249 | 1 | } |
250 | | |
251 | | |
252 | | static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn) |
253 | 1 | { |
254 | 1 | if ( lchn < rchn ) |
255 | 1 | { |
256 | 1 | spin_lock(&lchn->lock); |
257 | 1 | spin_lock(&rchn->lock); |
258 | 1 | } |
259 | 1 | else |
260 | 0 | { |
261 | 0 | if ( lchn != rchn ) |
262 | 0 | spin_lock(&rchn->lock); |
263 | 0 | spin_lock(&lchn->lock); |
264 | 0 | } |
265 | 1 | } |
266 | | |
267 | | static void double_evtchn_unlock(struct evtchn *lchn, struct evtchn *rchn) |
268 | 1 | { |
269 | 1 | spin_unlock(&lchn->lock); |
270 | 1 | if ( lchn != rchn ) |
271 | 1 | spin_unlock(&rchn->lock); |
272 | 1 | } |
273 | | |
/*
 * EVTCHNOP_bind_interdomain: allocate a local port in the calling domain
 * and connect it to an existing ECS_UNBOUND remote port that was reserved
 * for us, turning both ends ECS_INTERDOMAIN.
 *
 * NOTE(review): the comment below speaks of "smaller id" but the code
 * actually orders the two event locks by domain pointer value; any
 * globally consistent order suffices to avoid deadlock.
 */
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( ld < rd )
    {
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    /* The remote port must exist, be unbound, and be reserved for us. */
    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(XSM_HOOK, ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    double_evtchn_lock(lchn, rchn);

    lchn->u.interdomain.remote_dom = rd;
    lchn->u.interdomain.remote_port = rport;
    lchn->state = ECS_INTERDOMAIN;
    evtchn_port_init(ld, lchn);

    rchn->u.interdomain.remote_dom = ld;
    rchn->u.interdomain.remote_port = lport;
    rchn->state = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);

    double_evtchn_unlock(lchn, rchn);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->event_lock);
    if ( ld != rd )
        spin_unlock(&rd->event_lock);

    rcu_unlock_domain(rd);

    return rc;
}
346 | | |
347 | | |
348 | | static long evtchn_bind_virq(evtchn_bind_virq_t *bind) |
349 | 26 | { |
350 | 26 | struct evtchn *chn; |
351 | 26 | struct vcpu *v; |
352 | 26 | struct domain *d = current->domain; |
353 | 26 | int port, virq = bind->virq, vcpu = bind->vcpu; |
354 | 26 | long rc = 0; |
355 | 26 | |
356 | 26 | if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) ) |
357 | 0 | return -EINVAL; |
358 | 26 | |
359 | 26 | if ( virq_is_global(virq) && (vcpu != 0) ) |
360 | 0 | return -EINVAL; |
361 | 26 | |
362 | 26 | if ( (vcpu < 0) || (vcpu >= d->max_vcpus) || |
363 | 26 | ((v = d->vcpu[vcpu]) == NULL) ) |
364 | 0 | return -ENOENT; |
365 | 26 | |
366 | 26 | spin_lock(&d->event_lock); |
367 | 26 | |
368 | 26 | if ( v->virq_to_evtchn[virq] != 0 ) |
369 | 0 | ERROR_EXIT(-EEXIST); |
370 | 26 | |
371 | 26 | if ( (port = get_free_port(d)) < 0 ) |
372 | 0 | ERROR_EXIT(port); |
373 | 26 | |
374 | 26 | chn = evtchn_from_port(d, port); |
375 | 26 | |
376 | 26 | spin_lock(&chn->lock); |
377 | 26 | |
378 | 26 | chn->state = ECS_VIRQ; |
379 | 26 | chn->notify_vcpu_id = vcpu; |
380 | 26 | chn->u.virq = virq; |
381 | 26 | evtchn_port_init(d, chn); |
382 | 26 | |
383 | 26 | spin_unlock(&chn->lock); |
384 | 26 | |
385 | 26 | v->virq_to_evtchn[virq] = bind->port = port; |
386 | 26 | |
387 | 26 | out: |
388 | 26 | spin_unlock(&d->event_lock); |
389 | 26 | |
390 | 26 | return rc; |
391 | 26 | } |
392 | | |
393 | | |
394 | | static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) |
395 | 108 | { |
396 | 108 | struct evtchn *chn; |
397 | 108 | struct domain *d = current->domain; |
398 | 108 | int port, vcpu = bind->vcpu; |
399 | 108 | long rc = 0; |
400 | 108 | |
401 | 108 | if ( (vcpu < 0) || (vcpu >= d->max_vcpus) || |
402 | 108 | (d->vcpu[vcpu] == NULL) ) |
403 | 0 | return -ENOENT; |
404 | 108 | |
405 | 108 | spin_lock(&d->event_lock); |
406 | 108 | |
407 | 108 | if ( (port = get_free_port(d)) < 0 ) |
408 | 0 | ERROR_EXIT(port); |
409 | 108 | |
410 | 108 | chn = evtchn_from_port(d, port); |
411 | 108 | |
412 | 108 | spin_lock(&chn->lock); |
413 | 108 | |
414 | 108 | chn->state = ECS_IPI; |
415 | 108 | chn->notify_vcpu_id = vcpu; |
416 | 108 | evtchn_port_init(d, chn); |
417 | 108 | |
418 | 108 | spin_unlock(&chn->lock); |
419 | 108 | |
420 | 108 | bind->port = port; |
421 | 108 | |
422 | 108 | out: |
423 | 108 | spin_unlock(&d->event_lock); |
424 | 108 | |
425 | 108 | return rc; |
426 | 108 | } |
427 | | |
428 | | |
429 | | static void link_pirq_port(int port, struct evtchn *chn, struct vcpu *v) |
430 | 0 | { |
431 | 0 | chn->u.pirq.prev_port = 0; |
432 | 0 | chn->u.pirq.next_port = v->pirq_evtchn_head; |
433 | 0 | if ( v->pirq_evtchn_head ) |
434 | 0 | evtchn_from_port(v->domain, v->pirq_evtchn_head) |
435 | 0 | ->u.pirq.prev_port = port; |
436 | 0 | v->pirq_evtchn_head = port; |
437 | 0 | } |
438 | | |
439 | | static void unlink_pirq_port(struct evtchn *chn, struct vcpu *v) |
440 | 0 | { |
441 | 0 | struct domain *d = v->domain; |
442 | 0 |
|
443 | 0 | if ( chn->u.pirq.prev_port ) |
444 | 0 | evtchn_from_port(d, chn->u.pirq.prev_port)->u.pirq.next_port = |
445 | 0 | chn->u.pirq.next_port; |
446 | 0 | else |
447 | 0 | v->pirq_evtchn_head = chn->u.pirq.next_port; |
448 | 0 | if ( chn->u.pirq.next_port ) |
449 | 0 | evtchn_from_port(d, chn->u.pirq.next_port)->u.pirq.prev_port = |
450 | 0 | chn->u.pirq.prev_port; |
451 | 0 | } |
452 | | |
453 | | |
/*
 * EVTCHNOP_bind_pirq: bind a new local port to physical IRQ "bind->pirq".
 * Notifications initially target vCPU 0 (see link_pirq_port()).
 */
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu *v = d->vcpu[0];
    struct pirq *info;
    int port, pirq = bind->pirq;
    long rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    /* PV domains need explicit permission to use the physical IRQ. */
    if ( !is_hvm_domain(d) && !pirq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    /* At most one event channel per pIRQ. */
    if ( pirq_to_evtchn(d, pirq) != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    info = pirq_get_info(d, pirq);
    if ( !info )
        ERROR_EXIT(-ENOMEM);
    info->evtchn = port;
    /* Only PV domains perform the real IRQ binding here. */
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, info,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        /* Undo the partial binding before bailing out. */
        info->evtchn = 0;
        pirq_cleanup_check(info, d);
        goto out;
    }

    spin_lock(&chn->lock);

    chn->state = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);
    evtchn_port_init(d, chn);

    spin_unlock(&chn->lock);

    bind->port = port;

    arch_evtchn_bind_pirq(d, pirq);

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
512 | | |
513 | | |
/*
 * Close port1 of d1, performing whatever per-state teardown is required.
 * "guest" indicates a guest-initiated close, which is refused for
 * Xen-attached channels.
 *
 * For ECS_INTERDOMAIN channels both domains' event locks are needed; if
 * the remote domain sorts before d1 we must drop d1's lock, take the
 * remote's first, and restart from "again" (the channel may have changed
 * state meanwhile, which the re-checks below handle).
 */
static long evtchn_close(struct domain *d1, int port1, bool_t guest)
{
    struct domain *d2 = NULL;
    struct vcpu *v;
    struct evtchn *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn1)) && guest )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ: {
        struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq);

        if ( !pirq )
            break;
        if ( !is_hvm_domain(d1) )
            pirq_guest_unbind(d1, pirq);
        pirq->evtchn = 0;
        pirq_cleanup_check(pirq, d1);
        unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
#ifdef CONFIG_X86
        if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, pirq->pirq) > 0 )
            unmap_domain_pirq_emuirq(d1, pirq->pirq);
#endif
        break;
    }

    case ECS_VIRQ:
        /* Drop the virq_to_evtchn mapping on whichever vCPU holds it. */
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            /* Wait for in-flight send_guest_*_virq() users to drain. */
            spin_barrier(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                /* Wrong lock order: restart with d2's lock already held. */
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        double_evtchn_lock(chn1, chn2);

        free_evtchn(d1, chn1);

        /* The remote end reverts to unbound, reserved for d1 to re-bind. */
        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;

        double_evtchn_unlock(chn1, chn2);

        goto out;

    default:
        BUG();
    }

    spin_lock(&chn1->lock);
    free_evtchn(d1, chn1);
    spin_unlock(&chn1->lock);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}
651 | | |
/*
 * EVTCHNOP_send: raise a notification through local port "lport" of
 * domain "ld".  Only the local channel's lock is taken; for interdomain
 * channels the remote channel is signalled without acquiring its lock.
 */
int evtchn_send(struct domain *ld, unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *rd;
    int rport, ret = 0;

    if ( !port_is_valid(ld, lport) )
        return -EINVAL;

    lchn = evtchn_from_port(ld, lport);

    spin_lock(&lchn->lock);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(lchn)) )
    {
        ret = -EINVAL;
        goto out;
    }

    ret = xsm_evtchn_send(XSM_HOOK, ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn = evtchn_from_port(rd, rport);
        /* Xen-consumed remote ends get a direct callback instead. */
        if ( consumer_is_xen(rchn) )
            xen_notification_fn(rchn)(rd->vcpu[rchn->notify_vcpu_id], rport);
        else
            evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
        break;
    case ECS_IPI:
        evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&lchn->lock);

    return ret;
}
702 | | |
703 | | int guest_enabled_event(struct vcpu *v, uint32_t virq) |
704 | 0 | { |
705 | 0 | return ((v != NULL) && (v->virq_to_evtchn[virq] != 0)); |
706 | 0 | } |
707 | | |
708 | | void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq) |
709 | 5.74k | { |
710 | 5.74k | unsigned long flags; |
711 | 5.74k | int port; |
712 | 5.74k | struct domain *d; |
713 | 5.74k | |
714 | 5.74k | ASSERT(!virq_is_global(virq)); |
715 | 5.74k | |
716 | 5.74k | spin_lock_irqsave(&v->virq_lock, flags); |
717 | 5.74k | |
718 | 5.74k | port = v->virq_to_evtchn[virq]; |
719 | 5.74k | if ( unlikely(port == 0) ) |
720 | 0 | goto out; |
721 | 5.74k | |
722 | 5.74k | d = v->domain; |
723 | 5.74k | evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port)); |
724 | 5.74k | |
725 | 5.75k | out: |
726 | 5.75k | spin_unlock_irqrestore(&v->virq_lock, flags); |
727 | 5.75k | } |
728 | | |
729 | | static void send_guest_global_virq(struct domain *d, uint32_t virq) |
730 | 142 | { |
731 | 142 | unsigned long flags; |
732 | 142 | int port; |
733 | 142 | struct vcpu *v; |
734 | 142 | struct evtchn *chn; |
735 | 142 | |
736 | 142 | ASSERT(virq_is_global(virq)); |
737 | 142 | |
738 | 142 | if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) ) |
739 | 12 | return; |
740 | 142 | |
741 | 130 | v = d->vcpu[0]; |
742 | 130 | if ( unlikely(v == NULL) ) |
743 | 0 | return; |
744 | 130 | |
745 | 130 | spin_lock_irqsave(&v->virq_lock, flags); |
746 | 130 | |
747 | 130 | port = v->virq_to_evtchn[virq]; |
748 | 130 | if ( unlikely(port == 0) ) |
749 | 130 | goto out; |
750 | 130 | |
751 | 0 | chn = evtchn_from_port(d, port); |
752 | 0 | evtchn_port_set_pending(d, chn->notify_vcpu_id, chn); |
753 | 0 |
|
754 | 130 | out: |
755 | 130 | spin_unlock_irqrestore(&v->virq_lock, flags); |
756 | 130 | } |
757 | | |
758 | | void send_guest_pirq(struct domain *d, const struct pirq *pirq) |
759 | 0 | { |
760 | 0 | int port; |
761 | 0 | struct evtchn *chn; |
762 | 0 |
|
763 | 0 | /* |
764 | 0 | * PV guests: It should not be possible to race with __evtchn_close(). The |
765 | 0 | * caller of this function must synchronise with pirq_guest_unbind(). |
766 | 0 | * HVM guests: Port is legitimately zero when the guest disables the |
767 | 0 | * emulated interrupt/evtchn. |
768 | 0 | */ |
769 | 0 | if ( pirq == NULL || (port = pirq->evtchn) == 0 ) |
770 | 0 | { |
771 | 0 | BUG_ON(!is_hvm_domain(d)); |
772 | 0 | return; |
773 | 0 | } |
774 | 0 |
|
775 | 0 | chn = evtchn_from_port(d, port); |
776 | 0 | evtchn_port_set_pending(d, chn->notify_vcpu_id, chn); |
777 | 0 | } |
778 | | |
/* Per-VIRQ handler domain; NULL selects the hardware domain (see
 * send_global_virq()). */
static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;

/* Protects updates to global_virq_handlers[]. */
static DEFINE_SPINLOCK(global_virq_handlers_lock);
782 | | |
783 | | void send_global_virq(uint32_t virq) |
784 | 142 | { |
785 | 142 | ASSERT(virq < NR_VIRQS); |
786 | 142 | ASSERT(virq_is_global(virq)); |
787 | 142 | |
788 | 142 | send_guest_global_virq(global_virq_handlers[virq] ?: hardware_domain, virq); |
789 | 142 | } |
790 | | |
791 | | int set_global_virq_handler(struct domain *d, uint32_t virq) |
792 | 0 | { |
793 | 0 | struct domain *old; |
794 | 0 |
|
795 | 0 | if (virq >= NR_VIRQS) |
796 | 0 | return -EINVAL; |
797 | 0 | if (!virq_is_global(virq)) |
798 | 0 | return -EINVAL; |
799 | 0 |
|
800 | 0 | if (global_virq_handlers[virq] == d) |
801 | 0 | return 0; |
802 | 0 |
|
803 | 0 | if (unlikely(!get_domain(d))) |
804 | 0 | return -EINVAL; |
805 | 0 |
|
806 | 0 | spin_lock(&global_virq_handlers_lock); |
807 | 0 | old = global_virq_handlers[virq]; |
808 | 0 | global_virq_handlers[virq] = d; |
809 | 0 | spin_unlock(&global_virq_handlers_lock); |
810 | 0 |
|
811 | 0 | if (old != NULL) |
812 | 0 | put_domain(old); |
813 | 0 |
|
814 | 0 | return 0; |
815 | 0 | } |
816 | | |
817 | | static void clear_global_virq_handlers(struct domain *d) |
818 | 0 | { |
819 | 0 | uint32_t virq; |
820 | 0 | int put_count = 0; |
821 | 0 |
|
822 | 0 | spin_lock(&global_virq_handlers_lock); |
823 | 0 |
|
824 | 0 | for (virq = 0; virq < NR_VIRQS; virq++) |
825 | 0 | { |
826 | 0 | if (global_virq_handlers[virq] == d) |
827 | 0 | { |
828 | 0 | global_virq_handlers[virq] = NULL; |
829 | 0 | put_count++; |
830 | 0 | } |
831 | 0 | } |
832 | 0 |
|
833 | 0 | spin_unlock(&global_virq_handlers_lock); |
834 | 0 |
|
835 | 0 | while (put_count) |
836 | 0 | { |
837 | 0 | put_domain(d); |
838 | 0 | put_count--; |
839 | 0 | } |
840 | 0 | } |
841 | | |
/*
 * EVTCHNOP_status: report the state of one port of the (possibly remote)
 * domain status->dom into the caller-supplied structure.
 */
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port;
    struct evtchn *chn;
    long rc = 0;

    d = rcu_lock_domain_by_any_id(dom);
    if ( d == NULL )
        return -ESRCH;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(XSM_TARGET, d, chn);
    if ( rc )
        goto out;

    /* Translate the internal ECS_* state into the public EVTCHNSTAT_*. */
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq.irq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}
907 | | |
908 | | |
/*
 * EVTCHNOP_bind_vcpu: retarget notifications for an existing port of the
 * calling domain at another vCPU.  Permitted only for global-VIRQ,
 * unbound, interdomain and pIRQ channels.
 */
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long rc = 0;

    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn)) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        /* Per-vCPU VIRQs must stay on the vCPU they were bound to. */
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
        chn->notify_vcpu_id = vcpu_id;
        break;
    case ECS_PIRQ:
        if ( chn->notify_vcpu_id == vcpu_id )
            break;
        /* Move the channel between the vCPUs' pirq lists and re-affinitise. */
        unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
        chn->notify_vcpu_id = vcpu_id;
        pirq_set_affinity(d, chn->u.pirq.irq,
                          cpumask_of(d->vcpu[vcpu_id]->processor));
        link_pirq_port(port, chn, d->vcpu[vcpu_id]);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
966 | | |
967 | | |
968 | | int evtchn_unmask(unsigned int port) |
969 | 451 | { |
970 | 451 | struct domain *d = current->domain; |
971 | 451 | struct evtchn *evtchn; |
972 | 451 | |
973 | 451 | if ( unlikely(!port_is_valid(d, port)) ) |
974 | 0 | return -EINVAL; |
975 | 451 | |
976 | 451 | evtchn = evtchn_from_port(d, port); |
977 | 451 | evtchn_port_unmask(d, evtchn); |
978 | 451 | |
979 | 451 | return 0; |
980 | 451 | } |
981 | | |
982 | | |
/*
 * EVTCHNOP_reset: close all of d's ports and revert to the 2-level event
 * ABI.  Allowed only for the calling domain itself, or for a domain the
 * toolstack has paused (controller_pause_count != 0).
 */
int evtchn_reset(struct domain *d)
{
    unsigned int i;

    if ( d != current->domain && !d->controller_pause_count )
        return -EINVAL;

    for ( i = 0; port_is_valid(d, i); i++ )
        evtchn_close(d, i, 1);

    spin_lock(&d->event_lock);

    if ( d->evtchn_fifo )
    {
        /* Switching back to 2-level ABI. */
        evtchn_fifo_destroy(d);
        evtchn_2l_init(d);
    }

    spin_unlock(&d->event_lock);

    return 0;
}
1006 | | |
1007 | | static long evtchn_set_priority(const struct evtchn_set_priority *set_priority) |
1008 | 0 | { |
1009 | 0 | struct domain *d = current->domain; |
1010 | 0 | unsigned int port = set_priority->port; |
1011 | 0 | long ret; |
1012 | 0 |
|
1013 | 0 | spin_lock(&d->event_lock); |
1014 | 0 |
|
1015 | 0 | if ( !port_is_valid(d, port) ) |
1016 | 0 | { |
1017 | 0 | spin_unlock(&d->event_lock); |
1018 | 0 | return -EINVAL; |
1019 | 0 | } |
1020 | 0 |
|
1021 | 0 | ret = evtchn_port_set_priority(d, evtchn_from_port(d, port), |
1022 | 0 | set_priority->priority); |
1023 | 0 |
|
1024 | 0 | spin_unlock(&d->event_lock); |
1025 | 0 |
|
1026 | 0 | return ret; |
1027 | 0 | } |
1028 | | |
/*
 * Top-level dispatcher for the event-channel hypercall.
 *
 * @cmd: an EVTCHNOP_* sub-operation code.
 * @arg: guest handle to that sub-operation's argument structure.
 *
 * Each case copies the guest argument structure in, performs the
 * operation, and - for sub-ops that return data - copies the (possibly
 * updated) structure back out.  Guest-access failures yield -EFAULT;
 * unknown sub-ops yield -ENOSYS.  Note that a copy-out failure after a
 * successful operation is reported as -EFAULT without undoing the
 * operation (see the in-line comments).
 */
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( !rc && __copy_to_guest(arg, &alloc_unbound, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( !rc && __copy_to_guest(arg, &bind_interdomain, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( !rc && __copy_to_guest(arg, &bind_ipi, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( !rc && __copy_to_guest(arg, &bind_pirq, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        /* Guest-initiated close: third argument 1 allows closing own ports. */
        rc = evtchn_close(current->domain, close.port, 1);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(current->domain, send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( !rc && __copy_to_guest(arg, &status, 1) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(unmask.port);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        struct domain *d;

        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;

        /* reset.dom may name a foreign domain; resolve under RCU. */
        d = rcu_lock_domain_by_any_id(reset.dom);
        if ( d == NULL )
            return -ESRCH;

        rc = xsm_evtchn_reset(XSM_TARGET, current->domain, d);
        if ( !rc )
            rc = evtchn_reset(d);

        rcu_unlock_domain(d);
        break;
    }

    case EVTCHNOP_init_control: {
        struct evtchn_init_control init_control;
        if ( copy_from_guest(&init_control, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_fifo_init_control(&init_control);
        if ( !rc && __copy_to_guest(arg, &init_control, 1) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_expand_array: {
        struct evtchn_expand_array expand_array;
        if ( copy_from_guest(&expand_array, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_fifo_expand_array(&expand_array);
        break;
    }

    case EVTCHNOP_set_priority: {
        struct evtchn_set_priority set_priority;
        if ( copy_from_guest(&set_priority, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_set_priority(&set_priority);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
1179 | | |
1180 | | |
/*
 * Allocate an unbound event channel in @ld whose notifications are
 * consumed by Xen itself (via @notification_fn) rather than delivered
 * to a guest vCPU's pending bitmap.
 *
 * @ld: local domain owning the new port.
 * @lvcpu: vCPU of @ld recorded as the notify target.
 * @remote_domid: domain expected to later bind the remote end.
 * @notification_fn: Xen-internal callback invoked on channel events.
 *
 * Returns the allocated port number on success, or a negative errno
 * (from get_free_port() or the XSM check).  On XSM denial the port
 * obtained from get_free_port() is simply left in its free state.
 *
 * Locking: ld->event_lock covers port allocation; chn->lock is taken
 * nested inside it while the channel fields are initialised.
 */
int alloc_unbound_xen_event_channel(
    struct domain *ld, unsigned int lvcpu, domid_t remote_domid,
    xen_event_channel_notification_t notification_fn)
{
    struct evtchn *chn;
    int port, rc;

    spin_lock(&ld->event_lock);

    rc = get_free_port(ld);
    if ( rc < 0 )
        goto out;
    port = rc;
    chn = evtchn_from_port(ld, port);

    rc = xsm_evtchn_unbound(XSM_TARGET, ld, chn, remote_domid);
    if ( rc )
        goto out;

    spin_lock(&chn->lock);

    chn->state = ECS_UNBOUND;
    /* Non-zero xen_consumer marks the channel as Xen-consumed. */
    chn->xen_consumer = get_xen_consumer(notification_fn);
    chn->notify_vcpu_id = lvcpu;
    chn->u.unbound.remote_domid = remote_domid;

    spin_unlock(&chn->lock);

 out:
    spin_unlock(&ld->event_lock);

    /* rc may be the (non-negative) port from get_free_port(); on any
     * error path rc is negative and takes precedence. */
    return rc < 0 ? rc : port;
}
1214 | | |
/*
 * Release a Xen-consumed event channel previously obtained from
 * alloc_unbound_xen_event_channel().  The port must be valid.
 */
void free_xen_event_channel(struct domain *d, int port)
{
    BUG_ON(!port_is_valid(d, port));

    /* guest == 0: this is a Xen-internal close, not a guest request. */
    evtchn_close(d, port, 0);
}
1221 | | |
1222 | | |
/*
 * Raise an event on the remote end of a Xen-consumed interdomain
 * channel @lport belonging to @ld.
 *
 * The local channel lock is held while the remote end is signalled;
 * NOTE(review): the remote domain/port are read under lchn->lock only -
 * presumably that lock pins the interdomain binding; confirm against
 * evtchn_close()'s locking before relying on this elsewhere.
 * If the channel is not (or no longer) in ECS_INTERDOMAIN state the
 * notification is silently dropped.
 */
void notify_via_xen_event_channel(struct domain *ld, int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *rd;

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);

    spin_lock(&lchn->lock);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        /* Only Xen-consumed channels may be signalled through here. */
        ASSERT(consumer_is_xen(lchn));
        rd = lchn->u.interdomain.remote_dom;
        rchn = evtchn_from_port(rd, lchn->u.interdomain.remote_port);
        evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
    }

    spin_unlock(&lchn->lock);
}
1243 | | |
1244 | | void evtchn_check_pollers(struct domain *d, unsigned int port) |
1245 | 99.8k | { |
1246 | 99.8k | struct vcpu *v; |
1247 | 99.8k | unsigned int vcpuid; |
1248 | 99.8k | |
1249 | 99.8k | /* Check if some VCPU might be polling for this event. */ |
1250 | 99.8k | if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) ) |
1251 | 99.8k | return; |
1252 | 99.8k | |
1253 | 99.8k | /* Wake any interested (or potentially interested) pollers. */ |
1254 | 18.4E | for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus); |
1255 | 18.4E | vcpuid < d->max_vcpus; |
1256 | 0 | vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) ) |
1257 | 0 | { |
1258 | 0 | v = d->vcpu[vcpuid]; |
1259 | 0 | if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) && |
1260 | 0 | test_and_clear_bit(vcpuid, d->poll_mask) ) |
1261 | 0 | { |
1262 | 0 | v->poll_evtchn = 0; |
1263 | 0 | vcpu_unblock(v); |
1264 | 0 | } |
1265 | 0 | } |
1266 | 18.4E | } |
1267 | | |
/*
 * One-time event-channel initialisation for a new domain: select the
 * 2-level ABI, allocate the first channel bucket, and reserve port 0.
 * Returns 0 on success or a negative errno, undoing the bucket
 * allocation on failure.
 */
int evtchn_init(struct domain *d)
{
    /* Domains start on the 2-level ABI; FIFO is opted into later. */
    evtchn_2l_init(d);
    d->max_evtchn_port = INT_MAX;

    d->evtchn = alloc_evtchn_bucket(d, 0);
    if ( !d->evtchn )
        return -ENOMEM;
    d->valid_evtchns = EVTCHNS_PER_BUCKET;

    spin_lock_init_prof(d, event_lock);
    /* The very first allocation must yield port 0 so it can be reserved. */
    if ( get_free_port(d) != 0 )
    {
        free_evtchn_bucket(d, d->evtchn);
        return -EINVAL;
    }
    evtchn_from_port(d, 0)->state = ECS_RESERVED;

#if MAX_VIRT_CPUS > BITS_PER_LONG
    /* poll_mask needs dynamic sizing when it exceeds one machine word. */
    d->poll_mask = xzalloc_array(unsigned long,
                                 BITS_TO_LONGS(domain_max_vcpus(d)));
    if ( !d->poll_mask )
    {
        free_evtchn_bucket(d, d->evtchn);
        return -ENOMEM;
    }
#endif

    return 0;
}
1298 | | |
1299 | | |
/*
 * First-stage event-channel teardown for a dying domain: close every
 * port and release FIFO ABI state.  The bucket memory itself is freed
 * later by evtchn_destroy_final().
 */
void evtchn_destroy(struct domain *d)
{
    unsigned int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->event_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
        evtchn_close(d, i, 0);

    /* Drop any global VIRQ handler roles this domain held. */
    clear_global_virq_handlers(d);

    evtchn_fifo_destroy(d);
}
1316 | | |
1317 | | |
1318 | | void evtchn_destroy_final(struct domain *d) |
1319 | 0 | { |
1320 | 0 | unsigned int i, j; |
1321 | 0 |
|
1322 | 0 | /* Free all event-channel buckets. */ |
1323 | 0 | for ( i = 0; i < NR_EVTCHN_GROUPS; i++ ) |
1324 | 0 | { |
1325 | 0 | if ( !d->evtchn_group[i] ) |
1326 | 0 | continue; |
1327 | 0 | for ( j = 0; j < BUCKETS_PER_GROUP; j++ ) |
1328 | 0 | free_evtchn_bucket(d, d->evtchn_group[i][j]); |
1329 | 0 | xfree(d->evtchn_group[i]); |
1330 | 0 | } |
1331 | 0 | free_evtchn_bucket(d, d->evtchn); |
1332 | 0 |
|
1333 | 0 | #if MAX_VIRT_CPUS > BITS_PER_LONG |
1334 | 0 | xfree(d->poll_mask); |
1335 | 0 | d->poll_mask = NULL; |
1336 | 0 | #endif |
1337 | 0 | } |
1338 | | |
1339 | | |
1340 | | void evtchn_move_pirqs(struct vcpu *v) |
1341 | 536 | { |
1342 | 536 | struct domain *d = v->domain; |
1343 | 536 | const cpumask_t *mask = cpumask_of(v->processor); |
1344 | 536 | unsigned int port; |
1345 | 536 | struct evtchn *chn; |
1346 | 536 | |
1347 | 536 | spin_lock(&d->event_lock); |
1348 | 536 | for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port ) |
1349 | 0 | { |
1350 | 0 | chn = evtchn_from_port(d, port); |
1351 | 0 | pirq_set_affinity(d, chn->u.pirq.irq, mask); |
1352 | 0 | } |
1353 | 536 | spin_unlock(&d->event_lock); |
1354 | 536 | } |
1355 | | |
1356 | | |
/*
 * Debug-key helper: print one line per in-use event channel of @d,
 * showing pending/masked/per-ABI state plus binding-specific details.
 * Runs with d->event_lock held to keep the port table stable.
 */
static void domain_dump_evtchn_info(struct domain *d)
{
    unsigned int port;
    int irq;

    /* Render the set of polling vCPUs into the shared scratch buffer. */
    bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                         d->poll_mask, d->max_vcpus);
    printk("Event channel information for domain %d:\n"
           "Polling vCPUs: {%s}\n"
           " port [p/m/s]\n", d->domain_id, keyhandler_scratch);

    spin_lock(&d->event_lock);

    /* Port 0 is reserved, hence the loop starts at 1. */
    for ( port = 1; port < d->max_evtchns; ++port )
    {
        const struct evtchn *chn;
        char *ssid;

        if ( !port_is_valid(d, port) )
            continue;
        chn = evtchn_from_port(d, port);
        if ( chn->state == ECS_FREE )
            continue;

        printk(" %4u [%d/%d/",
               port,
               evtchn_port_is_pending(d, port),
               evtchn_port_is_masked(d, port));
        evtchn_port_print_state(d, chn);
        printk("]: s=%d n=%d x=%d",
               chn->state, chn->notify_vcpu_id, chn->xen_consumer);

        /* Append the binding-specific part of the line. */
        switch ( chn->state )
        {
        case ECS_UNBOUND:
            printk(" d=%d", chn->u.unbound.remote_domid);
            break;
        case ECS_INTERDOMAIN:
            printk(" d=%d p=%d",
                   chn->u.interdomain.remote_dom->domain_id,
                   chn->u.interdomain.remote_port);
            break;
        case ECS_PIRQ:
            irq = domain_pirq_to_irq(d, chn->u.pirq.irq);
            printk(" p=%d i=%d", chn->u.pirq.irq, irq);
            break;
        case ECS_VIRQ:
            printk(" v=%d", chn->u.virq);
            break;
        }

        /* Optional XSM security label; freed by us if supplied. */
        ssid = xsm_show_security_evtchn(d, chn);
        if (ssid) {
            printk(" Z=%s\n", ssid);
            xfree(ssid);
        } else {
            printk("\n");
        }
    }

    spin_unlock(&d->event_lock);
}
1419 | | |
1420 | | static void dump_evtchn_info(unsigned char key) |
1421 | 0 | { |
1422 | 0 | struct domain *d; |
1423 | 0 |
|
1424 | 0 | printk("'%c' pressed -> dumping event-channel info\n", key); |
1425 | 0 |
|
1426 | 0 | rcu_read_lock(&domlist_read_lock); |
1427 | 0 |
|
1428 | 0 | for_each_domain ( d ) |
1429 | 0 | domain_dump_evtchn_info(d); |
1430 | 0 |
|
1431 | 0 | rcu_read_unlock(&domlist_read_lock); |
1432 | 0 | } |
1433 | | |
/* Register the 'e' debug key at boot (diagnostic-class handler). */
static int __init dump_evtchn_info_key_init(void)
{
    register_keyhandler('e', dump_evtchn_info, "dump evtchn info", 1);
    return 0;
}
__initcall(dump_evtchn_info_key_init);
1440 | | |
1441 | | /* |
1442 | | * Local variables: |
1443 | | * mode: C |
1444 | | * c-file-style: "BSD" |
1445 | | * c-basic-offset: 4 |
1446 | | * tab-width: 4 |
1447 | | * indent-tabs-mode: nil |
1448 | | * End: |
1449 | | */ |