xen/common/event_channel.c @ 22848:6341fe0f4e5a (debuggers.hg)
changeset: Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author:    Keir Fraser <keir@xen.org>
date:      Tue Jan 25 14:06:55 2011 +0000 (2011-01-25)
parent:    899131a8f9d2

/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
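
/*
 * Illustrative sketch (not part of the original file): how the macros above
 * decompose a port number. The high bits of the port select a bucket in
 * d->evtchn[] and the low bits select a slot within it; the mask form works
 * because EVTCHNS_PER_BUCKET is a power of two.
 */
#if 0 /* example only */
static struct evtchn *example_lookup(struct domain *d, int port)
{
    struct evtchn *bucket = d->evtchn[port / EVTCHNS_PER_BUCKET]; /* high bits */
    return &bucket[port & (EVTCHNS_PER_BUCKET - 1)];              /* low bits */
}
#endif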

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: error %d\n",                    \
                 (_errno));                                         \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
#define ERROR_EXIT_DOM(_errno, _dom)                                \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d\n",         \
                 (_dom)->domain_id, (_errno));                      \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

static int evtchn_set_pending(struct vcpu *v, int port);

static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}

static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;
    int            i, j;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            for ( j = 0; j < i; j++ )
                xsm_free_security_evtchn(&chn[j]);
            xfree(chn);
            return -ENOMEM;
        }
    }

    return port;
}
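
/*
 * Illustrative sketch (not part of the original file): get_free_port() relies
 * on buckets being populated contiguously, so when the scan above runs off
 * the end of the valid ports, `port` names the first slot of the next,
 * still-unallocated bucket. A hypothetical assertion of that invariant
 * (assuming port < MAX_EVTCHNS(d)):
 */
#if 0 /* example only */
static void example_check_growth_point(struct domain *d, int port)
{
    ASSERT((port % EVTCHNS_PER_BUCKET) == 0);  /* on a bucket boundary */
    ASSERT(bucket_from_port(d, port) == NULL); /* bucket not yet allocated */
}
#endif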

static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}
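
/*
 * Illustrative sketch (not part of the original file): how a guest might
 * invoke the operation above. HYPERVISOR_event_channel_op is the usual
 * guest-side hypercall wrapper; the struct layout comes from
 * public/event_channel.h.
 */
#if 0 /* example only; runs in a guest, not in Xen */
static int example_alloc_unbound(domid_t remote, evtchn_port_t *port)
{
    struct evtchn_alloc_unbound op = {
        .dom        = DOMID_SELF, /* allocate the port in our own table */
        .remote_dom = remote,     /* peer that will later bind to it */
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);

    if ( rc == 0 )
        *port = op.port; /* filled in by evtchn_alloc_unbound() above */
    return rc;
}
#endif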

static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring the lock of the domain with the
     * smaller address. */
    if ( ld < rd )
    {
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->event_lock);
    if ( ld != rd )
        spin_unlock(&rd->event_lock);

    rcu_unlock_domain(rd);

    return rc;
}
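
/*
 * Illustrative sketch (not part of the original file): the lock ordering used
 * above, isolated. Acquiring both event locks in a globally consistent order
 * (by comparing domain pointers) ensures that two domains binding to each
 * other concurrently cannot deadlock.
 */
#if 0 /* example only */
static void example_lock_pair(struct domain *a, struct domain *b)
{
    if ( a == b )
        spin_lock(&a->event_lock);  /* same domain: take the lock once */
    else if ( a < b )
    {
        spin_lock(&a->event_lock);  /* lower address first... */
        spin_lock(&b->event_lock);
    }
    else
    {
        spin_lock(&b->event_lock);  /* ...so both callers agree on order */
        spin_lock(&a->event_lock);
    }
}
#endif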

static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
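
/*
 * Illustrative sketch (not part of the original file): a guest binding the
 * per-vCPU VIRQ_TIMER on vCPU 0 via the operation above.
 */
#if 0 /* example only; runs in a guest, not in Xen */
static int example_bind_timer_virq(evtchn_port_t *port)
{
    struct evtchn_bind_virq op = {
        .virq = VIRQ_TIMER,
        .vcpu = 0, /* must be 0 for global VIRQs; VIRQ_TIMER is per-vCPU */
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op);

    if ( rc == 0 )
        *port = op.port;
    return rc;
}
#endif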

static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}

static void link_pirq_port(int port, struct evtchn *chn, struct vcpu *v)
{
    chn->u.pirq.prev_port = 0;
    chn->u.pirq.next_port = v->pirq_evtchn_head;
    if ( v->pirq_evtchn_head )
        evtchn_from_port(v->domain, v->pirq_evtchn_head)
            ->u.pirq.prev_port = port;
    v->pirq_evtchn_head = port;
}

static void unlink_pirq_port(struct evtchn *chn, struct vcpu *v)
{
    struct domain *d = v->domain;

    if ( chn->u.pirq.prev_port )
        evtchn_from_port(d, chn->u.pirq.prev_port)->u.pirq.next_port =
            chn->u.pirq.next_port;
    else
        v->pirq_evtchn_head = chn->u.pirq.next_port;
    if ( chn->u.pirq.next_port )
        evtchn_from_port(d, chn->u.pirq.next_port)->u.pirq.prev_port =
            chn->u.pirq.prev_port;
}
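
/*
 * Illustrative note (not part of the original file): link_pirq_port() and
 * unlink_pirq_port() maintain a per-vCPU doubly-linked list threaded through
 * the evtchn structures themselves, using port numbers rather than pointers
 * as links. Port 0 is ECS_RESERVED and doubles as the list terminator, which
 * is why prev_port == 0 means "head of list". A hypothetical traversal:
 */
#if 0 /* example only */
static void example_walk_pirq_ports(struct vcpu *v)
{
    int port;

    /* Follow next_port links from the head; port 0 terminates the list. */
    for ( port = v->pirq_evtchn_head; port != 0;
          port = evtchn_from_port(v->domain, port)->u.pirq.next_port )
    {
        /* Each visited channel is an ECS_PIRQ channel notifying v. */
    }
}
#endif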

static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(
              v, pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state      = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}

static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( !is_hvm_domain(d1) )
            pirq_guest_unbind(d1, chn1->u.pirq.irq);
        d1->pirq_to_evtchn[chn1->u.pirq.irq] = 0;
        unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier_irq(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Clear pending event to avoid unexpected behavior on re-bind. */
    clear_bit(port1, &shared_info(d1, evtchn_pending));

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}

static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}

int evtchn_send(struct domain *d, unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = d, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->event_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->event_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->event_lock);
        return -EINVAL;
    }

    ret = xsm_evtchn_send(ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&ld->event_lock);

    return ret;
}
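
/*
 * Illustrative sketch (not part of the original file): the guest-side
 * counterpart of evtchn_send() above.
 */
#if 0 /* example only; runs in a guest, not in Xen */
static int example_notify(evtchn_port_t port)
{
    struct evtchn_send op = { .port = port };

    /* Dispatched to evtchn_send(); an ECS_UNBOUND port drops silently. */
    return HYPERVISOR_event_channel_op(EVTCHNOP_send, &op);
}
#endif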

static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}
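
/*
 * Illustrative sketch (not part of the original file): the guest-side mirror
 * of the two-level scheme that evtchn_set_pending() updates. A guest first
 * consumes evtchn_pending_sel to find dirty words, then scans the matching
 * words of evtchn_pending, honouring evtchn_mask. `si` and `vi` stand in for
 * the guest's mapped shared_info and vcpu_info; xchg/__ffs are the usual
 * guest-kernel primitives.
 */
#if 0 /* example only; runs in a guest, not in Xen */
static int example_find_pending_port(shared_info_t *si, vcpu_info_t *vi)
{
    unsigned long sel = xchg(&vi->evtchn_pending_sel, 0);

    while ( sel != 0 )
    {
        unsigned int word = __ffs(sel);
        unsigned long pend = si->evtchn_pending[word] &
                             ~si->evtchn_mask[word];

        if ( pend != 0 )
            return word * BITS_PER_LONG + __ffs(pend);
        sel &= ~(1UL << word);
    }
    return -1; /* nothing pending and unmasked */
}
#endif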

int guest_enabled_event(struct vcpu *v, int virq)
{
    return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
}

void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    unsigned long flags;
    int port;

    ASSERT(!virq_is_global(virq));

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    evtchn_set_pending(v, port);

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    unsigned long flags;
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

int send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    /*
     * PV guests: It should not be possible to race with __evtchn_close(). The
     * caller of this function must synchronise with pirq_guest_unbind().
     * HVM guests: Port is legitimately zero when the guest disables the
     * emulated interrupt/evtchn.
     */
    if ( port == 0 )
    {
        BUG_ON(!is_hvm_domain(d));
        return 0;
    }

    chn = evtchn_from_port(d, port);
    return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

static long evtchn_status(evtchn_status_t *status)
{
    struct domain   *d;
    domid_t          dom = status->dom;
    int              port = status->port;
    struct evtchn   *chn;
    long             rc = 0;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq.irq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}

long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
        chn->notify_vcpu_id = vcpu_id;
        break;
    case ECS_PIRQ:
        if ( chn->notify_vcpu_id == vcpu_id )
            break;
        unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
        chn->notify_vcpu_id = vcpu_id;
        pirq_set_affinity(d, chn->u.pirq.irq,
                          cpumask_of(d->vcpu[vcpu_id]->processor));
        link_pirq_port(port, chn, d->vcpu[vcpu_id]);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->event_lock);

    return rc;
}

int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct vcpu   *v;

    spin_lock(&d->event_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->event_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * evtchn_set_pending() above.
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->event_lock);

    return 0;
}
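
/*
 * Illustrative sketch (not part of the original file): a guest re-enabling
 * delivery on a masked port. The hypercall reaches evtchn_unmask() above,
 * which replays any notification that arrived while the port was masked.
 */
#if 0 /* example only; runs in a guest, not in Xen */
static int example_unmask(evtchn_port_t port)
{
    struct evtchn_unmask op = { .port = port };

    return HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
}
#endif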

static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i, rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    rc = xsm_evtchn_reset(current->domain, d);
    if ( rc )
        goto out;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rc = 0;

 out:
    rcu_unlock_domain(d);

    return rc;
}

long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(current->domain, send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(unmask.port);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
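
/*
 * Illustrative note (not part of the original file): every EVTCHNOP_* case
 * above follows the same contract: copy the argument struct in from guest
 * memory, run the handler, and, for operations with output fields, copy the
 * struct back, turning a failed copy-out into -EFAULT. A generic guest-side
 * call therefore looks identical for every sub-op:
 */
#if 0 /* example only; runs in a guest, not in Xen */
static long example_evtchn_op(int cmd, void *arg)
{
    /*
     * arg points at the matching struct evtchn_*; Xen reads it and, for
     * ops that return data (e.g. a new port number), rewrites it in place.
     */
    return HYPERVISOR_event_channel_op(cmd, arg);
}
#endif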

int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->event_lock);

    return port;
}

void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->event_lock);

    if ( unlikely(d->is_dying) )
    {
        spin_unlock(&d->event_lock);
        return;
    }

    BUG_ON(!port_is_valid(d, port));
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;

    spin_unlock(&d->event_lock);

    (void)__evtchn_close(d, port);
}

void notify_via_xen_event_channel(struct domain *ld, int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *rd;
    int            rport;

    spin_lock(&ld->event_lock);

    if ( unlikely(ld->is_dying) )
    {
        spin_unlock(&ld->event_lock);
        return;
    }

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->event_lock);
}

int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->event_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;

#if MAX_VIRT_CPUS > BITS_PER_LONG
    d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
    if ( !d->poll_mask )
        return -ENOMEM;
    bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
#endif

    return 0;
}

void evtchn_destroy(struct domain *d)
{
    int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->event_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    /* Free all event-channel buckets. */
    spin_lock(&d->event_lock);
    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
    {
        xsm_free_security_evtchn(d->evtchn[i]);
        xfree(d->evtchn[i]);
        d->evtchn[i] = NULL;
    }
    spin_unlock(&d->event_lock);
}

void evtchn_destroy_final(struct domain *d)
{
#if MAX_VIRT_CPUS > BITS_PER_LONG
    xfree(d->poll_mask);
    d->poll_mask = NULL;
#endif
}

void evtchn_move_pirqs(struct vcpu *v)
{
    struct domain *d = v->domain;
    const cpumask_t *mask = cpumask_of(v->processor);
    unsigned int port;
    struct evtchn *chn;

    spin_lock(&d->event_lock);
    for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
    {
        chn = evtchn_from_port(d, port);
        pirq_set_affinity(d, chn->u.pirq.irq, mask);
    }
    spin_unlock(&d->event_lock);
}

static void domain_dump_evtchn_info(struct domain *d)
{
    unsigned int port;

    bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                         d->poll_mask, d->max_vcpus);
    printk("Event channel information for domain %d:\n"
           "Polling vCPUs: {%s}\n"
           "    port [p/m]\n", d->domain_id, keyhandler_scratch);

    spin_lock(&d->event_lock);

    for ( port = 1; port < MAX_EVTCHNS(d); ++port )
    {
        const struct evtchn *chn;

        if ( !port_is_valid(d, port) )
            continue;
        chn = evtchn_from_port(d, port);
        if ( chn->state == ECS_FREE )
            continue;

        printk("    %4u [%d/%d]: s=%d n=%d",
               port,
               !!test_bit(port, &shared_info(d, evtchn_pending)),
               !!test_bit(port, &shared_info(d, evtchn_mask)),
               chn->state, chn->notify_vcpu_id);
        switch ( chn->state )
        {
        case ECS_UNBOUND:
            printk(" d=%d", chn->u.unbound.remote_domid);
            break;
        case ECS_INTERDOMAIN:
            printk(" d=%d p=%d",
                   chn->u.interdomain.remote_dom->domain_id,
                   chn->u.interdomain.remote_port);
            break;
        case ECS_PIRQ:
            printk(" p=%d", chn->u.pirq.irq);
            break;
        case ECS_VIRQ:
            printk(" v=%d", chn->u.virq);
            break;
        }
        printk(" x=%d\n", chn->consumer_is_xen);
    }

    spin_unlock(&d->event_lock);
}

static void dump_evtchn_info(unsigned char key)
{
    struct domain *d;

    printk("'%c' pressed -> dumping event-channel info\n", key);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
        domain_dump_evtchn_info(d);

    rcu_read_unlock(&domlist_read_lock);
}

static struct keyhandler dump_evtchn_info_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_evtchn_info,
    .desc = "dump evtchn info"
};

static int __init dump_evtchn_info_key_init(void)
{
    register_keyhandler('e', &dump_evtchn_info_keyhandler);
    return 0;
}
__initcall(dump_evtchn_info_key_init);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */