debuggers.hg: xen/common/event_channel.c @ 20976:0e8557c6a47a

Dump full vCPU polling mask from 'e' key handler

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Thu Feb 11 21:14:12 2010 +0000
/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>
#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                "EVTCHNOP failure: error %d\n",                     \
                (_errno));                                          \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
#define ERROR_EXIT_DOM(_errno, _dom)                                \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                "EVTCHNOP failure: domain %d, error %d\n",          \
                (_dom)->domain_id, (_errno));                       \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

static int evtchn_set_pending(struct vcpu *v, int port);
static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}
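/*
 * Illustrative guest-side sketch (not part of this file): a per-vCPU VIRQ
 * such as VIRQ_TIMER is bound separately on each vCPU and recorded in that
 * vCPU's virq_to_evtchn[], whereas a global VIRQ has one per-domain binding
 * that starts out on vCPU0. A guest binding its timer VIRQ on each vCPU
 * might issue, via whatever hypercall wrapper its kernel provides:
 *
 *     struct evtchn_bind_virq op = { .virq = VIRQ_TIMER, .vcpu = cpu };
 *     rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op);
 *
 * evtchn_bind_virq() below enforces the distinction by rejecting a
 * non-zero .vcpu for global VIRQs.
 */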
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;
    int            i, j;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            for ( j = 0; j < i; j++ )
                xsm_free_security_evtchn(&chn[j]);
            xfree(chn);
            return -ENOMEM;
        }
    }

    return port;
}
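/*
 * Illustrative sketch of the port-to-bucket mapping used throughout this
 * file (assuming EVTCHNS_PER_BUCKET is 128, its usual value -- the
 * authoritative definition lives in xen/sched.h). Port 300 would resolve
 * to bucket 300/128 = 2, slot 300 & 127 = 44:
 *
 *     struct evtchn *bucket = d->evtchn[port / EVTCHNS_PER_BUCKET];
 *     struct evtchn *chn    = &bucket[port & (EVTCHNS_PER_BUCKET - 1)];
 *
 * Buckets are allocated lazily, one at a time, by get_free_port() above,
 * which is why port_is_valid() must also check the bucket pointer for
 * NULL before any port in it may be dereferenced.
 */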
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}
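/*
 * Hedged guest-side usage sketch (not part of this file): a frontend
 * typically allocates an unbound port for its backend with the public
 * interface from public/event_channel.h, then advertises the port number
 * through xenstore. HYPERVISOR_event_channel_op stands for whatever
 * hypercall wrapper the guest kernel provides:
 *
 *     struct evtchn_alloc_unbound op = {
 *         .dom        = DOMID_SELF,
 *         .remote_dom = backend_domid,
 *     };
 *     if ( HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op) == 0 )
 *         local_port = op.port;
 *
 * The port remains ECS_UNBOUND until the remote domain completes the
 * handshake with EVTCHNOP_bind_interdomain below.
 */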
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring the lock of the domain with the
     * lower address. */
    if ( ld < rd )
    {
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->event_lock);
    if ( ld != rd )
        spin_unlock(&rd->event_lock);

    rcu_unlock_domain(rd);

    return rc;
}
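/*
 * Note on the locking above: both event locks are always taken in
 * ascending order of domain pointer, the same convention __evtchn_close()
 * follows below, so bind and close cannot deadlock against each other.
 * The unconditional evtchn_set_pending() at the end compensates for any
 * notification sent to the remote port while it was still unbound, at the
 * cost of one possibly spurious event on the freshly bound local port.
 */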
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}

static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}

static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
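/*
 * Note on the ordering above: pirq_to_evtchn[] is populated before
 * pirq_guest_bind() so that an interrupt firing during the bind already
 * finds the port to signal; if the bind fails, the slot is rolled back
 * to 0 under the still-held event lock.
 */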
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        pirq_guest_unbind(d1, chn1->u.pirq);
        d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier_irq(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Clear pending event to avoid unexpected behavior on re-bind. */
    clear_bit(port1, &shared_info(d1, evtchn_pending));

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}
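/*
 * The 'again:' retry above deserves spelling out: d1's lock must be held
 * to read chn1->u.interdomain.remote_dom, yet if d2 sorts below d1 the
 * ordering convention demands d2's lock be taken first. The code therefore
 * drops d1's lock, takes d2's, and restarts. The get_domain() reference
 * keeps d2 alive across the window in which neither lock is held, and the
 * re-check of remote_dom after the restart catches a port that was closed
 * and re-bound elsewhere in that window.
 */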
static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}

int evtchn_send(struct domain *d, unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = d, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->event_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->event_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->event_lock);
        return -EINVAL;
    }

    ret = xsm_evtchn_send(ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* Silently drop the notification. */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&ld->event_lock);

    return ret;
}
static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}
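/*
 * Hedged sketch of the consuming side (guest code, not part of this
 * file): delivery above walks a two-level hierarchy -- the per-port bit
 * in shared_info.evtchn_pending, the per-word selector bit in the vCPU's
 * evtchn_pending_sel, and finally the upcall via
 * vcpu_mark_events_pending(). A guest dispatcher conceptually inverts it:
 *
 *     while ( (sel = xchg(&vcpu_info->evtchn_pending_sel, 0)) != 0 )
 *         for each bit w set in sel:
 *             for each bit b set in (shared->evtchn_pending[w] &
 *                                    ~shared->evtchn_mask[w]):
 *                 handle_port(w * BITS_PER_LONG + b);
 *
 * Note that a masked port still gets its pending bit set here -- only the
 * selector and upcall are suppressed -- which is what allows
 * evtchn_unmask() below to replay an event that arrived while masked.
 */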
int guest_enabled_event(struct vcpu *v, int virq)
{
    return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
}

void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    unsigned long flags;
    int port;

    ASSERT(!virq_is_global(virq));

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    evtchn_set_pending(v, port);

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    unsigned long flags;
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

int send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    /*
     * It should not be possible to race with __evtchn_close():
     * The caller of this function must synchronise with pirq_guest_unbind().
     */
    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}

long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct vcpu   *v;

    spin_lock(&d->event_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->event_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order, mirroring
     * evtchn_set_pending() above.
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->event_lock);

    return 0;
}
static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i, rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    rc = xsm_evtchn_reset(current->domain, d);
    if ( rc )
        goto out;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rc = 0;

 out:
    rcu_unlock_domain(d);

    return rc;
}
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(current->domain, send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(unmask.port);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
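/*
 * Hedged guest-side sketch of the calling convention above (not part of
 * this file): every EVTCHNOP_* takes one small argument structure, copied
 * in, acted upon, and -- for operations that return information -- copied
 * back out:
 *
 *     struct evtchn_send send = { .port = port };
 *     rc = HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
 *
 * Note the deliberate asymmetry on the copy-back path: a failed
 * copy_to_guest() after a successful bind returns -EFAULT but leaves the
 * port allocated, per the "cleaning up here would be a mess" comments.
 */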
int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->event_lock);

    return port;
}
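/*
 * Note on consumer_is_xen: a port allocated here is "Xen-attached". The
 * guest end of such a channel can still notify it (evtchn_send() handles
 * a remote port with consumer_is_xen set by waking a vCPU blocked in
 * Xen), but the Xen-attached port itself cannot be sent on, closed, or
 * re-bound by the guest; only free_xen_event_channel() and
 * notify_via_xen_event_channel() below operate on it from this side.
 */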
void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->event_lock);

    if ( unlikely(d->is_dying) )
    {
        spin_unlock(&d->event_lock);
        return;
    }

    BUG_ON(!port_is_valid(d, port));
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;

    spin_unlock(&d->event_lock);

    (void)__evtchn_close(d, port);
}
void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport;

    spin_lock(&ld->event_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->event_lock);
}
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->event_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;

#if MAX_VIRT_CPUS > BITS_PER_LONG
    d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
    if ( !d->poll_mask )
        return -ENOMEM;
    bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
#endif

    return 0;
}

void evtchn_destroy(struct domain *d)
{
    int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->event_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    /* Free all event-channel buckets. */
    spin_lock(&d->event_lock);
    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
    {
        xsm_free_security_evtchn(d->evtchn[i]);
        xfree(d->evtchn[i]);
        d->evtchn[i] = NULL;
    }
    spin_unlock(&d->event_lock);
}

void evtchn_destroy_final(struct domain *d)
{
#if MAX_VIRT_CPUS > BITS_PER_LONG
    xfree(d->poll_mask);
    d->poll_mask = NULL;
#endif
}
static void domain_dump_evtchn_info(struct domain *d)
{
    unsigned int port;

    bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                         d->poll_mask, d->max_vcpus);
    printk("Domain %d polling vCPUs: {%s}\n",
           d->domain_id, keyhandler_scratch);

    if ( !spin_trylock(&d->event_lock) )
        return;

    printk("Event channel information for domain %d:\n"
           "    port [p/m]\n", d->domain_id);

    for ( port = 1; port < MAX_EVTCHNS(d); ++port )
    {
        const struct evtchn *chn;

        if ( !port_is_valid(d, port) )
            continue;
        chn = evtchn_from_port(d, port);
        if ( chn->state == ECS_FREE )
            continue;

        printk("    %4u [%d/%d]: s=%d n=%d",
               port,
               !!test_bit(port, &shared_info(d, evtchn_pending)),
               !!test_bit(port, &shared_info(d, evtchn_mask)),
               chn->state, chn->notify_vcpu_id);
        switch ( chn->state )
        {
        case ECS_UNBOUND:
            printk(" d=%d", chn->u.unbound.remote_domid);
            break;
        case ECS_INTERDOMAIN:
            printk(" d=%d p=%d",
                   chn->u.interdomain.remote_dom->domain_id,
                   chn->u.interdomain.remote_port);
            break;
        case ECS_PIRQ:
            printk(" p=%d", chn->u.pirq);
            break;
        case ECS_VIRQ:
            printk(" v=%d", chn->u.virq);
            break;
        }

        printk(" x=%d\n", chn->consumer_is_xen);
    }

    spin_unlock(&d->event_lock);
}
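/*
 * Hypothetical sample of the dump above, for illustration only (values
 * are workload-dependent):
 *
 *     Domain 1 polling vCPUs: {1,3}
 *     Event channel information for domain 1:
 *         port [p/m]
 *            1 [0/0]: s=3 n=0 d=0 p=5 x=0
 *            2 [1/0]: s=5 n=0 v=0 x=0
 *
 * [p/m] are the pending and mask bits, s the ECS_* state (3 =
 * interdomain, 5 = virq), n the notify vCPU, and the remaining fields
 * the state-specific union members.
 */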
static void dump_evtchn_info(unsigned char key)
{
    struct domain *d;

    printk("'%c' pressed -> dumping event-channel info\n", key);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
        domain_dump_evtchn_info(d);

    rcu_read_unlock(&domlist_read_lock);
}

static struct keyhandler dump_evtchn_info_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_evtchn_info,
    .desc = "dump evtchn info"
};

static int __init dump_evtchn_info_key_init(void)
{
    register_keyhandler('e', &dump_evtchn_info_keyhandler);
    return 0;
}
__initcall(dump_evtchn_info_key_init);
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */