debuggers.hg

view xen/common/event_channel.c @ 3705:4294cfa9fad3

bitkeeper revision 1.1159.212.95 (4204aa0ee0re5Xx1zWrJ9ejxzgRs3w)

Various cleanups. Remove PDB pending simpler GDB stub and/or NetBSD debugger.
Force emacs mode to appropriate tabbing in various files.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 05 11:12:14 2005 +0000 (2005-02-05)
parents 0ef6e8e6e85d
children 88957a238191
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /******************************************************************************
3 * event_channel.c
4 *
5 * Event notifications from VIRQs, PIRQs, and other domains.
6 *
7 * Copyright (c) 2003-2004, K A Fraser.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
19 #include <xen/config.h>
20 #include <xen/init.h>
21 #include <xen/lib.h>
22 #include <xen/errno.h>
23 #include <xen/sched.h>
24 #include <xen/event.h>
25 #include <xen/irq.h>
27 #include <public/xen.h>
28 #include <public/event_channel.h>
30 #define INIT_EVENT_CHANNELS 16
31 #define MAX_EVENT_CHANNELS 1024
32 #define EVENT_CHANNELS_SPREAD 32
35 static int get_free_port(struct exec_domain *ed)
36 {
37 struct domain *d = ed->domain;
38 int max, port;
39 event_channel_t *chn;
41 max = d->max_event_channel;
42 chn = d->event_channel;
44 for ( port = ed->eid * EVENT_CHANNELS_SPREAD; port < max; port++ )
45 if ( chn[port].state == ECS_FREE )
46 break;
48 if ( port >= max )
49 {
50 if ( max == MAX_EVENT_CHANNELS )
51 return -ENOSPC;
53 if ( port == 0 )
54 max = INIT_EVENT_CHANNELS;
55 else
56 max = port + EVENT_CHANNELS_SPREAD;
58 chn = xmalloc_array(event_channel_t, max);
59 if ( unlikely(chn == NULL) )
60 return -ENOMEM;
62 memset(chn, 0, max * sizeof(event_channel_t));
64 if ( d->event_channel != NULL )
65 {
66 memcpy(chn, d->event_channel, d->max_event_channel *
67 sizeof(event_channel_t));
68 xfree(d->event_channel);
69 }
71 d->event_channel = chn;
72 d->max_event_channel = max;
73 }
75 return port;
76 }
79 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
80 {
81 struct domain *d = current->domain;
82 int port;
84 spin_lock(&d->event_channel_lock);
86 if ( (port = get_free_port(current)) >= 0 )
87 {
88 d->event_channel[port].state = ECS_UNBOUND;
89 d->event_channel[port].u.unbound.remote_domid = alloc->dom;
90 }
92 spin_unlock(&d->event_channel_lock);
94 if ( port < 0 )
95 return port;
97 alloc->port = port;
98 return 0;
99 }
/*
 * EVTCHNOP_bind_interdomain: connect <dom1,port1> to <dom2,port2> as the
 * two ends of an interdomain event channel. A port value of 0 in the
 * request means "allocate a free port for me". On success (and on the
 * idempotent re-bind paths) the resolved port numbers are written back
 * into *bind. Returns 0 or a -ve errno value.
 */
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
#define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
    struct domain *d1, *d2;
    struct exec_domain *ed1, *ed2;
    int port1 = bind->port1, port2 = bind->port2;
    domid_t dom1 = bind->dom1, dom2 = bind->dom2;
    long rc = 0;

    /* Only a privileged domain may bind on another domain's behalf. */
    if ( !IS_PRIV(current->domain) && (dom1 != DOMID_SELF) )
        return -EPERM;

    if ( (port1 < 0) || (port2 < 0) )
        return -EINVAL;

    /* Resolve DOMID_SELF to the caller's actual domain id. */
    if ( dom1 == DOMID_SELF )
        dom1 = current->domain->id;
    if ( dom2 == DOMID_SELF )
        dom2 = current->domain->id;

    /* Take a reference on each domain; drop d1's if the d2 lookup fails. */
    if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
         ((d2 = find_domain_by_id(dom2)) == NULL) )
    {
        if ( d1 != NULL )
            put_domain(d1);
        return -ESRCH;
    }

    ed1 = d1->exec_domain[0]; /* XXX */
    ed2 = d2->exec_domain[0]; /* XXX */

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    /* NOTE(review): the order actually used is by pointer value, not
     * domain id -- consistent as long as all paths order the same way. */
    if ( d1 < d2 )
    {
        spin_lock(&d1->event_channel_lock);
        spin_lock(&d2->event_channel_lock);
    }
    else
    {
        /* Also covers dom1 == dom2: take the single lock only once. */
        if ( d1 != d2 )
            spin_lock(&d2->event_channel_lock);
        spin_lock(&d1->event_channel_lock);
    }

    /* Obtain, or ensure that we already have, a valid <port1>. */
    if ( port1 == 0 )
    {
        if ( (port1 = get_free_port(ed1)) < 0 )
            ERROR_EXIT(port1);
    }
    else if ( port1 >= d1->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Obtain, or ensure that we already have, a valid <port2>. */
    if ( port2 == 0 )
    {
        /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
        u16 tmp = d1->event_channel[port1].state;
        d1->event_channel[port1].state = ECS_INTERDOMAIN;
        port2 = get_free_port(ed2);
        d1->event_channel[port1].state = tmp;
        if ( port2 < 0 )
            ERROR_EXIT(port2);
    }
    else if ( port2 >= d2->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Validate <dom1,port1>'s current state. */
    switch ( d1->event_channel[port1].state )
    {
    case ECS_FREE:
        break;

    case ECS_UNBOUND:
        /* A port left unbound for us must have named dom2 as its peer. */
        if ( d1->event_channel[port1].u.unbound.remote_domid != dom2 )
            ERROR_EXIT(-EINVAL);
        break;

    case ECS_INTERDOMAIN:
        /* Re-binding an existing channel is allowed (idempotent) if the
         * request matches the current binding, or left port2 unspecified.
         * In that case report the existing remote port and succeed. */
        if ( d1->event_channel[port1].u.interdomain.remote_dom != ed2 )
            ERROR_EXIT(-EINVAL);
        if ( (d1->event_channel[port1].u.interdomain.remote_port != port2) &&
             (bind->port2 != 0) )
            ERROR_EXIT(-EINVAL);
        port2 = d1->event_channel[port1].u.interdomain.remote_port;
        goto out;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /* Validate <dom2,port2>'s current state. */
    switch ( d2->event_channel[port2].state )
    {
    case ECS_FREE:
        /* Claiming a free port in another domain requires privilege. */
        if ( !IS_PRIV(current->domain) && (dom2 != DOMID_SELF) )
            ERROR_EXIT(-EPERM);
        break;

    case ECS_UNBOUND:
        if ( d2->event_channel[port2].u.unbound.remote_domid != dom1 )
            ERROR_EXIT(-EINVAL);
        break;

    case ECS_INTERDOMAIN:
        /* Mirror of the idempotent re-bind case above, seen from d2. */
        if ( d2->event_channel[port2].u.interdomain.remote_dom != ed1 )
            ERROR_EXIT(-EINVAL);
        if ( (d2->event_channel[port2].u.interdomain.remote_port != port1) &&
             (bind->port1 != 0) )
            ERROR_EXIT(-EINVAL);
        port1 = d2->event_channel[port2].u.interdomain.remote_port;
        goto out;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /*
     * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
     */

    d1->event_channel[port1].u.interdomain.remote_dom  = ed2;
    d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
    d1->event_channel[port1].state                     = ECS_INTERDOMAIN;

    d2->event_channel[port2].u.interdomain.remote_dom  = ed1;
    d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
    d2->event_channel[port2].state                     = ECS_INTERDOMAIN;

 out:
    /* Only one lock is held when d1 == d2. */
    spin_unlock(&d1->event_channel_lock);
    if ( d1 != d2 )
        spin_unlock(&d2->event_channel_lock);

    put_domain(d1);
    put_domain(d2);

    /* Report the (possibly newly allocated) ports back to the caller. */
    bind->port1 = port1;
    bind->port2 = port2;

    return rc;
#undef ERROR_EXIT
}
247 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
248 {
249 struct exec_domain *ed = current;
250 struct domain *d = ed->domain;
251 int port, virq = bind->virq;
253 if ( virq >= ARRAY_SIZE(ed->virq_to_evtchn) )
254 return -EINVAL;
256 spin_lock(&d->event_channel_lock);
258 /*
259 * Port 0 is the fallback port for VIRQs that haven't been explicitly
260 * bound yet.
261 */
262 if ( ((port = ed->virq_to_evtchn[virq]) != 0) ||
263 ((port = get_free_port(ed)) < 0) )
264 goto out;
266 d->event_channel[port].state = ECS_VIRQ;
267 d->event_channel[port].u.virq = virq;
269 ed->virq_to_evtchn[virq] = port;
271 out:
272 spin_unlock(&d->event_channel_lock);
274 if ( port < 0 )
275 return port;
277 bind->port = port;
278 return 0;
279 }
281 static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
282 {
283 struct exec_domain *ed = current;
284 struct domain *d = ed->domain;
285 int port, ipi_edom = bind->ipi_edom;
287 spin_lock(&d->event_channel_lock);
289 if ( (port = get_free_port(ed)) >= 0 )
290 {
291 d->event_channel[port].state = ECS_IPI;
292 d->event_channel[port].u.ipi_edom = ipi_edom;
293 }
295 spin_unlock(&d->event_channel_lock);
297 if ( port < 0 )
298 return port;
300 bind->port = port;
301 return 0;
302 }
/*
 * EVTCHNOP_bind_pirq: bind physical IRQ <bind->pirq> to a new event
 * channel in the calling domain. If the PIRQ is already bound, the
 * existing port is reported and the call succeeds. On success the port
 * is written back to bind->port. Returns 0 or a -ve errno value.
 */
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct domain *d = current->domain;
    int port, rc, pirq = bind->pirq;

    if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
        return -EINVAL;

    spin_lock(&d->event_channel_lock);

    /*
     * Both assignments feed 'rc' and 'port' together: a non-zero existing
     * mapping short-circuits to 'out' with rc > 0 (reported as success
     * with the existing port); a failed allocation exits with rc < 0.
     */
    if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
         ((rc = port = get_free_port(current)) < 0) )
        goto out;

    /* Publish the mapping before binding so the IRQ path can see it. */
    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(current, pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        /* Roll back the mapping; the port itself remains ECS_FREE. */
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    d->event_channel[port].state = ECS_PIRQ;
    d->event_channel[port].u.pirq = pirq;

 out:
    spin_unlock(&d->event_channel_lock);

    if ( rc < 0 )
        return rc;

    bind->port = port;
    return 0;
}
/*
 * Close event channel <port1> in domain <d1>, tearing down whatever the
 * port is bound to: PIRQ guest binding, VIRQ reverse mappings, or the
 * remote end of an interdomain channel (which is left ECS_UNBOUND,
 * awaiting a new bind from d1). Takes d1's -- and, for interdomain
 * channels, the peer's -- event_channel_lock. Returns 0 or -ve errno.
 */
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct exec_domain *ed;
    event_channel_t *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->event_channel_lock);

    chn1 = d1->event_channel;

    if ( (port1 < 0) || (port1 >= d1->max_event_channel) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1[port1].state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        /* Undo the guest IRQ binding and clear the reverse mapping. */
        /* NOTE(review): if pirq_guest_unbind() fails the port is still
         * freed below while pirq_to_evtchn keeps the stale mapping --
         * confirm this is the intended behavior. */
        if ( (rc = pirq_guest_unbind(d1, chn1[port1].u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
        break;

    case ECS_VIRQ:
        /* XXX could store exec_domain in chn1[port1].u */
        /* Clear the VIRQ->port mapping in whichever exec domain has it. */
        for_each_exec_domain(d1, ed)
            if (ed->virq_to_evtchn[chn1[port1].u.virq] == port1)
                ed->virq_to_evtchn[chn1[port1].u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1[port1].u.interdomain.remote_dom->domain;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            /*
             * Lock both ends in a consistent (pointer) order. If d2 sorts
             * first we must drop d1's lock and restart from 'again', since
             * the channel may have changed while d1 was unlocked.
             */
            if ( d1 < d2 )
            {
                spin_lock(&d2->event_channel_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_channel_lock);
                spin_lock(&d2->event_channel_lock);
                goto again;
            }
        }
        else if ( d2 != chn1[port1].u.interdomain.remote_dom->domain )
        {
            /* The channel was re-bound to a different peer while d1 was
             * unlocked during the retry above. Bail out. */
            rc = -EINVAL;
            goto out;
        }

        chn2  = d2->event_channel;
        port2 = chn1[port1].u.interdomain.remote_port;

        /* The two ends of an interdomain channel must be consistent. */
        if ( port2 >= d2->max_event_channel )
            BUG();
        if ( chn2[port2].state != ECS_INTERDOMAIN )
            BUG();
        if ( chn2[port2].u.interdomain.remote_dom->domain != d1 )
            BUG();

        /* Leave the remote end half-open, awaiting a re-bind from d1. */
        chn2[port2].state = ECS_UNBOUND;
        chn2[port2].u.unbound.remote_domid = d1->id;
        break;

    default:
        BUG();
    }

    chn1[port1].state = ECS_FREE;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_channel_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_channel_lock);

    return rc;
}
453 static long evtchn_close(evtchn_close_t *close)
454 {
455 struct domain *d;
456 long rc;
457 domid_t dom = close->dom;
459 if ( dom == DOMID_SELF )
460 dom = current->domain->id;
461 else if ( !IS_PRIV(current->domain) )
462 return -EPERM;
464 if ( (d = find_domain_by_id(dom)) == NULL )
465 return -ESRCH;
467 rc = __evtchn_close(d, close->port);
469 put_domain(d);
470 return rc;
471 }
474 long evtchn_send(int lport)
475 {
476 struct domain *ld = current->domain;
477 struct exec_domain *rd;
478 int rport, ret = 0;
480 spin_lock(&ld->event_channel_lock);
482 if ( unlikely(lport < 0) ||
483 unlikely(lport >= ld->max_event_channel))
484 {
485 spin_unlock(&ld->event_channel_lock);
486 return -EINVAL;
487 }
489 switch ( ld->event_channel[lport].state )
490 {
491 case ECS_INTERDOMAIN:
492 rd = ld->event_channel[lport].u.interdomain.remote_dom;
493 rport = ld->event_channel[lport].u.interdomain.remote_port;
495 evtchn_set_pending(rd, rport);
496 break;
497 case ECS_IPI:
498 rd = ld->exec_domain[ld->event_channel[lport].u.ipi_edom];
499 if ( rd )
500 evtchn_set_pending(rd, lport);
501 else
502 ret = -EINVAL;
503 break;
504 default:
505 ret = -EINVAL;
506 }
508 spin_unlock(&ld->event_channel_lock);
510 return ret;
511 }
514 static long evtchn_status(evtchn_status_t *status)
515 {
516 struct domain *d;
517 domid_t dom = status->dom;
518 int port = status->port;
519 event_channel_t *chn;
520 long rc = 0;
522 if ( dom == DOMID_SELF )
523 dom = current->domain->id;
524 else if ( !IS_PRIV(current->domain) )
525 return -EPERM;
527 if ( (d = find_domain_by_id(dom)) == NULL )
528 return -ESRCH;
530 spin_lock(&d->event_channel_lock);
532 chn = d->event_channel;
534 if ( (port < 0) || (port >= d->max_event_channel) )
535 {
536 rc = -EINVAL;
537 goto out;
538 }
540 switch ( chn[port].state )
541 {
542 case ECS_FREE:
543 case ECS_RESERVED:
544 status->status = EVTCHNSTAT_closed;
545 break;
546 case ECS_UNBOUND:
547 status->status = EVTCHNSTAT_unbound;
548 status->u.unbound.dom = chn[port].u.unbound.remote_domid;
549 break;
550 case ECS_INTERDOMAIN:
551 status->status = EVTCHNSTAT_interdomain;
552 status->u.interdomain.dom =
553 chn[port].u.interdomain.remote_dom->domain->id;
554 status->u.interdomain.port = chn[port].u.interdomain.remote_port;
555 break;
556 case ECS_PIRQ:
557 status->status = EVTCHNSTAT_pirq;
558 status->u.pirq = chn[port].u.pirq;
559 break;
560 case ECS_VIRQ:
561 status->status = EVTCHNSTAT_virq;
562 status->u.virq = chn[port].u.virq;
563 break;
564 case ECS_IPI:
565 status->status = EVTCHNSTAT_ipi;
566 status->u.ipi_edom = chn[port].u.ipi_edom;
567 break;
568 default:
569 BUG();
570 }
572 out:
573 spin_unlock(&d->event_channel_lock);
574 put_domain(d);
575 return rc;
576 }
579 long do_event_channel_op(evtchn_op_t *uop)
580 {
581 long rc;
582 evtchn_op_t op;
584 if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
585 return -EFAULT;
587 switch ( op.cmd )
588 {
589 case EVTCHNOP_alloc_unbound:
590 rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
591 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
592 rc = -EFAULT; /* Cleaning up here would be a mess! */
593 break;
595 case EVTCHNOP_bind_interdomain:
596 rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
597 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
598 rc = -EFAULT; /* Cleaning up here would be a mess! */
599 break;
601 case EVTCHNOP_bind_virq:
602 rc = evtchn_bind_virq(&op.u.bind_virq);
603 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
604 rc = -EFAULT; /* Cleaning up here would be a mess! */
605 break;
607 case EVTCHNOP_bind_ipi:
608 rc = evtchn_bind_ipi(&op.u.bind_ipi);
609 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
610 rc = -EFAULT; /* Cleaning up here would be a mess! */
611 break;
613 case EVTCHNOP_bind_pirq:
614 rc = evtchn_bind_pirq(&op.u.bind_pirq);
615 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
616 rc = -EFAULT; /* Cleaning up here would be a mess! */
617 break;
619 case EVTCHNOP_close:
620 rc = evtchn_close(&op.u.close);
621 break;
623 case EVTCHNOP_send:
624 rc = evtchn_send(op.u.send.local_port);
625 break;
627 case EVTCHNOP_status:
628 rc = evtchn_status(&op.u.status);
629 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
630 rc = -EFAULT;
631 break;
633 default:
634 rc = -ENOSYS;
635 break;
636 }
638 return rc;
639 }
642 int init_event_channels(struct domain *d)
643 {
644 spin_lock_init(&d->event_channel_lock);
645 /* Call get_free_port to initialize d->event_channel */
646 if ( get_free_port(d->exec_domain[0]) != 0 )
647 return -EINVAL;
648 d->event_channel[0].state = ECS_RESERVED;
649 return 0;
650 }
653 void destroy_event_channels(struct domain *d)
654 {
655 int i;
656 if ( d->event_channel != NULL )
657 {
658 for ( i = 0; i < d->max_event_channel; i++ )
659 (void)__evtchn_close(d, i);
660 xfree(d->event_channel);
661 }
662 }