debuggers.hg

annotate xen/common/event_channel.c @ 22848:6341fe0f4e5a

description: Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author:      Keir Fraser <keir@xen.org>
date:        Tue Jan 25 14:06:55 2011 +0000 (2011-01-25)
parents:     899131a8f9d2
children:    (none)
rev   line source
kaf24@992 1 /******************************************************************************
kaf24@992 2 * event_channel.c
kaf24@992 3 *
kaf24@1547 4 * Event notifications from VIRQs, PIRQs, and other domains.
kaf24@992 5 *
kaf24@9582 6 * Copyright (c) 2003-2006, K A Fraser.
kaf24@992 7 *
kaf24@992 8 * This program is distributed in the hope that it will be useful,
kaf24@992 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
kaf24@992 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
kaf24@992 11 * GNU General Public License for more details.
kaf24@992 12 *
kaf24@992 13 * You should have received a copy of the GNU General Public License
kaf24@992 14 * along with this program; if not, write to the Free Software
kaf24@992 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
kaf24@992 16 */
kaf24@992 17
kaf24@1248 18 #include <xen/config.h>
kaf24@1248 19 #include <xen/init.h>
kaf24@1248 20 #include <xen/lib.h>
kaf24@1248 21 #include <xen/errno.h>
kaf24@1248 22 #include <xen/sched.h>
kaf24@1248 23 #include <xen/event.h>
kaf24@1277 24 #include <xen/irq.h>
kaf24@8498 25 #include <xen/iocap.h>
ack@13294 26 #include <xen/compat.h>
kaf24@9197 27 #include <xen/guest_access.h>
keir@18566 28 #include <xen/keyhandler.h>
cl349@5329 29 #include <asm/current.h>
kaf24@992 30
kaf24@2827 31 #include <public/xen.h>
kaf24@2827 32 #include <public/event_channel.h>
kfraser@15846 33 #include <xsm/xsm.h>
kaf24@1165 34
kaf24@5346 35 #define bucket_from_port(d,p) \
kaf24@5346 36 ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
kaf24@5346 37 #define port_is_valid(d,p) \
ack@13294 38 (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
kaf24@5346 39 (bucket_from_port(d,p) != NULL))
kaf24@5346 40 #define evtchn_from_port(d,p) \
kaf24@5346 41 (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
kaf24@992 42
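The macros above implement a two-level port table: ports live in fixed-size buckets that get_free_port() below allocates on demand, and evtchn_from_port() selects the slot within a bucket with a mask, which only works because EVTCHNS_PER_BUCKET is a power of two. A minimal sketch of the same mapping, assuming a bucket size of 128 (the real EVTCHNS_PER_BUCKET is defined outside this file) and using hypothetical example_* names:

/* Illustrative only: mirrors bucket_from_port()/evtchn_from_port() above. */
#define EXAMPLE_EVTCHNS_PER_BUCKET 128   /* assumed power-of-two bucket size */

static inline unsigned int example_bucket_of(unsigned int port)
{
    return port / EXAMPLE_EVTCHNS_PER_BUCKET;        /* which bucket */
}

static inline unsigned int example_index_in_bucket(unsigned int port)
{
    return port & (EXAMPLE_EVTCHNS_PER_BUCKET - 1);  /* slot inside bucket */
}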
kaf24@7274 43 #define ERROR_EXIT(_errno) \
kaf24@7274 44 do { \
kaf24@12062 45 gdprintk(XENLOG_WARNING, \
keir@16612 46 "EVTCHNOP failure: error %d\n", \
keir@16612 47 (_errno)); \
keir@16612 48 rc = (_errno); \
keir@16612 49 goto out; \
keir@16612 50 } while ( 0 )
keir@16612 51 #define ERROR_EXIT_DOM(_errno, _dom) \
keir@16612 52 do { \
keir@16612 53 gdprintk(XENLOG_WARNING, \
keir@16612 54 "EVTCHNOP failure: domain %d, error %d\n", \
keir@16612 55 (_dom)->domain_id, (_errno)); \
kaf24@7274 56 rc = (_errno); \
kaf24@7274 57 goto out; \
kaf24@7274 58 } while ( 0 )
kaf24@5363 59
keir@18004 60 static int evtchn_set_pending(struct vcpu *v, int port);
keir@18004 61
kaf24@9582 62 static int virq_is_global(int virq)
kaf24@9582 63 {
kaf24@9582 64 int rc;
kaf24@9582 65
kaf24@9582 66 ASSERT((virq >= 0) && (virq < NR_VIRQS));
kaf24@9582 67
kaf24@9582 68 switch ( virq )
kaf24@9582 69 {
kaf24@9582 70 case VIRQ_TIMER:
kaf24@9582 71 case VIRQ_DEBUG:
kaf24@9614 72 case VIRQ_XENOPROF:
kaf24@9582 73 rc = 0;
kaf24@9582 74 break;
kaf24@10069 75 case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
kaf24@10069 76 rc = arch_virq_is_global(virq);
kaf24@10069 77 break;
kaf24@9582 78 default:
kaf24@9582 79 rc = 1;
kaf24@9582 80 break;
kaf24@9582 81 }
kaf24@9582 82
kaf24@9582 83 return rc;
kaf24@9582 84 }
kaf24@9582 85
kaf24@9582 86
kaf24@5346 87 static int get_free_port(struct domain *d)
kaf24@992 88 {
kaf24@5346 89 struct evtchn *chn;
kaf24@5346 90 int port;
kfraser@15846 91 int i, j;
kaf24@1165 92
kfraser@15503 93 if ( d->is_dying )
kfraser@15503 94 return -EINVAL;
kfraser@15503 95
kaf24@5346 96 for ( port = 0; port_is_valid(d, port); port++ )
kaf24@5346 97 if ( evtchn_from_port(d, port)->state == ECS_FREE )
kaf24@5346 98 return port;
cl349@3152 99
ack@13294 100 if ( port == MAX_EVTCHNS(d) )
kaf24@5346 101 return -ENOSPC;
kaf24@1165 102
kaf24@5346 103 chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
kaf24@5346 104 if ( unlikely(chn == NULL) )
kaf24@5346 105 return -ENOMEM;
kaf24@5346 106 memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
kaf24@5346 107 bucket_from_port(d, port) = chn;
kaf24@1165 108
kfraser@15846 109 for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
kfraser@15846 110 {
kfraser@15846 111 if ( xsm_alloc_security_evtchn(&chn[i]) )
kfraser@15846 112 {
kfraser@15846 113 for ( j = 0; j < i; j++ )
kfraser@15846 114 xsm_free_security_evtchn(&chn[j]);
kfraser@15846 115 xfree(chn);
kfraser@15846 116 return -ENOMEM;
kfraser@15846 117 }
kfraser@15846 118 }
kfraser@15846 119
kaf24@1165 120 return port;
kaf24@1165 121 }
kaf24@1165 122
kaf24@2751 123
kaf24@2751 124 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
kaf24@2751 125 {
kaf24@5346 126 struct evtchn *chn;
kaf24@7261 127 struct domain *d;
kaf24@7279 128 int port;
kaf24@7261 129 domid_t dom = alloc->dom;
kaf24@9927 130 long rc;
kaf24@9927 131
keir@18604 132 rc = rcu_lock_target_domain_by_id(dom, &d);
keir@18604 133 if ( rc )
keir@18604 134 return rc;
kaf24@7261 135
keir@18622 136 spin_lock(&d->event_lock);
kaf24@2751 137
kaf24@7279 138 if ( (port = get_free_port(d)) < 0 )
keir@16612 139 ERROR_EXIT_DOM(port, d);
kaf24@5363 140 chn = evtchn_from_port(d, port);
kaf24@5363 141
kfraser@15846 142 rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
kfraser@15846 143 if ( rc )
kfraser@15846 144 goto out;
kfraser@15846 145
kaf24@7279 146 chn->state = ECS_UNBOUND;
kaf24@7449 147 if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
kaf24@7449 148 chn->u.unbound.remote_domid = current->domain->domain_id;
kaf24@5363 149
kaf24@7279 150 alloc->port = port;
kaf24@2751 151
kaf24@5363 152 out:
keir@18622 153 spin_unlock(&d->event_lock);
kfraser@14220 154 rcu_unlock_domain(d);
kaf24@7261 155
kaf24@5363 156 return rc;
kaf24@2751 157 }
kaf24@2751 158
kaf24@2751 159
kaf24@1256 160 static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
kaf24@1165 161 {
kaf24@7279 162 struct evtchn *lchn, *rchn;
kaf24@7279 163 struct domain *ld = current->domain, *rd;
kaf24@7279 164 int lport, rport = bind->remote_port;
kaf24@7449 165 domid_t rdom = bind->remote_dom;
kaf24@9927 166 long rc;
kaf24@9927 167
kaf24@7449 168 if ( rdom == DOMID_SELF )
kaf24@7449 169 rdom = current->domain->domain_id;
kaf24@7449 170
kfraser@14220 171 if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
kaf24@1165 172 return -ESRCH;
kaf24@1165 173
kaf24@1165 174 /* Avoid deadlock by first acquiring the lock of the domain at the lower address. */
kaf24@7279 175 if ( ld < rd )
kaf24@992 176 {
keir@18622 177 spin_lock(&ld->event_lock);
keir@18622 178 spin_lock(&rd->event_lock);
kaf24@992 179 }
kaf24@992 180 else
kaf24@992 181 {
kaf24@7279 182 if ( ld != rd )
keir@18622 183 spin_lock(&rd->event_lock);
keir@18622 184 spin_lock(&ld->event_lock);
kaf24@992 185 }
kaf24@992 186
kaf24@7279 187 if ( (lport = get_free_port(ld)) < 0 )
kaf24@7279 188 ERROR_EXIT(lport);
kaf24@7279 189 lchn = evtchn_from_port(ld, lport);
kaf24@2751 190
kaf24@7279 191 if ( !port_is_valid(rd, rport) )
keir@16612 192 ERROR_EXIT_DOM(-EINVAL, rd);
kaf24@7279 193 rchn = evtchn_from_port(rd, rport);
kaf24@7279 194 if ( (rchn->state != ECS_UNBOUND) ||
keir@17357 195 (rchn->u.unbound.remote_domid != ld->domain_id) )
keir@16612 196 ERROR_EXIT_DOM(-EINVAL, rd);
kaf24@992 197
kfraser@15846 198 rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
kfraser@15846 199 if ( rc )
kfraser@15846 200 goto out;
kfraser@15846 201
kaf24@7279 202 lchn->u.interdomain.remote_dom = rd;
kaf24@7279 203 lchn->u.interdomain.remote_port = (u16)rport;
kaf24@7279 204 lchn->state = ECS_INTERDOMAIN;
kaf24@7279 205
kaf24@7279 206 rchn->u.interdomain.remote_dom = ld;
kaf24@7279 207 rchn->u.interdomain.remote_port = (u16)lport;
kaf24@7279 208 rchn->state = ECS_INTERDOMAIN;
kaf24@992 209
kaf24@2751 210 /*
kaf24@7279 211 * We may have lost notifications on the remote unbound port. Fix that up
kaf24@7279 212 * here by conservatively always setting a notification on the local port.
kaf24@2751 213 */
kaf24@7279 214 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
kaf24@2751 215
kaf24@7279 216 bind->local_port = lport;
kaf24@1176 217
kaf24@992 218 out:
keir@18622 219 spin_unlock(&ld->event_lock);
kaf24@7279 220 if ( ld != rd )
keir@18622 221 spin_unlock(&rd->event_lock);
kaf24@1165 222
kfraser@14220 223 rcu_unlock_domain(rd);
kaf24@992 224
kaf24@992 225 return rc;
kaf24@992 226 }
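The locking sequence at the top of evtchn_bind_interdomain() is the usual address-ordered idiom for taking two per-domain locks without deadlock: the two event_locks are always acquired in a fixed (pointer) order, and only one lock is taken when both domains are the same. A condensed sketch of the idiom with hypothetical helper names (these helpers are not part of this file):

/* Hypothetical helpers restating the lock-ordering idiom used above. */
static void example_double_event_lock(struct domain *a, struct domain *b)
{
    if ( a == b )
        spin_lock(&a->event_lock);
    else if ( a < b )
    {
        spin_lock(&a->event_lock);
        spin_lock(&b->event_lock);
    }
    else
    {
        spin_lock(&b->event_lock);
        spin_lock(&a->event_lock);
    }
}

static void example_double_event_unlock(struct domain *a, struct domain *b)
{
    spin_unlock(&a->event_lock);
    if ( a != b )
        spin_unlock(&b->event_lock);
}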
kaf24@992 227
kaf24@992 228
kaf24@1256 229 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
kaf24@1256 230 {
kaf24@5346 231 struct evtchn *chn;
kaf24@7233 232 struct vcpu *v;
kaf24@7233 233 struct domain *d = current->domain;
kaf24@7279 234 int port, virq = bind->virq, vcpu = bind->vcpu;
kaf24@7279 235 long rc = 0;
kaf24@1256 236
kaf24@9927 237 if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
kaf24@1256 238 return -EINVAL;
kaf24@1256 239
kaf24@9582 240 if ( virq_is_global(virq) && (vcpu != 0) )
kaf24@9582 241 return -EINVAL;
kaf24@9582 242
keir@19826 243 if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
kaf24@9927 244 ((v = d->vcpu[vcpu]) == NULL) )
kaf24@7233 245 return -ENOENT;
cl349@6700 246
keir@18622 247 spin_lock(&d->event_lock);
kaf24@1256 248
kaf24@7279 249 if ( v->virq_to_evtchn[virq] != 0 )
kaf24@7279 250 ERROR_EXIT(-EEXIST);
kaf24@7279 251
kaf24@7279 252 if ( (port = get_free_port(d)) < 0 )
kaf24@7279 253 ERROR_EXIT(port);
kaf24@1256 254
kaf24@5346 255 chn = evtchn_from_port(d, port);
kaf24@5346 256 chn->state = ECS_VIRQ;
kaf24@7279 257 chn->notify_vcpu_id = vcpu;
kaf24@5346 258 chn->u.virq = virq;
kaf24@1256 259
kaf24@7279 260 v->virq_to_evtchn[virq] = bind->port = port;
kaf24@1256 261
kaf24@1256 262 out:
keir@18622 263 spin_unlock(&d->event_lock);
kaf24@1256 264
kaf24@7279 265 return rc;
cl349@2970 266 }
cl349@2970 267
kaf24@5346 268
cl349@2970 269 static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
cl349@2970 270 {
kaf24@5346 271 struct evtchn *chn;
kaf24@5346 272 struct domain *d = current->domain;
kaf24@7279 273 int port, vcpu = bind->vcpu;
kaf24@7279 274 long rc = 0;
kaf24@4240 275
keir@19826 276 if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
kaf24@9927 277 (d->vcpu[vcpu] == NULL) )
kaf24@7233 278 return -ENOENT;
kaf24@7233 279
keir@18622 280 spin_lock(&d->event_lock);
cl349@2970 281
kaf24@7279 282 if ( (port = get_free_port(d)) < 0 )
kaf24@7279 283 ERROR_EXIT(port);
cl349@2970 284
kaf24@7279 285 chn = evtchn_from_port(d, port);
kaf24@7279 286 chn->state = ECS_IPI;
kaf24@7279 287 chn->notify_vcpu_id = vcpu;
kaf24@7279 288
kaf24@7279 289 bind->port = port;
kaf24@7279 290
kaf24@7279 291 out:
keir@18622 292 spin_unlock(&d->event_lock);
cl349@2970 293
kaf24@7279 294 return rc;
kaf24@1256 295 }
kaf24@1256 296
kaf24@1256 297
keir@21671 298 static void link_pirq_port(int port, struct evtchn *chn, struct vcpu *v)
keir@21671 299 {
keir@21671 300 chn->u.pirq.prev_port = 0;
keir@21671 301 chn->u.pirq.next_port = v->pirq_evtchn_head;
keir@21671 302 if ( v->pirq_evtchn_head )
keir@21671 303 evtchn_from_port(v->domain, v->pirq_evtchn_head)
keir@21671 304 ->u.pirq.prev_port = port;
keir@21671 305 v->pirq_evtchn_head = port;
keir@21671 306 }
keir@21671 307
keir@21671 308 static void unlink_pirq_port(struct evtchn *chn, struct vcpu *v)
keir@21671 309 {
keir@21671 310 struct domain *d = v->domain;
keir@21671 311
keir@21671 312 if ( chn->u.pirq.prev_port )
keir@21671 313 evtchn_from_port(d, chn->u.pirq.prev_port)->u.pirq.next_port =
keir@21671 314 chn->u.pirq.next_port;
keir@21671 315 else
keir@21671 316 v->pirq_evtchn_head = chn->u.pirq.next_port;
keir@21671 317 if ( chn->u.pirq.next_port )
keir@21671 318 evtchn_from_port(d, chn->u.pirq.next_port)->u.pirq.prev_port =
keir@21671 319 chn->u.pirq.prev_port;
keir@21671 320 }
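link_pirq_port() and unlink_pirq_port() maintain a per-vcpu intrusive list of PIRQ-bound ports, threaded through the prev_port/next_port fields and terminated by port 0 (port 0 can act as the terminator because evtchn_init() below marks it ECS_RESERVED, so it never carries a PIRQ binding). evtchn_move_pirqs() near the end of this file walks this list; a stripped-down sketch of such a walk, with a hypothetical callback:

/* Illustrative walk over a vcpu's PIRQ-bound ports; mirrors the loop in
 * evtchn_move_pirqs() below.  The caller must hold d->event_lock. */
static void example_for_each_pirq_port(struct vcpu *v,
                                       void (*fn)(struct evtchn *chn))
{
    struct domain *d = v->domain;
    struct evtchn *chn;
    unsigned int port;

    for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
    {
        chn = evtchn_from_port(d, port);
        fn(chn);
    }
}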
keir@21671 321
keir@21671 322
kaf24@1273 323 static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
kaf24@1273 324 {
kaf24@5346 325 struct evtchn *chn;
cl349@2957 326 struct domain *d = current->domain;
keir@21671 327 struct vcpu *v = d->vcpu[0];
kaf24@7279 328 int port, pirq = bind->pirq;
kaf24@7279 329 long rc;
kaf24@1273 330
keir@19688 331 if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
kaf24@1273 332 return -EINVAL;
kaf24@1273 333
keir@22455 334 if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
kaf24@8498 335 return -EPERM;
kaf24@8498 336
keir@18622 337 spin_lock(&d->event_lock);
kaf24@1273 338
kaf24@7279 339 if ( d->pirq_to_evtchn[pirq] != 0 )
kaf24@7279 340 ERROR_EXIT(-EEXIST);
kaf24@7279 341
kaf24@7279 342 if ( (port = get_free_port(d)) < 0 )
kaf24@7279 343 ERROR_EXIT(port);
kaf24@1273 344
kaf24@5346 345 chn = evtchn_from_port(d, port);
kaf24@5346 346
kaf24@1544 347 d->pirq_to_evtchn[pirq] = port;
keir@22461 348 rc = (!is_hvm_domain(d)
keir@22461 349 ? pirq_guest_bind(
keir@22461 350 v, pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE))
keir@22461 351 : 0);
keir@22461 352 if ( rc != 0 )
kaf24@1277 353 {
keir@22461 354 d->pirq_to_evtchn[pirq] = 0;
keir@22461 355 goto out;
kaf24@1277 356 }
kaf24@1277 357
kaf24@5346 358 chn->state = ECS_PIRQ;
keir@21671 359 chn->u.pirq.irq = pirq;
keir@21671 360 link_pirq_port(port, chn, v);
kaf24@1273 361
kaf24@7279 362 bind->port = port;
kaf24@7279 363
kaf24@1273 364 out:
keir@18622 365 spin_unlock(&d->event_lock);
kaf24@1273 366
kaf24@7279 367 return rc;
kaf24@1273 368 }
kaf24@1273 369
kaf24@1273 370
kaf24@1544 371 static long __evtchn_close(struct domain *d1, int port1)
kaf24@992 372 {
kaf24@5346 373 struct domain *d2 = NULL;
kaf24@5346 374 struct vcpu *v;
kaf24@5346 375 struct evtchn *chn1, *chn2;
kaf24@5346 376 int port2;
kaf24@5346 377 long rc = 0;
kaf24@992 378
kaf24@992 379 again:
keir@18622 380 spin_lock(&d1->event_lock);
kaf24@992 381
kaf24@5346 382 if ( !port_is_valid(d1, port1) )
kaf24@992 383 {
kaf24@992 384 rc = -EINVAL;
kaf24@992 385 goto out;
kaf24@992 386 }
kaf24@992 387
kaf24@5346 388 chn1 = evtchn_from_port(d1, port1);
kfraser@10987 389
kfraser@10987 390 /* Guest cannot close a Xen-attached event channel. */
kfraser@10987 391 if ( unlikely(chn1->consumer_is_xen) )
kfraser@10987 392 {
kfraser@10987 393 rc = -EINVAL;
kfraser@10987 394 goto out;
kfraser@10987 395 }
kfraser@10987 396
kaf24@5346 397 switch ( chn1->state )
kaf24@992 398 {
kaf24@1256 399 case ECS_FREE:
cl349@3335 400 case ECS_RESERVED:
kaf24@1256 401 rc = -EINVAL;
kaf24@1256 402 goto out;
kaf24@1256 403
kaf24@1256 404 case ECS_UNBOUND:
kaf24@1256 405 break;
kaf24@1256 406
kaf24@1256 407 case ECS_PIRQ:
keir@22455 408 if ( !is_hvm_domain(d1) )
keir@22455 409 pirq_guest_unbind(d1, chn1->u.pirq.irq);
keir@21671 410 d1->pirq_to_evtchn[chn1->u.pirq.irq] = 0;
keir@21671 411 unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
kaf24@1256 412 break;
kaf24@1256 413
kaf24@1256 414 case ECS_VIRQ:
kaf24@5327 415 for_each_vcpu ( d1, v )
keir@18220 416 {
keir@18220 417 if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
keir@18220 418 continue;
keir@18220 419 v->virq_to_evtchn[chn1->u.virq] = 0;
keir@18734 420 spin_barrier_irq(&v->virq_lock);
keir@18220 421 }
kaf24@1256 422 break;
kaf24@1256 423
cl349@2970 424 case ECS_IPI:
cl349@2970 425 break;
cl349@2970 426
kaf24@1256 427 case ECS_INTERDOMAIN:
kaf24@1544 428 if ( d2 == NULL )
kaf24@992 429 {
kaf24@5346 430 d2 = chn1->u.interdomain.remote_dom;
kaf24@1543 431
kaf24@1544 432 /* If we unlock d1 then we could lose d2. Must get a reference. */
kaf24@1544 433 if ( unlikely(!get_domain(d2)) )
kfraser@15503 434 BUG();
kaf24@1165 435
kaf24@1580 436 if ( d1 < d2 )
kaf24@992 437 {
keir@18622 438 spin_lock(&d2->event_lock);
kaf24@992 439 }
kaf24@1544 440 else if ( d1 != d2 )
kaf24@992 441 {
keir@18622 442 spin_unlock(&d1->event_lock);
keir@18622 443 spin_lock(&d2->event_lock);
kaf24@992 444 goto again;
kaf24@992 445 }
kaf24@992 446 }
kaf24@5346 447 else if ( d2 != chn1->u.interdomain.remote_dom )
kaf24@992 448 {
kaf24@8270 449 /*
kaf24@8270 450 * We can only get here if the port was closed and re-bound after
kaf24@8270 451 * unlocking d1 but before locking d2 above. We could retry but
kaf24@8270 452 * it is easier to return the same error as if we had seen the
kaf24@8270 453 * port in ECS_CLOSED. It must have passed through that state for
kaf24@8270 454 * us to end up here, so it's a valid error to return.
kaf24@8270 455 */
kaf24@992 456 rc = -EINVAL;
kaf24@992 457 goto out;
kaf24@992 458 }
kaf24@9581 459
kaf24@5346 460 port2 = chn1->u.interdomain.remote_port;
kaf24@5346 461 BUG_ON(!port_is_valid(d2, port2));
kaf24@992 462
kaf24@5346 463 chn2 = evtchn_from_port(d2, port2);
kaf24@5346 464 BUG_ON(chn2->state != ECS_INTERDOMAIN);
kaf24@5346 465 BUG_ON(chn2->u.interdomain.remote_dom != d1);
kaf24@1165 466
kaf24@5346 467 chn2->state = ECS_UNBOUND;
kaf24@5346 468 chn2->u.unbound.remote_domid = d1->domain_id;
kaf24@1256 469 break;
kaf24@1256 470
kaf24@1256 471 default:
kaf24@1256 472 BUG();
kaf24@992 473 }
kaf24@992 474
keir@18220 475 /* Clear pending event to avoid unexpected behavior on re-bind. */
keir@18220 476 clear_bit(port1, &shared_info(d1, evtchn_pending));
keir@18220 477
kaf24@5741 478 /* Reset binding to vcpu0 when the channel is freed. */
kaf24@5741 479 chn1->state = ECS_FREE;
kaf24@5741 480 chn1->notify_vcpu_id = 0;
kaf24@1171 481
kfraser@15846 482 xsm_evtchn_close_post(chn1);
kfraser@15846 483
kaf24@992 484 out:
kaf24@1544 485 if ( d2 != NULL )
kaf24@992 486 {
kaf24@1544 487 if ( d1 != d2 )
keir@18622 488 spin_unlock(&d2->event_lock);
kaf24@1544 489 put_domain(d2);
kaf24@992 490 }
kfraser@14224 491
keir@18622 492 spin_unlock(&d1->event_lock);
kaf24@1183 493
kaf24@992 494 return rc;
kaf24@992 495 }
kaf24@992 496
kaf24@992 497
kaf24@1256 498 static long evtchn_close(evtchn_close_t *close)
kaf24@1165 499 {
kaf24@7279 500 return __evtchn_close(current->domain, close->port);
kaf24@1165 501 }
kaf24@1165 502
keir@18004 503 int evtchn_send(struct domain *d, unsigned int lport)
kaf24@992 504 {
kaf24@5346 505 struct evtchn *lchn, *rchn;
keir@18004 506 struct domain *ld = d, *rd;
kfraser@10987 507 struct vcpu *rvcpu;
cl349@2975 508 int rport, ret = 0;
kaf24@992 509
keir@18622 510 spin_lock(&ld->event_lock);
kaf24@992 511
kaf24@5346 512 if ( unlikely(!port_is_valid(ld, lport)) )
kaf24@992 513 {
keir@18622 514 spin_unlock(&ld->event_lock);
kaf24@992 515 return -EINVAL;
kaf24@992 516 }
kaf24@992 517
kaf24@5346 518 lchn = evtchn_from_port(ld, lport);
kfraser@10987 519
kfraser@10987 520 /* Guest cannot send via a Xen-attached event channel. */
kfraser@10987 521 if ( unlikely(lchn->consumer_is_xen) )
kfraser@10987 522 {
keir@18622 523 spin_unlock(&ld->event_lock);
kfraser@10987 524 return -EINVAL;
kfraser@10987 525 }
kfraser@10987 526
kfraser@15846 527 ret = xsm_evtchn_send(ld, lchn);
kfraser@15846 528 if ( ret )
kfraser@15846 529 goto out;
kfraser@15846 530
kaf24@5346 531 switch ( lchn->state )
cl349@2975 532 {
cl349@2975 533 case ECS_INTERDOMAIN:
kaf24@5346 534 rd = lchn->u.interdomain.remote_dom;
kaf24@5346 535 rport = lchn->u.interdomain.remote_port;
kaf24@5346 536 rchn = evtchn_from_port(rd, rport);
kfraser@10987 537 rvcpu = rd->vcpu[rchn->notify_vcpu_id];
kfraser@10987 538 if ( rchn->consumer_is_xen )
kfraser@10987 539 {
kfraser@10987 540 /* Xen consumers need notification only if they are blocked. */
kfraser@14698 541 if ( test_and_clear_bit(_VPF_blocked_in_xen,
kfraser@14698 542 &rvcpu->pause_flags) )
kfraser@10987 543 vcpu_wake(rvcpu);
kfraser@10987 544 }
kfraser@10987 545 else
kfraser@10987 546 {
kfraser@10987 547 evtchn_set_pending(rvcpu, rport);
kfraser@10987 548 }
cl349@2975 549 break;
cl349@2975 550 case ECS_IPI:
kaf24@5346 551 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
cl349@2975 552 break;
kaf24@7279 553 case ECS_UNBOUND:
kaf24@7279 554 /* silently drop the notification */
kaf24@7279 555 break;
cl349@2975 556 default:
cl349@2975 557 ret = -EINVAL;
cl349@2975 558 }
kaf24@992 559
kfraser@15846 560 out:
keir@18622 561 spin_unlock(&ld->event_lock);
kaf24@992 562
cl349@2975 563 return ret;
kaf24@992 564 }
kaf24@992 565
keir@18004 566 static int evtchn_set_pending(struct vcpu *v, int port)
kaf24@9276 567 {
kaf24@9276 568 struct domain *d = v->domain;
keir@18466 569 int vcpuid;
kaf24@9276 570
kaf24@9276 571 /*
kaf24@9276 572 * The following bit operations must happen in strict order.
kaf24@9276 573 * NB. On x86, the atomic bit operations also act as memory barriers.
kaf24@9276 574 * There is therefore sufficiently strict ordering for this architecture --
kaf24@9276 575 * others may require explicit memory barriers.
kaf24@9276 576 */
kaf24@9276 577
keir@17232 578 if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
keir@17998 579 return 1;
kaf24@9276 580
keir@17232 581 if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
keir@19304 582 !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
keir@17232 583 &vcpu_info(v, evtchn_pending_sel)) )
kaf24@9276 584 {
kfraser@10388 585 vcpu_mark_events_pending(v);
kaf24@9276 586 }
kaf24@10357 587
kaf24@10357 588 /* Check if some VCPU might be polling for this event. */
keir@19826 589 if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
keir@18466 590 return 0;
keir@18466 591
keir@18466 592 /* Wake any interested (or potentially interested) pollers. */
keir@19826 593 for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
keir@19826 594 vcpuid < d->max_vcpus;
keir@19826 595 vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
kaf24@9276 596 {
keir@18466 597 v = d->vcpu[vcpuid];
keir@18466 598 if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
keir@18466 599 test_and_clear_bit(vcpuid, d->poll_mask) )
kfraser@14692 600 {
keir@18466 601 v->poll_evtchn = 0;
kfraser@14692 602 vcpu_unblock(v);
kfraser@14692 603 }
kaf24@9276 604 }
keir@17998 605
keir@17998 606 return 0;
kaf24@9276 607 }
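evtchn_set_pending() drives the two-level notification scheme: a per-port bit in the shared evtchn_pending array, a per-word selector bit in the vcpu's evtchn_pending_sel, and finally vcpu_mark_events_pending() to raise the upcall. A simplified restatement of that logic, with plain words standing in for the shared_info/vcpu_info bitmaps and without the atomic operations and barriers the real code relies on (both simplifications are assumptions made only for illustration):

/* Illustrative, non-atomic restatement of the bit protocol above.
 * Returns nonzero if the port was already pending, as the real code does. */
static int example_set_pending(unsigned long *pending_word,
                               const unsigned long *mask_word,
                               unsigned long *pending_sel,
                               unsigned int word, unsigned int bit,
                               void (*notify_vcpu)(void))
{
    if ( *pending_word & (1UL << bit) )
        return 1;                           /* already pending: nothing to do */
    *pending_word |= 1UL << bit;            /* 1. mark the port pending */

    if ( !(*mask_word & (1UL << bit)) &&    /* 2. port not masked by guest */
         !(*pending_sel & (1UL << word)) )  /* 3. selector bit not yet set */
    {
        *pending_sel |= 1UL << word;
        notify_vcpu();                      /* 4. raise the event upcall */
    }
    return 0;
}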
kaf24@9276 608
keir@18220 609 int guest_enabled_event(struct vcpu *v, int virq)
keir@18220 610 {
keir@18220 611 return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
keir@18220 612 }
kaf24@9581 613
kaf24@9582 614 void send_guest_vcpu_virq(struct vcpu *v, int virq)
kaf24@9276 615 {
keir@18220 616 unsigned long flags;
kaf24@9582 617 int port;
kaf24@9582 618
kaf24@9582 619 ASSERT(!virq_is_global(virq));
kaf24@9582 620
keir@18220 621 spin_lock_irqsave(&v->virq_lock, flags);
keir@18220 622
kaf24@9582 623 port = v->virq_to_evtchn[virq];
kaf24@9582 624 if ( unlikely(port == 0) )
keir@18220 625 goto out;
kaf24@9582 626
kaf24@9582 627 evtchn_set_pending(v, port);
kaf24@9276 628
keir@18220 629 out:
keir@18220 630 spin_unlock_irqrestore(&v->virq_lock, flags);
keir@18006 631 }
keir@18006 632
kaf24@9582 633 void send_guest_global_virq(struct domain *d, int virq)
kaf24@9582 634 {
keir@18220 635 unsigned long flags;
kaf24@9582 636 int port;
kfraser@10679 637 struct vcpu *v;
kaf24@9582 638 struct evtchn *chn;
kaf24@9582 639
kaf24@9582 640 ASSERT(virq_is_global(virq));
kaf24@9582 641
keir@19826 642 if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
kfraser@14325 643 return;
kfraser@14325 644
kfraser@10679 645 v = d->vcpu[0];
kfraser@10679 646 if ( unlikely(v == NULL) )
kfraser@10679 647 return;
kfraser@10679 648
keir@18220 649 spin_lock_irqsave(&v->virq_lock, flags);
keir@18220 650
kfraser@10679 651 port = v->virq_to_evtchn[virq];
kaf24@9582 652 if ( unlikely(port == 0) )
keir@18220 653 goto out;
kaf24@9582 654
kaf24@9582 655 chn = evtchn_from_port(d, port);
kaf24@9582 656 evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
keir@18220 657
keir@18220 658 out:
keir@18220 659 spin_unlock_irqrestore(&v->virq_lock, flags);
kaf24@9276 660 }
kaf24@9276 661
keir@17998 662 int send_guest_pirq(struct domain *d, int pirq)
kaf24@5346 663 {
kaf24@5346 664 int port = d->pirq_to_evtchn[pirq];
kaf24@9582 665 struct evtchn *chn;
kaf24@9582 666
keir@18220 667 /*
keir@22455 668 * PV guests: It should not be possible to race with __evtchn_close(). The
keir@22455 669 * caller of this function must synchronise with pirq_guest_unbind().
keir@22455 670 * HVM guests: Port is legitimately zero when the guest disables the
keir@22455 671 * emulated interrupt/evtchn.
keir@18220 672 */
keir@22455 673 if ( port == 0 )
keir@22455 674 {
keir@22455 675 BUG_ON(!is_hvm_domain(d));
keir@22455 676 return 0;
keir@22455 677 }
kaf24@9582 678
kaf24@9582 679 chn = evtchn_from_port(d, port);
keir@17998 680 return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
kaf24@5346 681 }
kaf24@992 682
kaf24@9581 683
kaf24@1256 684 static long evtchn_status(evtchn_status_t *status)
kaf24@992 685 {
kaf24@1544 686 struct domain *d;
kaf24@1544 687 domid_t dom = status->dom;
kaf24@1544 688 int port = status->port;
kaf24@5346 689 struct evtchn *chn;
kaf24@1544 690 long rc = 0;
kaf24@1165 691
keir@18604 692 rc = rcu_lock_target_domain_by_id(dom, &d);
keir@18604 693 if ( rc )
keir@18604 694 return rc;
kaf24@992 695
keir@18622 696 spin_lock(&d->event_lock);
kaf24@992 697
kaf24@5346 698 if ( !port_is_valid(d, port) )
kaf24@1165 699 {
kaf24@1304 700 rc = -EINVAL;
kaf24@1304 701 goto out;
kaf24@1165 702 }
kaf24@1165 703
kaf24@5346 704 chn = evtchn_from_port(d, port);
kfraser@15846 705
kfraser@15846 706 rc = xsm_evtchn_status(d, chn);
kfraser@15846 707 if ( rc )
kfraser@15846 708 goto out;
kfraser@15846 709
kaf24@5346 710 switch ( chn->state )
kaf24@992 711 {
kaf24@1165 712 case ECS_FREE:
cl349@3335 713 case ECS_RESERVED:
kaf24@1165 714 status->status = EVTCHNSTAT_closed;
kaf24@1165 715 break;
kaf24@1256 716 case ECS_UNBOUND:
kaf24@1256 717 status->status = EVTCHNSTAT_unbound;
kaf24@5346 718 status->u.unbound.dom = chn->u.unbound.remote_domid;
kaf24@1256 719 break;
kaf24@1256 720 case ECS_INTERDOMAIN:
kaf24@1256 721 status->status = EVTCHNSTAT_interdomain;
cl349@2962 722 status->u.interdomain.dom =
kaf24@5346 723 chn->u.interdomain.remote_dom->domain_id;
kaf24@5346 724 status->u.interdomain.port = chn->u.interdomain.remote_port;
kaf24@1165 725 break;
kaf24@1256 726 case ECS_PIRQ:
kaf24@1256 727 status->status = EVTCHNSTAT_pirq;
keir@21671 728 status->u.pirq = chn->u.pirq.irq;
kaf24@1256 729 break;
kaf24@1256 730 case ECS_VIRQ:
kaf24@1256 731 status->status = EVTCHNSTAT_virq;
kaf24@5346 732 status->u.virq = chn->u.virq;
kaf24@1165 733 break;
cl349@2970 734 case ECS_IPI:
kaf24@5741 735 status->status = EVTCHNSTAT_ipi;
cl349@2970 736 break;
kaf24@1165 737 default:
kaf24@1165 738 BUG();
kaf24@992 739 }
kaf24@992 740
kaf24@5741 741 status->vcpu = chn->notify_vcpu_id;
kaf24@5741 742
kaf24@1304 743 out:
keir@18622 744 spin_unlock(&d->event_lock);
kfraser@14220 745 rcu_unlock_domain(d);
keir@17357 746
kaf24@1304 747 return rc;
kaf24@992 748 }
kaf24@992 749
kaf24@9581 750
kaf24@8973 751 long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
iap10@5729 752 {
kaf24@8973 753 struct domain *d = current->domain;
iap10@5729 754 struct evtchn *chn;
kaf24@5741 755 long rc = 0;
kaf24@5741 756
keir@19826 757 if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
kaf24@7279 758 return -ENOENT;
iap10@5729 759
keir@18622 760 spin_lock(&d->event_lock);
iap10@5729 761
iap10@5729 762 if ( !port_is_valid(d, port) )
iap10@5729 763 {
iap10@5729 764 rc = -EINVAL;
iap10@5729 765 goto out;
iap10@5729 766 }
iap10@5729 767
iap10@5729 768 chn = evtchn_from_port(d, port);
kfraser@10987 769
kfraser@10987 770 /* Guest cannot re-bind a Xen-attached event channel. */
kfraser@10987 771 if ( unlikely(chn->consumer_is_xen) )
kfraser@10987 772 {
kfraser@10987 773 rc = -EINVAL;
kfraser@10987 774 goto out;
kfraser@10987 775 }
kfraser@10987 776
kaf24@5741 777 switch ( chn->state )
kaf24@5741 778 {
kaf24@9582 779 case ECS_VIRQ:
kaf24@9582 780 if ( virq_is_global(chn->u.virq) )
kaf24@9582 781 chn->notify_vcpu_id = vcpu_id;
kaf24@9582 782 else
kaf24@9582 783 rc = -EINVAL;
kaf24@9582 784 break;
kaf24@5741 785 case ECS_UNBOUND:
kaf24@5741 786 case ECS_INTERDOMAIN:
keir@21671 787 chn->notify_vcpu_id = vcpu_id;
keir@21671 788 break;
kaf24@5741 789 case ECS_PIRQ:
keir@21671 790 if ( chn->notify_vcpu_id == vcpu_id )
keir@21671 791 break;
keir@21671 792 unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
kaf24@8973 793 chn->notify_vcpu_id = vcpu_id;
keir@21671 794 pirq_set_affinity(d, chn->u.pirq.irq,
keir@21671 795 cpumask_of(d->vcpu[vcpu_id]->processor));
keir@21671 796 link_pirq_port(port, chn, d->vcpu[vcpu_id]);
kaf24@5741 797 break;
kaf24@5741 798 default:
kaf24@5741 799 rc = -EINVAL;
kaf24@5741 800 break;
kaf24@5741 801 }
iap10@5729 802
iap10@5729 803 out:
keir@18622 804 spin_unlock(&d->event_lock);
keir@17357 805
iap10@5729 806 return rc;
iap10@5729 807 }
kaf24@992 808
kaf24@9581 809
keir@18882 810 int evtchn_unmask(unsigned int port)
kaf24@8389 811 {
kaf24@8389 812 struct domain *d = current->domain;
kaf24@8389 813 struct vcpu *v;
kaf24@8389 814
keir@18622 815 spin_lock(&d->event_lock);
kaf24@8389 816
kaf24@8389 817 if ( unlikely(!port_is_valid(d, port)) )
kaf24@8389 818 {
keir@18622 819 spin_unlock(&d->event_lock);
kaf24@8389 820 return -EINVAL;
kaf24@8389 821 }
kaf24@8389 822
kaf24@8389 823 v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
kaf24@8389 824
kaf24@8389 825 /*
kaf24@8389 826 * These operations must happen in strict order. Based on
kaf24@8389 827 * evtchn_set_pending() above.
kaf24@8389 828 */
keir@17232 829 if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
keir@17232 830 test_bit (port, &shared_info(d, evtchn_pending)) &&
keir@19304 831 !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
keir@17232 832 &vcpu_info(v, evtchn_pending_sel)) )
kaf24@8389 833 {
kfraser@10388 834 vcpu_mark_events_pending(v);
kaf24@8389 835 }
kaf24@8389 836
keir@18622 837 spin_unlock(&d->event_lock);
kaf24@8389 838
kaf24@8389 839 return 0;
kaf24@8389 840 }
kaf24@8389 841
kaf24@9581 842
kfraser@13559 843 static long evtchn_reset(evtchn_reset_t *r)
kfraser@13559 844 {
kfraser@13559 845 domid_t dom = r->dom;
kfraser@13559 846 struct domain *d;
keir@17357 847 int i, rc;
kfraser@13559 848
keir@18604 849 rc = rcu_lock_target_domain_by_id(dom, &d);
keir@18604 850 if ( rc )
keir@18604 851 return rc;
kfraser@13559 852
kfraser@15846 853 rc = xsm_evtchn_reset(current->domain, d);
kfraser@15846 854 if ( rc )
keir@16894 855 goto out;
kfraser@15846 856
kfraser@13559 857 for ( i = 0; port_is_valid(d, i); i++ )
kfraser@13559 858 (void)__evtchn_close(d, i);
kfraser@13559 859
keir@16894 860 rc = 0;
keir@17357 861
keir@16894 862 out:
kfraser@14220 863 rcu_unlock_domain(d);
kfraser@13559 864
keir@16894 865 return rc;
kfraser@13559 866 }
kfraser@13559 867
kfraser@13559 868
kaf24@9927 869 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
kaf24@992 870 {
kaf24@992 871 long rc;
kaf24@992 872
kaf24@9927 873 switch ( cmd )
kaf24@992 874 {
kaf24@9927 875 case EVTCHNOP_alloc_unbound: {
kaf24@9927 876 struct evtchn_alloc_unbound alloc_unbound;
kaf24@9927 877 if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
kaf24@9927 878 return -EFAULT;
kaf24@9927 879 rc = evtchn_alloc_unbound(&alloc_unbound);
kaf24@9927 880 if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
kaf24@2751 881 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@2751 882 break;
kaf24@9927 883 }
kaf24@2751 884
kaf24@9927 885 case EVTCHNOP_bind_interdomain: {
kaf24@9927 886 struct evtchn_bind_interdomain bind_interdomain;
kaf24@9927 887 if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
kaf24@9927 888 return -EFAULT;
kaf24@9927 889 rc = evtchn_bind_interdomain(&bind_interdomain);
kaf24@9927 890 if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
kaf24@1256 891 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@1256 892 break;
kaf24@9927 893 }
kaf24@1256 894
kaf24@9927 895 case EVTCHNOP_bind_virq: {
kaf24@9927 896 struct evtchn_bind_virq bind_virq;
kaf24@9927 897 if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
kaf24@9927 898 return -EFAULT;
kaf24@9927 899 rc = evtchn_bind_virq(&bind_virq);
kaf24@9927 900 if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
kaf24@9927 901 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@9927 902 break;
kaf24@9927 903 }
kaf24@9927 904
kaf24@9927 905 case EVTCHNOP_bind_ipi: {
kaf24@9927 906 struct evtchn_bind_ipi bind_ipi;
kaf24@9927 907 if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
kaf24@9927 908 return -EFAULT;
kaf24@9927 909 rc = evtchn_bind_ipi(&bind_ipi);
kaf24@9927 910 if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
kaf24@1273 911 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@1273 912 break;
kaf24@9927 913 }
kaf24@1273 914
kaf24@9927 915 case EVTCHNOP_bind_pirq: {
kaf24@9927 916 struct evtchn_bind_pirq bind_pirq;
kaf24@9927 917 if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
kaf24@9927 918 return -EFAULT;
kaf24@9927 919 rc = evtchn_bind_pirq(&bind_pirq);
kaf24@9927 920 if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
kaf24@1165 921 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@992 922 break;
kaf24@9927 923 }
kaf24@992 924
kaf24@9927 925 case EVTCHNOP_close: {
kaf24@9927 926 struct evtchn_close close;
kaf24@9927 927 if ( copy_from_guest(&close, arg, 1) != 0 )
kaf24@9927 928 return -EFAULT;
kaf24@9927 929 rc = evtchn_close(&close);
kaf24@992 930 break;
kaf24@9927 931 }
kaf24@992 932
kaf24@9927 933 case EVTCHNOP_send: {
kaf24@9927 934 struct evtchn_send send;
kaf24@9927 935 if ( copy_from_guest(&send, arg, 1) != 0 )
kaf24@9927 936 return -EFAULT;
keir@18004 937 rc = evtchn_send(current->domain, send.port);
kaf24@992 938 break;
kaf24@9927 939 }
kaf24@992 940
kaf24@9927 941 case EVTCHNOP_status: {
kaf24@9927 942 struct evtchn_status status;
kaf24@9927 943 if ( copy_from_guest(&status, arg, 1) != 0 )
kaf24@9927 944 return -EFAULT;
kaf24@9927 945 rc = evtchn_status(&status);
kaf24@9927 946 if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
kaf24@1165 947 rc = -EFAULT;
kaf24@992 948 break;
kaf24@9927 949 }
kaf24@992 950
kaf24@9927 951 case EVTCHNOP_bind_vcpu: {
kaf24@9927 952 struct evtchn_bind_vcpu bind_vcpu;
kaf24@9927 953 if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
kaf24@9927 954 return -EFAULT;
kaf24@9927 955 rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
iap10@5729 956 break;
kaf24@9927 957 }
iap10@5729 958
kaf24@9927 959 case EVTCHNOP_unmask: {
kaf24@9927 960 struct evtchn_unmask unmask;
kaf24@9927 961 if ( copy_from_guest(&unmask, arg, 1) != 0 )
kaf24@9927 962 return -EFAULT;
keir@18882 963 rc = evtchn_unmask(unmask.port);
kaf24@8389 964 break;
kaf24@9927 965 }
kaf24@8389 966
kfraser@13559 967 case EVTCHNOP_reset: {
kfraser@13559 968 struct evtchn_reset reset;
kfraser@13559 969 if ( copy_from_guest(&reset, arg, 1) != 0 )
kfraser@13559 970 return -EFAULT;
kfraser@13559 971 rc = evtchn_reset(&reset);
kfraser@13559 972 break;
kfraser@13559 973 }
kfraser@13559 974
kaf24@992 975 default:
kaf24@992 976 rc = -ENOSYS;
kaf24@992 977 break;
kaf24@992 978 }
kaf24@992 979
kaf24@992 980 return rc;
kaf24@992 981 }
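do_event_channel_op() is the guest-visible entry point: each EVTCHNOP_* case copies the argument structure in, dispatches to the handler above, and copies the result back. A guest-side sketch of allocating an unbound port for a peer domain follows; it assumes the usual HYPERVISOR_event_channel_op() wrapper provided by guest OS headers (that wrapper is not part of this file) and elides error reporting:

/* Guest-side sketch: ask Xen for an unbound port that 'peer' may later bind
 * with EVTCHNOP_bind_interdomain.  The port is typically advertised to the
 * peer out of band, e.g. via xenstore. */
static int example_alloc_unbound(domid_t peer, evtchn_port_t *port_out)
{
    struct evtchn_alloc_unbound alloc = {
        .dom        = DOMID_SELF,
        .remote_dom = peer,
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);

    if ( rc == 0 )
        *port_out = alloc.port;
    return rc;
}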
kaf24@992 982
kaf24@992 983
kfraser@10987 984 int alloc_unbound_xen_event_channel(
kfraser@10987 985 struct vcpu *local_vcpu, domid_t remote_domid)
kfraser@10987 986 {
kfraser@10987 987 struct evtchn *chn;
kfraser@10987 988 struct domain *d = local_vcpu->domain;
kfraser@10987 989 int port;
kfraser@10987 990
keir@18622 991 spin_lock(&d->event_lock);
kfraser@10987 992
kfraser@10987 993 if ( (port = get_free_port(d)) < 0 )
kfraser@10987 994 goto out;
kfraser@10987 995 chn = evtchn_from_port(d, port);
kfraser@10987 996
kfraser@10987 997 chn->state = ECS_UNBOUND;
kfraser@10987 998 chn->consumer_is_xen = 1;
kfraser@10987 999 chn->notify_vcpu_id = local_vcpu->vcpu_id;
kfraser@10987 1000 chn->u.unbound.remote_domid = remote_domid;
kfraser@10987 1001
kfraser@10987 1002 out:
keir@18622 1003 spin_unlock(&d->event_lock);
kfraser@10987 1004
kfraser@10987 1005 return port;
kfraser@10987 1006 }
kfraser@10987 1007
kfraser@10987 1008
kfraser@10987 1009 void free_xen_event_channel(
kfraser@10987 1010 struct vcpu *local_vcpu, int port)
kfraser@10987 1011 {
kfraser@10987 1012 struct evtchn *chn;
kfraser@10987 1013 struct domain *d = local_vcpu->domain;
kfraser@10987 1014
keir@18622 1015 spin_lock(&d->event_lock);
keir@17482 1016
keir@17482 1017 if ( unlikely(d->is_dying) )
keir@17482 1018 {
keir@18622 1019 spin_unlock(&d->event_lock);
keir@17482 1020 return;
keir@17482 1021 }
keir@17482 1022
keir@17482 1023 BUG_ON(!port_is_valid(d, port));
kfraser@10987 1024 chn = evtchn_from_port(d, port);
kfraser@10987 1025 BUG_ON(!chn->consumer_is_xen);
kfraser@10987 1026 chn->consumer_is_xen = 0;
keir@17482 1027
keir@18622 1028 spin_unlock(&d->event_lock);
kfraser@10987 1029
kfraser@10987 1030 (void)__evtchn_close(d, port);
kfraser@10987 1031 }
kfraser@10987 1032
kfraser@10987 1033
keir@21573 1034 void notify_via_xen_event_channel(struct domain *ld, int lport)
kfraser@10987 1035 {
kfraser@10987 1036 struct evtchn *lchn, *rchn;
keir@21573 1037 struct domain *rd;
kfraser@10987 1038 int rport;
kfraser@10987 1039
keir@18622 1040 spin_lock(&ld->event_lock);
kfraser@10987 1041
keir@22205 1042 if ( unlikely(ld->is_dying) )
keir@22205 1043 {
keir@22205 1044 spin_unlock(&ld->event_lock);
keir@22205 1045 return;
keir@22205 1046 }
keir@22205 1047
kfraser@10987 1048 ASSERT(port_is_valid(ld, lport));
kfraser@10987 1049 lchn = evtchn_from_port(ld, lport);
kfraser@10987 1050 ASSERT(lchn->consumer_is_xen);
kfraser@10987 1051
kfraser@10987 1052 if ( likely(lchn->state == ECS_INTERDOMAIN) )
kfraser@10987 1053 {
kfraser@10987 1054 rd = lchn->u.interdomain.remote_dom;
kfraser@10987 1055 rport = lchn->u.interdomain.remote_port;
kfraser@10987 1056 rchn = evtchn_from_port(rd, rport);
kfraser@10987 1057 evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
kfraser@10987 1058 }
kfraser@10987 1059
keir@18622 1060 spin_unlock(&ld->event_lock);
kfraser@10987 1061 }
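alloc_unbound_xen_event_channel(), free_xen_event_channel() and notify_via_xen_event_channel() together form the interface for channels whose consumer is Xen itself (consumer_is_xen). A hypothetical in-hypervisor user follows; only the calls to these three helpers are taken from this file, the surrounding scaffolding is assumed:

/* Hypothetical Xen-internal consumer of the helpers above. */
static int example_open_xen_channel(struct vcpu *v, domid_t remote,
                                    int *port_out)
{
    int port = alloc_unbound_xen_event_channel(v, remote);

    if ( port < 0 )
        return port;
    *port_out = port;    /* advertised to the guest, which binds its end */
    return 0;
}

static void example_kick_guest(struct domain *d, int port)
{
    /* Harmless while the channel is still ECS_UNBOUND: the state check in
     * notify_via_xen_event_channel() simply does nothing in that case. */
    notify_via_xen_event_channel(d, port);
}

static void example_close_xen_channel(struct vcpu *v, int port)
{
    free_xen_event_channel(v, port);
}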
kfraser@10987 1062
kfraser@10987 1063
kaf24@5346 1064 int evtchn_init(struct domain *d)
kaf24@1256 1065 {
keir@18622 1066 spin_lock_init(&d->event_lock);
kaf24@5346 1067 if ( get_free_port(d) != 0 )
keir@19712 1068 return -EINVAL;
kaf24@5346 1069 evtchn_from_port(d, 0)->state = ECS_RESERVED;
keir@19826 1070
keir@19826 1071 #if MAX_VIRT_CPUS > BITS_PER_LONG
keir@19826 1072 d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
keir@19826 1073 if ( !d->poll_mask )
keir@19826 1074 return -ENOMEM;
keir@19826 1075 bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
keir@19826 1076 #endif
keir@19826 1077
cl349@3329 1078 return 0;
kaf24@1256 1079 }
kaf24@1256 1080
kaf24@1256 1081
kaf24@5346 1082 void evtchn_destroy(struct domain *d)
kaf24@992 1083 {
kaf24@992 1084 int i;
kaf24@5346 1085
kfraser@15503 1086 /* After this barrier no new event-channel allocations can occur. */
kfraser@15503 1087 BUG_ON(!d->is_dying);
keir@18622 1088 spin_barrier(&d->event_lock);
kfraser@15503 1089
kfraser@15503 1090 /* Close all existing event channels. */
kaf24@5346 1091 for ( i = 0; port_is_valid(d, i); i++ )
kfraser@10987 1092 {
kfraser@10987 1093 evtchn_from_port(d, i)->consumer_is_xen = 0;
kfraser@10987 1094 (void)__evtchn_close(d, i);
kfraser@10987 1095 }
kaf24@5346 1096
kfraser@15503 1097 /* Free all event-channel buckets. */
keir@18622 1098 spin_lock(&d->event_lock);
kaf24@5346 1099 for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
kfraser@15846 1100 {
kfraser@15846 1101 xsm_free_security_evtchn(d->evtchn[i]);
kaf24@7803 1102 xfree(d->evtchn[i]);
keir@17482 1103 d->evtchn[i] = NULL;
kfraser@15846 1104 }
keir@18622 1105 spin_unlock(&d->event_lock);
keir@20768 1106 }
keir@19826 1107
keir@20768 1108
keir@20768 1109 void evtchn_destroy_final(struct domain *d)
keir@20768 1110 {
keir@19826 1111 #if MAX_VIRT_CPUS > BITS_PER_LONG
keir@19826 1112 xfree(d->poll_mask);
keir@19826 1113 d->poll_mask = NULL;
keir@19826 1114 #endif
kaf24@992 1115 }
kaf24@3952 1116
keir@20768 1117
keir@21671 1118 void evtchn_move_pirqs(struct vcpu *v)
keir@21671 1119 {
keir@21671 1120 struct domain *d = v->domain;
keir@21671 1121 const cpumask_t *mask = cpumask_of(v->processor);
keir@21671 1122 unsigned int port;
keir@21671 1123 struct evtchn *chn;
keir@21671 1124
keir@21671 1125 spin_lock(&d->event_lock);
keir@21671 1126 for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
keir@21671 1127 {
keir@21671 1128 chn = evtchn_from_port(d, port);
keir@21671 1129 pirq_set_affinity(d, chn->u.pirq.irq, mask);
keir@21671 1130 }
keir@21671 1131 spin_unlock(&d->event_lock);
keir@21671 1132 }
keir@21671 1133
keir@21671 1134
keir@18566 1135 static void domain_dump_evtchn_info(struct domain *d)
keir@18566 1136 {
keir@18566 1137 unsigned int port;
keir@18566 1138
keir@20976 1139 bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
keir@20976 1140 d->poll_mask, d->max_vcpus);
keir@22028 1141 printk("Event channel information for domain %d:\n"
keir@22028 1142 "Polling vCPUs: {%s}\n"
keir@22028 1143 " port [p/m]\n", d->domain_id, keyhandler_scratch);
keir@18566 1144
keir@22028 1145 spin_lock(&d->event_lock);
keir@18566 1146
keir@18566 1147 for ( port = 1; port < MAX_EVTCHNS(d); ++port )
keir@18566 1148 {
keir@18566 1149 const struct evtchn *chn;
keir@18566 1150
keir@18566 1151 if ( !port_is_valid(d, port) )
keir@18566 1152 continue;
keir@18566 1153 chn = evtchn_from_port(d, port);
keir@18566 1154 if ( chn->state == ECS_FREE )
keir@18566 1155 continue;
keir@18566 1156
keir@20200 1157 printk(" %4u [%d/%d]: s=%d n=%d",
keir@18566 1158 port,
keir@20200 1159 !!test_bit(port, &shared_info(d, evtchn_pending)),
keir@20200 1160 !!test_bit(port, &shared_info(d, evtchn_mask)),
keir@18566 1161 chn->state, chn->notify_vcpu_id);
keir@18566 1162 switch ( chn->state )
keir@18566 1163 {
keir@18566 1164 case ECS_UNBOUND:
keir@18566 1165 printk(" d=%d", chn->u.unbound.remote_domid);
keir@18566 1166 break;
keir@18566 1167 case ECS_INTERDOMAIN:
keir@18566 1168 printk(" d=%d p=%d",
keir@18566 1169 chn->u.interdomain.remote_dom->domain_id,
keir@18566 1170 chn->u.interdomain.remote_port);
keir@18566 1171 break;
keir@18566 1172 case ECS_PIRQ:
keir@21671 1173 printk(" p=%d", chn->u.pirq.irq);
keir@18566 1174 break;
keir@18566 1175 case ECS_VIRQ:
keir@18566 1176 printk(" v=%d", chn->u.virq);
keir@18566 1177 break;
keir@18566 1178 }
keir@18566 1179 printk(" x=%d\n", chn->consumer_is_xen);
keir@18566 1180 }
keir@18566 1181
keir@18622 1182 spin_unlock(&d->event_lock);
keir@18566 1183 }
keir@18566 1184
keir@18566 1185 static void dump_evtchn_info(unsigned char key)
keir@18566 1186 {
keir@18566 1187 struct domain *d;
keir@18566 1188
keir@18566 1189 printk("'%c' pressed -> dumping event-channel info\n", key);
keir@18566 1190
keir@18566 1191 rcu_read_lock(&domlist_read_lock);
keir@18566 1192
keir@18566 1193 for_each_domain ( d )
keir@18566 1194 domain_dump_evtchn_info(d);
keir@18566 1195
keir@18566 1196 rcu_read_unlock(&domlist_read_lock);
keir@18566 1197 }
keir@18566 1198
keir@20048 1199 static struct keyhandler dump_evtchn_info_keyhandler = {
keir@20048 1200 .diagnostic = 1,
keir@20048 1201 .u.fn = dump_evtchn_info,
keir@20048 1202 .desc = "dump evtchn info"
keir@20048 1203 };
keir@20048 1204
keir@18566 1205 static int __init dump_evtchn_info_key_init(void)
keir@18566 1206 {
keir@20048 1207 register_keyhandler('e', &dump_evtchn_info_keyhandler);
keir@18566 1208 return 0;
keir@18566 1209 }
keir@18566 1210 __initcall(dump_evtchn_info_key_init);
keir@18566 1211
kaf24@3952 1212 /*
kaf24@3952 1213 * Local variables:
kaf24@3952 1214 * mode: C
kaf24@3952 1215 * c-set-style: "BSD"
kaf24@3952 1216 * c-basic-offset: 4
kaf24@3952 1217 * tab-width: 4
kaf24@3952 1218 * indent-tabs-mode: nil
kaf24@4026 1219 * End:
kaf24@3952 1220 */