xen-vtx-unstable

linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c @ 6774:4d899a738d59

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 13 15:05:49 2005 +0000 (2005-09-13)
parents 9ead08216805 1f460d0fd6c6
children e7c7196fa329 8ca0f98ba8e2
/******************************************************************************
 * drivers/xen/usbback/usbback.c
 *
 * Backend for the Xen virtual USB driver - provides an abstraction of a
 * USB host controller to the corresponding frontend driver.
 *
 * by Mark Williamson
 * Copyright (c) 2004 Intel Research Cambridge
 * Copyright (c) 2004, 2005 Mark Williamson
 *
 * Based on arch/xen/drivers/blkif/backend/main.c
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 */

#include "common.h"


#include <linux/list.h>
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/tqueue.h>

/*
 * This is rather arbitrary.
 */
#define MAX_PENDING_REQS 4
#define BATCH_PER_DOMAIN 1

static unsigned long mmap_vstart;

/* Needs to be sufficiently large that we can map the (large) buffers
 * the USB mass storage driver wants. */
#define MMAP_PAGES_PER_REQUEST \
    (128)
#define MMAP_PAGES             \
    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)

#define MMAP_VADDR(_req,_seg)                            \
    (mmap_vstart +                                       \
     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) +     \
     ((_seg) * PAGE_SIZE))
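
/*
 * Worked example (added commentary, not from the original source): with
 * MAX_PENDING_REQS == 4 and MMAP_PAGES_PER_REQUEST == 128 the region spans
 * 512 pages, and MMAP_VADDR(1, 2) == mmap_vstart + 130 * PAGE_SIZE, i.e.
 * each request owns a private, contiguous 128-page window.
 */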


static spinlock_t owned_ports_lock;
LIST_HEAD(owned_ports);

/* A list of these structures is used to track ownership of physical USB
 * ports. */
typedef struct
{
    usbif_priv_t *usbif_priv;
    char path[16];
    int guest_port;
    int enabled;
    struct list_head list;
    unsigned long guest_address; /* The USB device address that has been
                                  * assigned by the guest. */
    int dev_present; /* Is there a device present? */
    struct usb_device *dev;
    unsigned long ifaces; /* What interfaces are present on this device? */
} owned_port_t;


/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it.  When the request completes, the specified
 * domain has a response queued for it, with the saved 'id' passed back.
 */
typedef struct {
    usbif_priv_t *usbif_priv;
    unsigned long id;
    int nr_pages;
    unsigned short operation;
    int status;
} pending_req_t;

/*
 * We can't allocate pending_req's in order, since they may complete out of
 * order.  We therefore maintain an allocation ring.  This ring also indicates
 * when enough work has been passed down -- at that point the allocation ring
 * will be empty.
 */
static pending_req_t pending_reqs[MAX_PENDING_REQS];
static unsigned char pending_ring[MAX_PENDING_REQS];
static spinlock_t pend_prod_lock;

/* NB. We use a different index type to differentiate from shared usb rings. */
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
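
/*
 * Accounting sketch (added commentary): pending_prod starts at
 * MAX_PENDING_REQS and pending_cons at 0, so NR_PENDING_REQS starts at 0.
 * Taking a slot in dispatch_usb_io() advances pending_cons (NR rises);
 * free_pending() advances pending_prod (NR falls).  MASK_PEND_IDX relies
 * on MAX_PENDING_REQS being a power of two.
 */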

static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do);
static void make_response(usbif_priv_t *usbif, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long actual_length);
static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port);
static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req);
static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid);
static owned_port_t *usbif_find_port(char *);

/******************************************************************
 * PRIVATE DEBUG FUNCTIONS
 */

#undef DEBUG
#ifdef DEBUG

static void dump_port(owned_port_t *p)
{
    printk(KERN_DEBUG "owned_port_t @ %p\n"
           "  usbif_priv @ %p\n"
           "  path: %s\n"
           "  guest_port: %d\n"
           "  guest_address: %ld\n"
           "  dev_present: %d\n"
           "  dev @ %p\n"
           "  ifaces: 0x%lx\n",
           p, p->usbif_priv, p->path, p->guest_port, p->guest_address,
           p->dev_present, p->dev, p->ifaces);
}


static void dump_request(usbif_request_t *req)
{
    printk(KERN_DEBUG "id = 0x%lx\n"
           "devnum %d\n"
           "endpoint 0x%x\n"
           "direction %d\n"
           "speed %d\n"
           "pipe_type 0x%x\n"
           "transfer_buffer 0x%lx\n"
           "length 0x%lx\n"
           "transfer_flags 0x%lx\n"
           "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n"
           "iso_schedule = 0x%lx\n"
           "num_iso %ld\n",
           req->id, req->devnum, req->endpoint, req->direction, req->speed,
           req->pipe_type, req->transfer_buffer, req->length,
           req->transfer_flags, req->setup[0], req->setup[1], req->setup[2],
           req->setup[3], req->setup[4], req->setup[5], req->setup[6],
           req->setup[7], req->iso_schedule, req->num_iso);
}

static void dump_urb(struct urb *urb)
{
    printk(KERN_DEBUG "dumping urb @ %p\n", urb);

#define DUMP_URB_FIELD(name, format) \
    printk(KERN_DEBUG "  " # name " " format "\n", urb-> name)

    DUMP_URB_FIELD(pipe, "0x%x");
    DUMP_URB_FIELD(status, "%d");
    DUMP_URB_FIELD(transfer_flags, "0x%x");
    DUMP_URB_FIELD(transfer_buffer, "%p");
    DUMP_URB_FIELD(transfer_buffer_length, "%d");
    DUMP_URB_FIELD(actual_length, "%d");
}

static void dump_response(usbif_response_t *resp)
{
    printk(KERN_DEBUG "usbback: Sending response:\n"
           "  id = 0x%lx\n"
           "  op = %d\n"
           "  status = %d\n"
           "  data = %d\n"
           "  length = %d\n",
           resp->id, resp->op, resp->status, resp->data, resp->length);
}

#else /* DEBUG */

#define dump_port(blah)     ((void)0)
#define dump_request(blah)  ((void)0)
#define dump_urb(blah)      ((void)0)
#define dump_response(blah) ((void)0)

#endif /* DEBUG */

/******************************************************************
 * MEMORY MANAGEMENT
 */

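/* Tear down the foreign mappings for one request's window by pointing the
 * PTEs at nothing.  The UVMF flags are set only on the final multicall
 * entry, so the whole batch is covered by a single global TLB flush. */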
static void fast_flush_area(int idx, int nr_pages)
{
    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
    int i;

    /* Guard (added): a request that mapped no pages would otherwise index
     * mcl[-1] below. */
    if ( nr_pages == 0 )
        return;

    for ( i = 0; i < nr_pages; i++ )
    {
        MULTI_update_va_mapping(mcl+i, MMAP_VADDR(idx, i),
                                __pte(0), 0);
    }

    mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
        BUG();
}


/******************************************************************
 * USB INTERFACE SCHEDULER LIST MAINTENANCE
 */

static struct list_head usbio_schedule_list;
static spinlock_t usbio_schedule_list_lock;

static int __on_usbif_list(usbif_priv_t *up)
{
    return up->usbif_list.next != NULL;
}

void remove_from_usbif_list(usbif_priv_t *up)
{
    unsigned long flags;
    if ( !__on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( __on_usbif_list(up) )
    {
        list_del(&up->usbif_list);
        up->usbif_list.next = NULL;
        usbif_put(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}

static void add_to_usbif_list_tail(usbif_priv_t *up)
{
    unsigned long flags;
    if ( __on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( !__on_usbif_list(up) && (up->status == CONNECTED) )
    {
        list_add_tail(&up->usbif_list, &usbio_schedule_list);
        usbif_get(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}
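
/* Both list helpers above use the same pattern: an unlocked peek at
 * usbif_list.next to skip the common no-op case, then a re-check under
 * usbio_schedule_list_lock before actually touching the list.  A NULL
 * ->next pointer is this driver's "not on the list" marker. */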

void free_pending(int pending_idx)
{
    unsigned long flags;

    /* Free the pending request. */
    spin_lock_irqsave(&pend_prod_lock, flags);
    pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
    spin_unlock_irqrestore(&pend_prod_lock, flags);
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as urb->complete()
 */

static void maybe_trigger_usbio_schedule(void);

static void __end_usb_io_op(struct urb *purb)
{
    pending_req_t *pending_req;
    int pending_idx;

    pending_req = purb->context;

    pending_idx = pending_req - pending_reqs;

    ASSERT(purb->actual_length <= purb->transfer_buffer_length);
    ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE);

    /* An error fails the entire request. */
    if ( purb->status )
    {
        printk(KERN_WARNING "URB @ %p failed. Status %d\n", purb, purb->status);
    }

    if ( usb_pipetype(purb->pipe) == PIPE_ISOCHRONOUS )
    {
        int i;
        usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1);

        /* If we're dealing with an iso pipe, we need to copy back the schedule. */
        for ( i = 0; i < purb->number_of_packets; i++ )
        {
            sched[i].length = purb->iso_frame_desc[i].actual_length;
            ASSERT(sched[i].buffer_offset ==
                   purb->iso_frame_desc[i].offset);
            sched[i].status = purb->iso_frame_desc[i].status;
        }
    }

    fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages);

    kfree(purb->setup_packet);

    make_response(pending_req->usbif_priv, pending_req->id,
                  pending_req->operation, pending_req->status, 0, purb->actual_length);
    usbif_put(pending_req->usbif_priv);

    usb_free_urb(purb);

    free_pending(pending_idx);

    rmb();

    /* Check for anything still waiting in the rings, having freed a request... */
    maybe_trigger_usbio_schedule();
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait);

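/* The scheduler below runs as a kernel thread: it sleeps until
 * usbif_be_int() or __end_usb_io_op() wakes it, then round-robins the
 * interfaces on usbio_schedule_list, taking at most BATCH_PER_DOMAIN
 * requests from each before requeueing it at the tail. */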
static int usbio_schedule(void *arg)
{
    DECLARE_WAITQUEUE(wq, current);

    usbif_priv_t *up;
    struct list_head *ent;

    daemonize();

    for ( ; ; )
    {
        /* Wait for work to do. */
        add_wait_queue(&usbio_schedule_wait, &wq);
        set_current_state(TASK_INTERRUPTIBLE);
        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
             list_empty(&usbio_schedule_list) )
            schedule();
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&usbio_schedule_wait, &wq);

        /* Queue up a batch of requests. */
        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
                !list_empty(&usbio_schedule_list) )
        {
            ent = usbio_schedule_list.next;
            up = list_entry(ent, usbif_priv_t, usbif_list);
            usbif_get(up);
            remove_from_usbif_list(up);
            if ( do_usb_io_op(up, BATCH_PER_DOMAIN) )
                add_to_usbif_list_tail(up);
            usbif_put(up);
        }
    }
}

static void maybe_trigger_usbio_schedule(void)
{
    /*
     * Needed so that two processes, who together make the following predicate
     * true, don't both read stale values and evaluate the predicate
     * incorrectly.  Incredibly unlikely to stall the scheduler on x86, but...
     */
    smp_mb();

    if ( !list_empty(&usbio_schedule_list) )
        wake_up(&usbio_schedule_wait);
}


/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    usbif_priv_t *up = dev_id;

    smp_mb();

    add_to_usbif_list_tail(up);

    /* Will in fact /always/ trigger an io schedule in this case. */
    maybe_trigger_usbio_schedule();

    return IRQ_HANDLED;
}



/******************************************************************
 * DOWNWARD CALLS -- These interface with the usb-device layer proper.
 */

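/* Consumer side of the shared request ring: snapshot req_prod once, then
 * walk req_cons up towards it.  The rmb() below pairs with the frontend's
 * write barrier, ensuring the request bodies are visible before we read
 * them. */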
static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
{
    usbif_back_ring_t *usb_ring = &up->usb_ring;
    usbif_request_t *req;
    RING_IDX i, rp;
    int more_to_do = 0;

    rp = usb_ring->sring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */

    /* Take items off the comms ring, taking care not to overflow. */
    for ( i = usb_ring->req_cons;
          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i);
          i++ )
    {
        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
        {
            more_to_do = 1;
            break;
        }

        req = RING_GET_REQUEST(usb_ring, i);

        switch ( req->operation )
        {
        case USBIF_OP_PROBE:
            dispatch_usb_probe(up, req->id, req->port);
            break;

        case USBIF_OP_IO:
            /* Assemble an appropriate URB. */
            dispatch_usb_io(up, req);
            break;

        case USBIF_OP_RESET:
            dispatch_usb_reset(up, req->port);
            break;

        default:
            DPRINTK("error: unknown USB io operation [%d]\n",
                    req->operation);
            make_response(up, req->id, req->operation, -EINVAL, 0, 0);
            break;
        }
    }

    usb_ring->req_cons = i;

    return more_to_do;
}

static owned_port_t *find_guest_port(usbif_priv_t *up, int port)
{
    unsigned long flags;
    struct list_head *l;

    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each(l, &owned_ports)
    {
        owned_port_t *p = list_entry(l, owned_port_t, list);
        if ( p->usbif_priv == up && p->guest_port == port )
        {
            spin_unlock_irqrestore(&owned_ports_lock, flags);
            return p;
        }
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return NULL;
}

static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid)
{
    owned_port_t *port = find_guest_port(up, portid);
    int ret = 0;

    /* Guard (added): a bogus port id from the guest must not lead to a NULL
     * dereference below. */
    if ( port == NULL )
    {
        printk(KERN_WARNING "dispatch_usb_reset(): invalid port reset request "
               "(port %lu)\n", portid);
        make_response(up, 0, USBIF_OP_RESET, -EINVAL, 0, 0);
        return;
    }

    /* Allowing the guest to actually reset the device causes more problems
     * than it's worth.  We just fake it out in software but we will do a real
     * reset when the interface is destroyed. */

    dump_port(port);

    port->guest_address = 0;
    /* If there's an attached device then the port is now enabled. */
    if ( port->dev_present )
        port->enabled = 1;
    else
        port->enabled = 0;

    make_response(up, 0, USBIF_OP_RESET, ret, 0, 0);
}

static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid)
{
    owned_port_t *port = find_guest_port(up, portid);
    int ret;

    if ( port != NULL )
        ret = port->dev_present;
    else
    {
        ret = -EINVAL;
        printk(KERN_INFO "dispatch_usb_probe(): invalid port probe request "
               "(port %lu)\n", portid);
    }

    /* Probe result is sent back in-band.  Probes don't have an associated id
     * right now... */
    make_response(up, id, USBIF_OP_PROBE, ret, portid, 0);
}

/**
 * check_iso_schedule - safety check the isochronous schedule for an URB
 * @purb: the URB in question
 */
static int check_iso_schedule(struct urb *purb)
{
    int i;
    unsigned long total_length = 0;

    for ( i = 0; i < purb->number_of_packets; i++ )
    {
        struct usb_iso_packet_descriptor *desc = &purb->iso_frame_desc[i];

        if ( desc->offset >= purb->transfer_buffer_length
             || (desc->offset + desc->length) > purb->transfer_buffer_length )
            return -EINVAL;

        total_length += desc->length;

        if ( total_length > purb->transfer_buffer_length )
            return -EINVAL;
    }

    return 0;
}

owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req);

static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req)
{
    unsigned long buffer_mach;
    int i = 0, offset = 0,
        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
    pending_req_t *pending_req;
    unsigned long remap_prot;
    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
    struct urb *purb = NULL;
    owned_port_t *port;
    unsigned char *setup;

    dump_request(req);

    if ( NR_PENDING_REQS == MAX_PENDING_REQS )
    {
        printk(KERN_WARNING "usbback: Max requests already queued. "
               "Giving up!\n");

        return;
    }

    port = find_port_for_request(up, req);

    if ( port == NULL )
    {
        printk(KERN_WARNING "No such device! (%d)\n", req->devnum);
        dump_request(req);

        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
        return;
    }
    else if ( !port->dev_present )
    {
        /* In normal operation, we'll only get here if a device is unplugged
         * and the frontend hasn't noticed yet. */
        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
        return;
    }


    setup = kmalloc(8, GFP_KERNEL);

    if ( setup == NULL )
        goto no_mem;

    /* Copy request out for safety. */
    memcpy(setup, req->setup, 8);

    if ( setup[0] == 0x0 && setup[1] == 0x5 )
    {
        /* To virtualise the USB address space, we need to intercept
         * set_address messages and emulate.  From the USB specification:
         *  bmRequestType = 0x0;
         *  bRequest      = SET_ADDRESS (i.e. 0x5)
         *  wValue        = device address
         *  wIndex        = 0
         *  wLength       = 0
         *  data          = None
         */
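        /* Illustrative example (added, not from the original source):
         * setup = { 0x00, 0x05, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00 }
         * requests address 3; we record it in port->guest_address rather
         * than forwarding the request to the real device. */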
        /* The guest's requested address arrives little-endian in the setup
         * packet; remember it so we can translate device addresses later. */
        port->guest_address = le16_to_cpu(*(u16 *)(setup + 2));
        /* Make a successful response.  That was easy! */

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
    else if ( setup[0] == 0x0 && setup[1] == 0x9 )
    {
        /* The host kernel needs to know what device configuration is in use
         * because various error checks get confused otherwise.  We just do
         * configuration settings here, under controlled conditions.
         */

        /* Ignore the configuration setting and hope that the host kernel
           did it right. */
        /* usb_set_configuration(port->dev, setup[2]); */

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
    else if ( setup[0] == 0x1 && setup[1] == 0xB )
    {
        /* The host kernel needs to know what device interface is in use
         * because various error checks get confused otherwise.  We just do
         * interface settings here, under controlled conditions.
         */
        usb_set_interface(port->dev, (setup[4] | setup[5] << 8),
                          (setup[2] | setup[3] << 8) );

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }

    if ( (req->transfer_buffer - (req->transfer_buffer & PAGE_MASK)
          + req->length)
         > MMAP_PAGES_PER_REQUEST * PAGE_SIZE )
    {
        printk(KERN_WARNING "usbback: request of %lu bytes too large\n",
               req->length);
        make_response(up, req->id, req->operation, -EINVAL, 0, 0);
        kfree(setup);
        return;
    }

    buffer_mach = req->transfer_buffer;

    if ( buffer_mach == 0 )
        goto no_remap;

    ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST);
    ASSERT(buffer_mach);

    /* Always map writeable for now. */
    remap_prot = _KERNPG_TABLE;

    for ( i = 0, offset = 0; offset < req->length;
          i++, offset += PAGE_SIZE )
    {
        MULTI_update_va_mapping_otherdomain(
            mcl+i, MMAP_VADDR(pending_idx, i),
            pfn_pte_ma((buffer_mach + offset) >> PAGE_SHIFT, remap_prot),
            0, up->domid);

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
            FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);

        ASSERT(virt_to_mfn(MMAP_VADDR(pending_idx, i))
               == ((buffer_mach >> PAGE_SHIFT) + i));
    }

    if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
    {
        /* Map in ISO schedule, if necessary. */
        MULTI_update_va_mapping_otherdomain(
            mcl+i, MMAP_VADDR(pending_idx, i),
            pfn_pte_ma(req->iso_schedule >> PAGE_SHIFT, remap_prot),
            0, up->domid);

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
            FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);

        i++;
    }

    if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) )
        BUG();

    {
        int j;
        for ( j = 0; j < i; j++ )
        {
            if ( unlikely(mcl[j].result != 0) )
            {
                printk(KERN_WARNING
                       "invalid buffer %d -- could not remap it\n", j);
                fast_flush_area(pending_idx, i);
                goto bad_descriptor;
            }
        }
    }

 no_remap:

    ASSERT(i <= MMAP_PAGES_PER_REQUEST);
    ASSERT(i * PAGE_SIZE >= req->length);

    /* We have to do this because some things might complete out of order. */
    pending_req = &pending_reqs[pending_idx];
    pending_req->usbif_priv = up;
    pending_req->id = req->id;
    pending_req->operation = req->operation;
    pending_req->nr_pages = i;

    pending_cons++;

    usbif_get(up);

    /* Fill out an actual request for the USB layer. */
    purb = usb_alloc_urb(req->num_iso);

    if ( purb == NULL )
    {
        usbif_put(up);
        free_pending(pending_idx);
        goto no_mem;
    }

    purb->dev = port->dev;
    purb->context = pending_req;
    purb->transfer_buffer =
        (void *)(MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK));
    if ( buffer_mach == 0 )
        purb->transfer_buffer = NULL;
    purb->complete = __end_usb_io_op;
    purb->transfer_buffer_length = req->length;
    purb->transfer_flags = req->transfer_flags;

    purb->pipe = 0;
    purb->pipe |= req->direction << 7;
    purb->pipe |= port->dev->devnum << 8;
    purb->pipe |= req->speed << 26;
    purb->pipe |= req->pipe_type << 30;
    purb->pipe |= req->endpoint << 15;
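
    /* Bit layout sketch (added commentary; matches the 2.4-era Linux pipe
     * encoding this driver targets): bit 7 = direction, bits 8-14 = device
     * address, bits 15-18 = endpoint, bit 26 = lowspeed flag, bits 30-31 =
     * pipe type (ISO/INT/CTRL/BULK). */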

    purb->number_of_packets = req->num_iso;

    if ( purb->number_of_packets * sizeof(usbif_iso_t) > PAGE_SIZE )
        goto urb_error;

    /* Make sure there's always some kind of timeout. */
    purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000
                                         : 1000;

    purb->setup_packet = setup;

    if ( req->pipe_type == 0 ) /* ISO */
    {
        int j;
        usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1);

        /* If we're dealing with an iso pipe, we need to copy in a schedule. */
        for ( j = 0; j < purb->number_of_packets; j++ )
        {
            purb->iso_frame_desc[j].length = iso_sched[j].length;
            purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset;
            iso_sched[j].status = 0;
        }
    }
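
    /* Note (added): the iso schedule occupies the final mapped page of this
     * request's window; __end_usb_io_op() copies per-packet status and
     * actual lengths back through the same page. */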

    if ( check_iso_schedule(purb) != 0 )
        goto urb_error;

    if ( usb_submit_urb(purb) != 0 )
        goto urb_error;

    return;

 urb_error:
    dump_urb(purb);
    usbif_put(up);
    free_pending(pending_idx);

 bad_descriptor:
    kfree(setup);
    if ( purb != NULL )
        usb_free_urb(purb);
    make_response(up, req->id, req->operation, -EINVAL, 0, 0);
    return;

 no_mem:
    if ( setup != NULL )
        kfree(setup);
    make_response(up, req->id, req->operation, -ENOMEM, 0, 0);
    return;
}



/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */


static void make_response(usbif_priv_t *up, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long length)
{
    usbif_response_t *resp;
    unsigned long flags;
    usbif_back_ring_t *usb_ring = &up->usb_ring;

    /* Place on the response ring for the relevant domain. */
    spin_lock_irqsave(&up->usb_ring_lock, flags);
    resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt);
    resp->id = id;
    resp->operation = op;
    resp->status = st;
    resp->data = inband;
    resp->length = length;
    wmb(); /* Ensure other side can see the response fields. */

    dump_response(resp);

    usb_ring->rsp_prod_pvt++;
    RING_PUSH_RESPONSES(usb_ring);
    spin_unlock_irqrestore(&up->usb_ring_lock, flags);

    /* Kick the relevant domain. */
    notify_via_evtchn(up->evtchn);
}

/**
 * usbif_claim_port - claim devices on a port on behalf of guest
 *
 * Once completed, this will ensure that any device attached to that
 * port is claimed by this driver for use by the guest.
 */
int usbif_claim_port(usbif_be_claim_port_t *msg)
{
    owned_port_t *o_p;

    /* Sanity... */
    if ( usbif_find_port(msg->path) != NULL )
    {
        printk(KERN_WARNING "usbback: Attempted to claim USB port "
               "we already own!\n");
        return -EINVAL;
    }

    /* No need for a slab cache - this should be infrequent. */
    o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL);

    if ( o_p == NULL )
        return -ENOMEM;

    o_p->enabled = 0;
    o_p->usbif_priv = usbif_find(msg->domid);
    o_p->guest_port = msg->usbif_port;
    o_p->dev_present = 0;
    o_p->dev = NULL;        /* Initialisation added: these two fields were */
    o_p->ifaces = 0;        /* previously left uninitialised from kmalloc(). */
    o_p->guest_address = 0; /* Default address. */

    /* Bounded copy (was strcpy): o_p->path is a fixed 16-byte buffer. */
    strncpy(o_p->path, msg->path, sizeof(o_p->path) - 1);
    o_p->path[sizeof(o_p->path) - 1] = '\0';

    spin_lock_irq(&owned_ports_lock);

    list_add(&o_p->list, &owned_ports);

    spin_unlock_irq(&owned_ports_lock);

    printk(KERN_INFO "usbback: Claimed USB port (%s) for %d.%d\n", o_p->path,
           msg->domid, msg->usbif_port);

    /* Force a reprobe for unclaimed devices. */
    usb_scan_devices();

    return 0;
}

owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req)
{
    unsigned long flags;
    struct list_head *port;

    /* I'm assuming this is not called from IRQ context - correct?  I think
     * it's probably only called in response to control messages or plug events
     * in the USB hub kernel thread, so should be OK. */
    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each(port, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( p->usbif_priv == up && p->guest_address == req->devnum && p->enabled )
        {
            dump_port(p);

            spin_unlock_irqrestore(&owned_ports_lock, flags);
            return p;
        }
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return NULL;
}

owned_port_t *__usbif_find_port(char *path)
{
    struct list_head *port;

    list_for_each(port, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( !strcmp(path, p->path) )
        {
            return p;
        }
    }

    return NULL;
}

owned_port_t *usbif_find_port(char *path)
{
    owned_port_t *ret;
    unsigned long flags;

    spin_lock_irqsave(&owned_ports_lock, flags);
    ret = __usbif_find_port(path);
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return ret;
}


static void *probe(struct usb_device *dev, unsigned iface,
                   const struct usb_device_id *id)
{
    owned_port_t *p;

    /* We don't care what the device is - if we own the port, we want it.  We
     * don't deal with device-specifics in this driver, so we don't care what
     * the device actually is ;-) */
    if ( (p = usbif_find_port(dev->devpath)) != NULL )
    {
        printk(KERN_INFO "usbback: claimed device attached to owned port\n");

        p->dev_present = 1;
        p->dev = dev;
        set_bit(iface, &p->ifaces);

        return p->usbif_priv;
    }
    else
        printk(KERN_INFO "usbback: hotplug for non-owned port (%s), ignoring\n",
               dev->devpath);


    return NULL;
}

static void disconnect(struct usb_device *dev, void *usbif)
{
    /* Note the device is removed so we can tell the guest when it probes. */
    owned_port_t *port = usbif_find_port(dev->devpath);

    /* Guard (added): be defensive if we get a disconnect for a port we no
     * longer (or never) owned. */
    if ( port == NULL )
        return;

    port->dev_present = 0;
    port->dev = NULL;
    port->ifaces = 0;
}


struct usb_driver driver =
{
    .owner      = THIS_MODULE,
    .name       = "Xen USB Backend",
    .probe      = probe,
    .disconnect = disconnect,
    .id_table   = NULL,
};
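
/* With a NULL id_table the (2.4-style) usbcore offers every newly attached
 * interface to probe(), where the port-ownership check decides whether we
 * actually claim it.  (This is our reading of the code, not a statement
 * from the original authors.) */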

/* __usbif_release_port - internal mechanics for releasing a port */
void __usbif_release_port(owned_port_t *p)
{
    int i;

    for ( i = 0; p->ifaces != 0; i++ )
        if ( p->ifaces & 1 << i )
        {
            usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i));
            clear_bit(i, &p->ifaces);
        }
    list_del(&p->list);

    /* Reset the real device.  We don't simulate disconnect / probe for other
     * drivers in this kernel because we assume the device is completely under
     * the control of ourselves (i.e. the guest!).  This should ensure that the
     * device is in a sane state for the next customer ;-) */

    /* MAW NB: we're not resetting the real device here.  This looks perfectly
     * valid to me but it causes memory corruption.  We seem to get away with
     * not resetting for now, although it'd be nice to have this tracked
     * down. */
/*     if ( p->dev != NULL) */
/*         usb_reset_device(p->dev); */

    kfree(p);
}


/**
 * usbif_release_port - stop claiming devices on a port on behalf of guest
 */
void usbif_release_port(usbif_be_release_port_t *msg)
{
    owned_port_t *p;

    spin_lock_irq(&owned_ports_lock);
    p = __usbif_find_port(msg->path);
    /* Guard (added): releasing a path we never claimed must not dereference
     * NULL. */
    if ( p != NULL )
        __usbif_release_port(p);
    spin_unlock_irq(&owned_ports_lock);
}

void usbif_release_ports(usbif_priv_t *up)
{
    struct list_head *port, *tmp;
    unsigned long flags;

    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each_safe(port, tmp, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( p->usbif_priv == up )
            __usbif_release_port(p);
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);
}

static int __init usbif_init(void)
{
    int i;
    struct page *page;

    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
         !(xen_start_info->flags & SIF_USB_BE_DOMAIN) )
        return 0;

    page = balloon_alloc_empty_page_range(MMAP_PAGES);
    BUG_ON(page == NULL);
    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));

    pending_cons = 0;
    pending_prod = MAX_PENDING_REQS;
    memset(pending_reqs, 0, sizeof(pending_reqs));
    for ( i = 0; i < MAX_PENDING_REQS; i++ )
        pending_ring[i] = i;

    spin_lock_init(&pend_prod_lock);

    spin_lock_init(&owned_ports_lock);
    INIT_LIST_HEAD(&owned_ports);

    spin_lock_init(&usbio_schedule_list_lock);
    INIT_LIST_HEAD(&usbio_schedule_list);

    if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
        BUG();

    usbif_interface_init();

    usbif_ctrlif_init();

    usb_register(&driver);

    printk(KERN_INFO "Xen USB Backend Initialised\n");

    return 0;
}

__initcall(usbif_init);