linux-2.6.10-xen-sparse/drivers/xen/usbback/usbback.c @ 3660:47a82d6e179e

bitkeeper revision 1.1159.239.1 (42011d3b3kafRU5DPu227sLReeeG3g)

Ignore SET_CONFIGURATION messages for now. It Works For Me (TM). Please
report any problems / weirdness you see as a result.
author    mwilli2@equilibrium.research
date      Wed Feb 02 18:34:35 2005 +0000 (2005-02-02)
parents   d295396360fb
children  d1bedbc0f20a
/******************************************************************************
 * arch/xen/drivers/usbif/backend/main.c
 *
 * Backend for the Xen virtual USB driver - provides an abstraction of a
 * USB host controller to the corresponding frontend driver.
 *
 * by Mark Williamson, Copyright (c) 2004 Intel Research Cambridge
 *
 * Based on arch/xen/drivers/blkif/backend/main.c
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 */

#include "common.h"

#include <linux/list.h>
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/tqueue.h>

/*
 * This is rather arbitrary.
 */
#define MAX_PENDING_REQS 4
#define BATCH_PER_DOMAIN 1

static unsigned long mmap_vstart;

/* Needs to be sufficiently large that we can map the (large) buffers
 * the USB mass storage driver wants. */
#define MMAP_PAGES_PER_REQUEST \
    (128)
#define MMAP_PAGES             \
    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)

#define MMAP_VADDR(_req,_seg)                        \
    (mmap_vstart +                                   \
     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
     ((_seg) * PAGE_SIZE))
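
/*
 * Illustrative only: with MMAP_PAGES_PER_REQUEST = 128 and 4KB pages, each
 * pending request owns a contiguous 512KB window of virtual address space,
 * and the windows are laid out back-to-back from mmap_vstart with page-sized
 * segments inside each window.  For example, segment 3 of request 2 lands at
 * mmap_vstart + (2 * 128 + 3) * PAGE_SIZE.
 */
#if 0 /* sketch, not built: hypothetical sanity check of the layout arithmetic */
static void check_mmap_layout(void)
{
    ASSERT(MMAP_VADDR(0, 0) == mmap_vstart);
    ASSERT(MMAP_VADDR(2, 3) ==
           mmap_vstart + (2 * MMAP_PAGES_PER_REQUEST + 3) * PAGE_SIZE);
    /* Windows of consecutive requests abut exactly. */
    ASSERT(MMAP_VADDR(1, 0) ==
           MMAP_VADDR(0, MMAP_PAGES_PER_REQUEST - 1) + PAGE_SIZE);
}
#endif
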
#define MIN(x,y) ( ((x) < (y)) ? (x) : (y) )

static spinlock_t owned_ports_lock;
LIST_HEAD(owned_ports);

/* A list of these structures is used to track ownership of physical USB
 * ports. */
typedef struct
{
    usbif_priv_t      *usbif_priv;
    char               path[16];
    int                guest_port;
    int                enabled;
    struct list_head   list;
    unsigned long      guest_address; /* The USB device address that has been
                                       * assigned by the guest. */
    int                dev_present;   /* Is there a device present? */
    struct usb_device *dev;
    unsigned long      ifaces; /* What interfaces are present on this device? */
} owned_port_t;


/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it.  Once the request completes, the specified
 * domain has a response queued for it, with the saved 'id' passed back.
 */
typedef struct {
    usbif_priv_t  *usbif_priv;
    usbif_iso_t   *iso_sched;
    unsigned long  id;
    int            nr_pages;
    unsigned short operation;
    int            status;
} pending_req_t;

/*
 * We can't allocate pending_req's in order, since they may complete out of
 * order.  We therefore maintain an allocation ring.  This ring also indicates
 * when enough work has been passed down -- at that point the allocation ring
 * will be empty.
 */
static pending_req_t pending_reqs[MAX_PENDING_REQS];
static unsigned char pending_ring[MAX_PENDING_REQS];
static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;

/* NB. We use a different index type to differentiate from shared blk rings. */
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
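
/*
 * Illustrative only: the free-slot discipline for the allocation ring.
 * pending_ring[] initially holds every index 0..MAX_PENDING_REQS-1 (see
 * usbif_init below).  A slot index is consumed at pending_cons when a request
 * is dispatched and produced back at pending_prod when it completes, so
 * NR_PENDING_REQS counts the slots currently in flight.
 */
#if 0 /* sketch, not built: hypothetical helpers showing the idiom used below */
static int alloc_pending_idx(void)
{
    /* Caller has checked NR_PENDING_REQS < MAX_PENDING_REQS. */
    return pending_ring[MASK_PEND_IDX(pending_cons++)];
}
static void free_pending_idx(int idx)
{
    unsigned long flags;
    spin_lock_irqsave(&pend_prod_lock, flags);
    pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;
    spin_unlock_irqrestore(&pend_prod_lock, flags);
}
#endif
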
static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do);
static void make_response(usbif_priv_t *usbif, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long actual_length);
static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port);
static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req);
static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid);
static owned_port_t *usbif_find_port(char *);


void dump_port(owned_port_t *p)
{
    printk("owned_port_t @ %p\n", p);
    printk("  usbif_priv @ %p\n", p->usbif_priv);
    printk("  path: %s\n", p->path);
    printk("  guest_port: %d\n", p->guest_port);
    printk("  guest_address: %ld\n", p->guest_address);
    printk("  dev_present: %d\n", p->dev_present);
    printk("  dev @ %p\n", p->dev);
    printk("  ifaces: 0x%lx\n", p->ifaces);
}
static void fast_flush_area(int idx, int nr_pages)
{
    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
    int i;

    /* Guard against zero-page requests (e.g. transfers with no data
     * buffer), which would otherwise index mcl[-1] below. */
    if ( nr_pages == 0 )
        return;

    for ( i = 0; i < nr_pages; i++ )
    {
        mcl[i].op      = __HYPERVISOR_update_va_mapping;
        mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
        mcl[i].args[1] = 0;
        mcl[i].args[2] = 0;
    }

    mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
        BUG();
}
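
/*
 * Illustrative only: the multicall above batches what would otherwise be
 * nr_pages separate update_va_mapping hypercalls (each tearing down one page
 * of the request's window), paying the guest/hypervisor transition cost once
 * instead of per page, with a single TLB flush requested on the final entry.
 * The hypercall signature below is an assumption based on the multicall
 * arguments used above.
 */
#if 0 /* sketch, not built: the unbatched equivalent, one hypercall per page */
static void slow_flush_area(int idx, int nr_pages)
{
    int i;
    for ( i = 0; i < nr_pages; i++ )
        HYPERVISOR_update_va_mapping(MMAP_VADDR(idx, i) >> PAGE_SHIFT,
                                     0,
                                     (i == nr_pages - 1) ? UVMF_FLUSH_TLB : 0);
}
#endif
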
/******************************************************************
 * USB INTERFACE SCHEDULER LIST MAINTENANCE
 */

static struct list_head usbio_schedule_list;
static spinlock_t usbio_schedule_list_lock;

static int __on_usbif_list(usbif_priv_t *up)
{
    return up->usbif_list.next != NULL;
}

void remove_from_usbif_list(usbif_priv_t *up)
{
    unsigned long flags;
    if ( !__on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( __on_usbif_list(up) )
    {
        list_del(&up->usbif_list);
        up->usbif_list.next = NULL;
        usbif_put(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}

static void add_to_usbif_list_tail(usbif_priv_t *up)
{
    unsigned long flags;
    if ( __on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( !__on_usbif_list(up) && (up->status == CONNECTED) )
    {
        list_add_tail(&up->usbif_list, &usbio_schedule_list);
        usbif_get(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}
/******************************************************************
 * COMPLETION CALLBACK -- Called as urb->complete()
 */

static void maybe_trigger_usbio_schedule(void);

static void __end_usb_io_op(struct urb *purb)
{
    unsigned long flags;
    pending_req_t *pending_req;
    int pending_idx;

    pending_req = purb->context;

/*     printk("Completed for id = %p to 0x%lx - 0x%lx\n", pending_req->id, */
/*            virt_to_machine(purb->transfer_buffer), */
/*            virt_to_machine(purb->transfer_buffer) */
/*            + pending_req->nr_pages * PAGE_SIZE); */

    pending_idx = pending_req - pending_reqs;

    ASSERT(purb->actual_length <= purb->transfer_buffer_length);
    ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE);

    /* Record the completion status for the response; it is not set anywhere
     * else and would otherwise be stale from a previous use of this slot. */
    pending_req->status = purb->status;

    /* An error fails the entire request. */
    if ( purb->status )
    {
        printk("URB @ %p failed. Status %d\n", purb, purb->status);
    }

    if ( usb_pipetype(purb->pipe) == 0 ) /* i.e. PIPE_ISOCHRONOUS */
    {
        int i;
        usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1);
        ASSERT(sched == pending_req->iso_sched);

//        printk("writing back schedule at %p\n", sched);

        /* If we're dealing with an iso pipe, we need to copy back the schedule. */
        for ( i = 0; i < purb->number_of_packets; i++ )
        {
            sched[i].length = purb->iso_frame_desc[i].actual_length;
            ASSERT(sched[i].buffer_offset ==
                   purb->iso_frame_desc[i].offset);
            sched[i].status = purb->iso_frame_desc[i].status;
        }
    }

//    printk("Flushing %d pages\n", pending_req->nr_pages);
    fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages);

    kfree(purb->setup_packet);

    spin_lock_irqsave(&pending_req->usbif_priv->usb_ring_lock, flags);
    make_response(pending_req->usbif_priv, pending_req->id,
                  pending_req->operation, pending_req->status, 0, purb->actual_length);
    spin_unlock_irqrestore(&pending_req->usbif_priv->usb_ring_lock, flags);
    usbif_put(pending_req->usbif_priv);

    usb_free_urb(purb);

    /* Free the pending request. */
    spin_lock_irqsave(&pend_prod_lock, flags);
    pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
    spin_unlock_irqrestore(&pend_prod_lock, flags);

    rmb();

    /* Check for anything still waiting in the rings, having freed a request... */
    maybe_trigger_usbio_schedule();
}
/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait);

static int usbio_schedule(void *arg)
{
    DECLARE_WAITQUEUE(wq, current);

    usbif_priv_t *up;
    struct list_head *ent;

    daemonize();

    for ( ; ; )
    {
        /* Wait for work to do. */
        add_wait_queue(&usbio_schedule_wait, &wq);
        set_current_state(TASK_INTERRUPTIBLE);
        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
             list_empty(&usbio_schedule_list) )
            schedule();
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&usbio_schedule_wait, &wq);

        /* Queue up a batch of requests. */
        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
                !list_empty(&usbio_schedule_list) )
        {
            ent = usbio_schedule_list.next;
            up = list_entry(ent, usbif_priv_t, usbif_list);
            usbif_get(up);
            remove_from_usbif_list(up);
            if ( do_usb_io_op(up, BATCH_PER_DOMAIN) )
                add_to_usbif_list_tail(up);
            usbif_put(up);
        }
    }
}
static void maybe_trigger_usbio_schedule(void)
{
    /*
     * Needed so that two processes, which together make the following
     * predicate true, don't both read stale values and evaluate the
     * predicate incorrectly.  Incredibly unlikely to stall the scheduler on
     * x86, but...
     */
    smp_mb();

    if ( !list_empty(&usbio_schedule_list) )
        wake_up(&usbio_schedule_wait);
}
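
/*
 * Illustrative only: the stale-read scenario the barrier above guards
 * against.  Two paths call this function: the completion callback (which has
 * just freed a pending_req slot) and the interrupt handler (which has just
 * queued an interface).  Each makes half of "work queued and capacity free"
 * true.  Without the smp_mb(), each side could read a stale view of the
 * other's half, conclude there is nothing to do, and skip the wake-up,
 * leaving work queued with the scheduler asleep.
 */
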
/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    usbif_priv_t *up = dev_id;

    smp_mb();

    add_to_usbif_list_tail(up);

    /* Will in fact /always/ trigger an io schedule in this case. */
    maybe_trigger_usbio_schedule();

    return IRQ_HANDLED;
}
/******************************************************************
 * DOWNWARD CALLS -- These interface with the usb-device layer proper.
 */

static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
{
    usbif_t *usb_ring = up->usb_ring_base;
    usbif_request_t *req;
    USBIF_RING_IDX i, rp;
    int more_to_do = 0;
    unsigned long flags;

    spin_lock_irqsave(&up->usb_ring_lock, flags);

    rp = usb_ring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */

    /* Take items off the comms ring, taking care not to overflow. */
    for ( i = up->usb_req_cons;
          (i != rp) && ((i-up->usb_resp_prod) != USBIF_RING_SIZE);
          i++ )
    {
        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
        {
            more_to_do = 1;
            break;
        }

        req = &usb_ring->ring[MASK_USBIF_IDX(i)].req;

        switch ( req->operation )
        {
        case USBIF_OP_PROBE:
            dispatch_usb_probe(up, req->id, req->port);
            break;

        case USBIF_OP_IO:
            /* Assemble an appropriate URB. */
            dispatch_usb_io(up, req);
            break;

        case USBIF_OP_RESET:
            dispatch_usb_reset(up, req->port);
            break;

        default:
            DPRINTK("error: unknown USB io operation [%d]\n",
                    req->operation);
            make_response(up, req->id, req->operation, -EINVAL, 0, 0);
            break;
        }
    }

    up->usb_req_cons = i;

    spin_unlock_irqrestore(&up->usb_ring_lock, flags);

    return more_to_do;
}
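
/*
 * Illustrative only: the second loop condition above.  The ring's slots are
 * shared between requests and responses, so (i - up->usb_resp_prod) counts
 * requests consumed whose responses have not yet been produced.  If that
 * difference reaches USBIF_RING_SIZE, every slot is owed a response, and
 * consuming another request would overwrite a slot the frontend still needs.
 * For example, with USBIF_RING_SIZE = 64, usb_req_cons = 70 and
 * usb_resp_prod = 6 means 64 responses are outstanding, so consumption must
 * pause until responses drain.
 */
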
static owned_port_t *find_guest_port(usbif_priv_t *up, int port)
{
    unsigned long flags;
    struct list_head *l;

    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each(l, &owned_ports)
    {
        owned_port_t *p = list_entry(l, owned_port_t, list);
        if ( p->usbif_priv == up && p->guest_port == port )
        {
            spin_unlock_irqrestore(&owned_ports_lock, flags);
            return p;
        }
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return NULL;
}
static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid)
{
    owned_port_t *port = find_guest_port(up, portid);
    int ret = 0;

    /* A reset request for a port we don't own would otherwise be
     * dereferenced below. */
    if ( port == NULL )
    {
        printk("dispatch_usb_reset(): invalid port reset request (port %ld)\n",
               portid);
        make_response(up, 0, USBIF_OP_RESET, -EINVAL, 0, 0);
        return;
    }

    /* Allowing the guest to actually reset the device causes more problems
     * than it's worth.  We just fake it out in software but we will do a real
     * reset when the interface is destroyed. */

#if 0
    printk("Reset port %ld\n", portid);

    dump_port(port);
#endif

    port->guest_address = 0;
    /* If there's an attached device then the port is now enabled. */
    if ( port->dev_present )
        port->enabled = 1;
    else
        port->enabled = 0;

    make_response(up, 0, USBIF_OP_RESET, ret, 0, 0);
}
static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid)
{
    owned_port_t *port = find_guest_port(up, portid);
    int ret;

    if ( port != NULL )
        ret = port->dev_present;
    else
    {
        ret = -EINVAL;
        printk("dispatch_usb_probe(): invalid port probe request (port %ld)\n",
               portid);
    }

    /* Probe result is sent back in-band.  Probes don't have an associated id
     * right now... */
    make_response(up, id, USBIF_OP_PROBE, ret, portid, 0);
}
owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req);

static void dump_request(usbif_request_t *req)
{
    printk("id = 0x%lx\n", req->id);

    printk("devnum %d\n", req->devnum);
    printk("endpoint 0x%x\n", req->endpoint);
    printk("direction %d\n", req->direction);
    printk("speed %d\n", req->speed);
    printk("pipe_type 0x%x\n", req->pipe_type);
    printk("transfer_buffer 0x%lx\n", req->transfer_buffer);
    printk("length 0x%lx\n", req->length);
    printk("transfer_flags 0x%lx\n", req->transfer_flags);
    printk("setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
           req->setup[0], req->setup[1], req->setup[2], req->setup[3],
           req->setup[4], req->setup[5], req->setup[6], req->setup[7]);
    printk("iso_schedule = 0x%lx\n", req->iso_schedule);
    printk("num_iso %ld\n", req->num_iso);
}

void dump_urb(struct urb *urb)
{
    printk("dumping urb @ %p\n", urb);

#define DUMP_URB_FIELD(name, format) \
    printk("  " # name " " format "\n", urb->name)

    DUMP_URB_FIELD(pipe, "0x%x");
    DUMP_URB_FIELD(status, "%d");
    DUMP_URB_FIELD(transfer_flags, "0x%x");
    DUMP_URB_FIELD(transfer_buffer, "%p");
    DUMP_URB_FIELD(transfer_buffer_length, "%d");
    DUMP_URB_FIELD(actual_length, "%d");
}
static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req)
{
    unsigned long buffer_mach;
    int i = 0, offset = 0,
        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
    pending_req_t *pending_req;
    unsigned long remap_prot;
    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
    struct urb *purb = NULL;
    owned_port_t *port;
    unsigned char *setup;

//    dump_request(req);

    if ( NR_PENDING_REQS == MAX_PENDING_REQS )
    {
        printk("usbback: Max requests already queued. Now giving up!\n");

        return;
    }

    port = find_port_for_request(up, req);

    if ( port == NULL )
    {
        printk("No such device! (%d)\n", req->devnum);
        dump_request(req);

        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
        return;
    }

    setup = kmalloc(8, GFP_ATOMIC | GFP_NOIO);

    if ( setup == NULL )
        goto no_mem;

    /* Copy request out for safety. */
    memcpy(setup, req->setup, 8);

    if ( setup[0] == 0x0 && setup[1] == 0x5 )
    {
        /* To virtualise the USB address space, we need to intercept
         * set_address messages and emulate.  From the USB specification:
         *   bmRequestType = 0x0;
         *   bRequest      = SET_ADDRESS (i.e. 0x5)
         *   wValue        = device address
         *   wIndex        = 0
         *   wLength       = 0
         *   data          = None
         */
        /* Record the address the guest assigned; wValue arrives
         * little-endian on the wire, hence le16_to_cpu. */
        port->guest_address = le16_to_cpu(*(u16 *)(setup + 2));
        /* Make a successful response.  That was easy! */

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
    else if ( setup[0] == 0x0 && setup[1] == 0x9 )
    {
        /* The host kernel needs to know what device configuration is in use
         * because various error checks get confused otherwise.  We just do
         * configuration settings here, under controlled conditions.
         */

        /* Ignore configuration setting and hope that the host kernel
           did it right. */
//        usb_set_configuration(port->dev, setup[2]);

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
    else if ( setup[0] == 0x1 && setup[1] == 0xB )
    {
        /* The host kernel needs to know what device interface is in use
         * because various error checks get confused otherwise.  We just do
         * interface settings here, under controlled conditions.
         */
        usb_set_interface(port->dev, (setup[4] | setup[5] << 8),
                          (setup[2] | setup[3] << 8) );

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
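
    /*
     * Illustrative only: the three control messages intercepted above are
     * standard 8-byte SETUP packets.  For example, a SET_ADDRESS request
     * assigning address 4 arrives as:
     *
     *   setup[] = { 0x00, 0x05, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00 }
     *               bmRequestType = 0x00 (host-to-device, standard, device)
     *               bRequest      = 0x05 (SET_ADDRESS)
     *               wValue        = 0x0004 (new address, little-endian)
     *               wIndex        = 0x0000
     *               wLength       = 0x0000 (no data stage)
     *
     * which the code above answers entirely in software, recording 4 in
     * port->guest_address rather than renumbering the physical device.
     */
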
    if ( ( req->transfer_buffer - (req->transfer_buffer & PAGE_MASK)
           + req->length )
         > MMAP_PAGES_PER_REQUEST * PAGE_SIZE )
    {
        printk("usbback: request of %ld bytes too large, failing it\n", req->length);
        make_response(up, req->id, req->operation, -EINVAL, 0, 0);
        kfree(setup);
        return;
    }
    buffer_mach = req->transfer_buffer;

    if ( buffer_mach == 0 )
        goto no_remap;

    ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST);
    ASSERT(buffer_mach);

    /* Always map writeable for now. */
    remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;

    for ( i = 0, offset = 0; offset < req->length;
          i++, offset += PAGE_SIZE )
    {
//        printk("length = %d, offset = %d, looping!\n", req->length, offset);

        mcl[i].op      = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
        mcl[i].args[1] = ((buffer_mach & PAGE_MASK) + offset) | remap_prot;
        mcl[i].args[2] = 0;
        mcl[i].args[3] = up->domid;

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
            FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);
//        printk("i = %d\n", i);

        ASSERT(virt_to_machine(MMAP_VADDR(pending_idx, i))
               == buffer_mach + (i << PAGE_SHIFT));
    }

    if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
    {
//        printk("for iso, i = %d\n", i);
        /* Map in ISO schedule, if necessary. */
        mcl[i].op      = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
        mcl[i].args[1] = (req->iso_schedule & PAGE_MASK) | remap_prot;
        mcl[i].args[2] = 0;
        mcl[i].args[3] = up->domid;

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
            FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);

//        printk("Mapped iso at %p\n", MMAP_VADDR(pending_idx, i));
        i++;
    }

//    printk("Well we got this far!\n");

    if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) )
        BUG();

    {
        int j;
        for ( j = 0; j < i; j++ )
        {
            if ( unlikely(mcl[j].args[5] != 0) )
            {
                printk("invalid buffer %d -- could not remap it\n", j);
                fast_flush_area(pending_idx, i);
                printk("sending invalid descriptor\n");
                goto bad_descriptor;
            }
        }
    }
 no_remap:

    ASSERT(i <= MMAP_PAGES_PER_REQUEST);
    ASSERT(i * PAGE_SIZE >= req->length);

    /* We have to do this because some things might complete out of order. */
    pending_req = &pending_reqs[pending_idx];
    pending_req->usbif_priv = up;
    pending_req->id         = req->id;
    pending_req->operation  = req->operation;
    pending_req->nr_pages   = i;

    pending_cons++;

    usbif_get(up);

    /* Fill out an actual request for the USB layer. */
    purb = usb_alloc_urb(req->num_iso);

    if ( purb == NULL )
        goto no_mem;

    purb->dev = port->dev;
    purb->context = pending_req;
    purb->transfer_buffer = (void *)MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK);
    if ( buffer_mach == 0 )
        purb->transfer_buffer = NULL;
    purb->complete = __end_usb_io_op;
    purb->transfer_buffer_length = req->length;
    purb->transfer_flags = req->transfer_flags;

/*     if ( req->transfer_flags != 0 ) */
/*         dump_request(req); */

    purb->pipe = 0;
    purb->pipe |= req->direction << 7;
    purb->pipe |= port->dev->devnum << 8;
    purb->pipe |= req->speed << 26;
    purb->pipe |= req->pipe_type << 30;
    purb->pipe |= req->endpoint << 15;
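
    /*
     * Illustrative only: the pipe word assembled above follows the classic
     * Linux USB pipe encoding.  A bulk IN transfer to endpoint 1 of device 3
     * gives (1 << 7) | (3 << 8) | (1 << 15) | (3 << 30):
     *
     *   bit  7      direction (1 = IN)
     *   bits 8-14   device address (the real port->dev->devnum, not the
     *               guest-assigned address)
     *   bits 15-18  endpoint number
     *   bit  26     speed (1 = low speed)
     *   bits 30-31  pipe type (0 iso, 1 interrupt, 2 control, 3 bulk)
     */
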
    purb->number_of_packets = req->num_iso;

    /* Make sure there's always some kind of timeout. */
    purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000
                                         : 1000;

    purb->setup_packet = setup;

    if ( req->pipe_type == 0 ) /* ISO */
    {
        int j;
        usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1);

//        printk("Reading iso sched at %p\n", iso_sched);

        /* If we're dealing with an iso pipe, we need to copy in a schedule. */
        for ( j = 0; j < req->num_iso; j++ )
        {
            purb->iso_frame_desc[j].length = iso_sched[j].length;
            purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset;
            iso_sched[j].status = 0;
        }
        pending_req->iso_sched = iso_sched;
    }

    {
        int ret;
        ret = usb_submit_urb(purb);

//        dump_urb(purb);

        if ( ret != 0 )
            goto bad_descriptor; /* XXX free pending here! */
    }

    return;

 bad_descriptor:
    kfree(setup);
    if ( purb != NULL )
        usb_free_urb(purb);
    make_response(up, req->id, req->operation, -EINVAL, 0, 0);
    return;

 no_mem:
    if ( setup != NULL )
        kfree(setup);
    make_response(up, req->id, req->operation, -ENOMEM, 0, 0);
    return;
}
/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */


/* NB. The caller must hold up->usb_ring_lock: both call paths (the URB
 * completion callback and the request-consumption loop in do_usb_io_op)
 * already do, so taking the lock again here would self-deadlock. */
static void make_response(usbif_priv_t *up, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long length)
{
    usbif_response_t *resp;

#if 0
    printk("usbback: Sending response:\n");
    printk("         id     = 0x%lx\n", id);
    printk("         op     = %d\n", op);
    printk("         status = %d\n", st);
    printk("         data   = %d\n", inband);
    printk("         length = %ld\n", length);
#endif

    /* Place on the response ring for the relevant domain. */
    resp = &up->usb_ring_base->
        ring[MASK_USBIF_IDX(up->usb_resp_prod)].resp;
    resp->id        = id;
    resp->operation = op;
    resp->status    = st;
    resp->data      = inband;
    resp->length    = length;
    wmb(); /* Ensure other side can see the response fields. */
    up->usb_ring_base->resp_prod = ++up->usb_resp_prod;

    /* Kick the relevant domain. */
    notify_via_evtchn(up->evtchn);
}
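
/*
 * Illustrative only: a hypothetical frontend-side consumer of the ring
 * written to above; it is not part of this driver.  The wmb() before
 * publishing resp_prod is what lets the frontend trust the response fields
 * once it observes the new producer index.
 */
#if 0 /* sketch, not built */
static void frontend_poll_responses(usbif_t *ring, USBIF_RING_IDX *resp_cons)
{
    USBIF_RING_IDX rp = ring->resp_prod;
    rmb(); /* Only read responses after seeing the updated resp_prod. */
    while ( *resp_cons != rp )
    {
        usbif_response_t *resp = &ring->ring[MASK_USBIF_IDX(*resp_cons)].resp;
        /* ... match resp->id against the outstanding request table ... */
        (*resp_cons)++;
    }
}
#endif
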
/**
 * usbif_claim_port - claim devices on a port on behalf of guest
 *
 * Once completed, this will ensure that any device attached to that
 * port is claimed by this driver for use by the guest.
 */
int usbif_claim_port(usbif_be_claim_port_t *msg)
{
    owned_port_t *o_p;

    /* Sanity... */
    if ( usbif_find_port(msg->path) != NULL )
    {
        printk("usbback: Attempted to claim USB port "
               "we already own!\n");
        return -EINVAL;
    }

    /* No need for a slab cache - this should be infrequent.  Allocate
     * before taking the lock, since GFP_KERNEL may sleep. */
    o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL);

    if ( o_p == NULL )
        return -ENOMEM;

    spin_lock_irq(&owned_ports_lock);

    o_p->enabled = 0;
    o_p->usbif_priv = usbif_find(msg->domid);
    o_p->guest_port = msg->usbif_port;
    o_p->dev_present = 0;
    o_p->guest_address = 0; /* Default address. */

    strcpy(o_p->path, msg->path);

    list_add(&o_p->list, &owned_ports);

    printk("usbback: Claimed USB port (%s) for %d.%d\n", o_p->path,
           msg->domid, msg->usbif_port);

    spin_unlock_irq(&owned_ports_lock);

    /* Force a reprobe for unclaimed devices. */
    usb_scan_devices();

    return 0;
}
owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req)
{
    unsigned long flags;
    struct list_head *port;

    /* I'm assuming this is not called from IRQ context - correct?  I think
     * it's probably only called in response to control messages or plug events
     * in the USB hub kernel thread, so should be OK. */
    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each(port, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( p->usbif_priv == up && p->guest_address == req->devnum && p->enabled )
        {
#if 0
            printk("Found port for devnum %d\n", req->devnum);

            dump_port(p);
#endif
            /* Drop the lock before returning, as usbif_find_port does. */
            spin_unlock_irqrestore(&owned_ports_lock, flags);
            return p;
        }
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return NULL;
}
owned_port_t *usbif_find_port(char *path)
{
    struct list_head *port;
    unsigned long flags;

    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each(port, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( !strcmp(path, p->path) )
        {
            spin_unlock_irqrestore(&owned_ports_lock, flags);
            return p;
        }
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return NULL;
}
static void *probe(struct usb_device *dev, unsigned iface,
                   const struct usb_device_id *id)
{
    owned_port_t *p;

    /* We don't care what the device is - if we own the port, we want it.  We
     * don't deal with device-specifics in this driver, so we don't care what
     * the device actually is ;-) */
    if ( ( p = usbif_find_port(dev->devpath) ) != NULL )
    {
        printk("usbback: claimed device attached to owned port\n");

        p->dev_present = 1;
        p->dev = dev;
        set_bit(iface, &p->ifaces);

        return p->usbif_priv;
    }
    else
        printk("usbback: hotplug for non-owned port (%s), ignoring\n", dev->devpath);

    return NULL;
}

static void disconnect(struct usb_device *dev, void *usbif)
{
    /* Note the device is removed so we can tell the guest when it probes. */
    owned_port_t *port = usbif_find_port(dev->devpath);

    if ( port == NULL ) /* Shouldn't happen: we only see ports we claimed. */
        return;

    port->dev_present = 0;
    port->dev = NULL;
    port->ifaces = 0;
}
struct usb_driver driver =
{
    .owner      = THIS_MODULE,
    .name       = "Xen USB Backend",
    .probe      = probe,
    .disconnect = disconnect,
    .id_table   = NULL,
};

/* __usbif_release_port - internal mechanics for releasing a port */
void __usbif_release_port(owned_port_t *p)
{
    int i;

    for ( i = 0; p->ifaces != 0; i++ )
        if ( p->ifaces & ( 1 << i ) )
        {
            usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i));
            clear_bit(i, &p->ifaces);
        }
    list_del(&p->list);

    /* Reset the real device.  We don't simulate disconnect / probe for other
     * drivers in this kernel because we assume the device is completely under
     * the control of ourselves (i.e. the guest!).  This should ensure that the
     * device is in a sane state for the next customer ;-) */
/*     if ( p->dev != NULL) */
/*         usb_reset_device(p->dev); */

    kfree(p);
}
/**
 * usbif_release_port - stop claiming devices on a port on behalf of guest
 */
void usbif_release_port(usbif_be_release_port_t *msg)
{
    owned_port_t *p;

    /* Look the port up before taking the lock: usbif_find_port acquires
     * owned_ports_lock itself, so calling it with the lock held would
     * deadlock. */
    p = usbif_find_port(msg->path);
    if ( p == NULL )
        return;

    spin_lock_irq(&owned_ports_lock);
    __usbif_release_port(p);
    spin_unlock_irq(&owned_ports_lock);
}

void usbif_release_ports(usbif_priv_t *up)
{
    struct list_head *port, *tmp;
    unsigned long flags;

    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each_safe(port, tmp, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( p->usbif_priv == up )
            __usbif_release_port(p);
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);
}
static int __init usbif_init(void)
{
    int i;

    if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
         !(xen_start_info.flags & SIF_USB_BE_DOMAIN) )
        return 0;

    /* The USB core may invoke our probe() handler as soon as we register,
     * so the port list and its lock must be ready first. */
    INIT_LIST_HEAD(&owned_ports);
    spin_lock_init(&owned_ports_lock);

    usb_register(&driver);

    usbif_interface_init();

    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
        BUG();

    pending_cons = 0;
    pending_prod = MAX_PENDING_REQS;
    memset(pending_reqs, 0, sizeof(pending_reqs));
    for ( i = 0; i < MAX_PENDING_REQS; i++ )
        pending_ring[i] = i;

    spin_lock_init(&usbio_schedule_list_lock);
    INIT_LIST_HEAD(&usbio_schedule_list);

    if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
        BUG();

    usbif_ctrlif_init();

    printk("Xen USB Backend Initialised\n");

    return 0;
}

__initcall(usbif_init);