debuggers.hg

view linux-2.6.10-xen-sparse/drivers/xen/usbback/usbback.c @ 3758:9f7935ea4606

bitkeeper revision 1.1159.212.128 (4208d72fZEHIE9NOZZbr91V7R-3gUg)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 08 15:13:51 2005 +0000 (2005-02-08)
parents 924777207448 f504382b179f
children f5f2757b3aa2
line source
1 /******************************************************************************
2 * arch/xen/drivers/usbif/backend/main.c
3 *
4 * Backend for the Xen virtual USB driver - provides an abstraction of a
5 * USB host controller to the corresponding frontend driver.
6 *
7 * by Mark Williamson
8 * Copyright (c) 2004 Intel Research Cambridge
9 * Copyright (c) 2004, 2005 Mark Williamson
10 *
11 * Based on arch/xen/drivers/blkif/backend/main.c
12 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
13 */
15 #include "common.h"
18 #include <linux/list.h>
19 #include <linux/usb.h>
20 #include <linux/spinlock.h>
21 #include <linux/module.h>
22 #include <linux/tqueue.h>
/*
 * This is rather arbitrary.
 */
/* Max in-flight requests; must be a power of two for MASK_PEND_IDX(). */
#define MAX_PENDING_REQS 4
/* Requests serviced per interface per scheduler pass (round-robin fairness). */
#define BATCH_PER_DOMAIN 1

/* Base of the contiguous virtual area used to map guest buffer pages. */
static unsigned long mmap_vstart;

/* Needs to be sufficiently large that we can map the (large) buffers
 * the USB mass storage driver wants. */
#define MMAP_PAGES_PER_REQUEST \
    (128)
#define MMAP_PAGES \
    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)

/* Virtual address of page '_seg' within request slot '_req''s mapping area. */
#define MMAP_VADDR(_req,_seg) \
    (mmap_vstart + \
     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
     ((_seg) * PAGE_SIZE))

/* Global list of claimed physical ports, protected by owned_ports_lock. */
static spinlock_t owned_ports_lock;
LIST_HEAD(owned_ports);
/* A list of these structures is used to track ownership of physical USB
 * ports. */
typedef struct
{
    usbif_priv_t *usbif_priv;    /* Backend interface (guest) that owns us. */
    char path[16];               /* Physical device path of the port. */
    int guest_port;              /* Port number as exposed to the guest. */
    int enabled;                 /* Non-zero once the virtual port is enabled. */
    struct list_head list;       /* Link on the global owned_ports list. */
    unsigned long guest_address; /* The USB device address that has been
                                  * assigned by the guest. */
    int dev_present;             /* Is there a device present? */
    struct usb_device * dev;     /* Host-side device, valid when dev_present. */
    unsigned long ifaces;        /* What interfaces are present on this device? */
} owned_port_t;
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it.  When the request completes, the specified
 * domain has a response queued for it, with the saved 'id' passed back.
 */
typedef struct {
    usbif_priv_t *usbif_priv; /* Interface the request arrived on. */
    usbif_iso_t *iso_sched;   /* Mapped ISO schedule page (ISO requests only). */
    unsigned long id;         /* Guest-supplied cookie, echoed in the response. */
    int nr_pages;             /* Number of guest pages mapped for this request. */
    unsigned short operation; /* USBIF_OP_* code, echoed in the response. */
    int status;               /* Completion status returned to the guest. */
} pending_req_t;
/*
 * We can't allocate pending_req's in order, since they may complete out of
 * order.  We therefore maintain an allocation ring.  This ring also indicates
 * when enough work has been passed down -- at that point the allocation ring
 * will be empty.
 */
static pending_req_t pending_reqs[MAX_PENDING_REQS];
/* Ring of free slot indices into pending_reqs[]. */
static unsigned char pending_ring[MAX_PENDING_REQS];
/* Protects pending_prod (the free side); pending_cons is only advanced by
 * the single scheduler thread. */
static spinlock_t pend_prod_lock;

/* NB. We use a different index type to differentiate from shared usb rings. */
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
/* Number of request slots currently in use. */
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do);
static void make_response(usbif_priv_t *usbif, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long actual_length);
static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port);
static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req);
static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid);
static owned_port_t *usbif_find_port(char *);
/******************************************************************
 * PRIVATE DEBUG FUNCTIONS
 */

#undef DEBUG
#ifdef DEBUG

/* Dump an owned_port_t to the kernel log (debug builds only). */
static void dump_port(owned_port_t *p)
{
    printk(KERN_DEBUG "owned_port_t @ %p\n"
           " usbif_priv @ %p\n"
           " path: %s\n"
           " guest_port: %d\n"
           " guest_address: %ld\n"
           " dev_present: %d\n"
           " dev @ %p\n"
           " ifaces: 0x%lx\n",
           p, p->usbif_priv, p->path, p->guest_port, p->guest_address,
           p->dev_present, p->dev, p->ifaces);
}

/* Dump an incoming usbif_request_t from the shared ring. */
static void dump_request(usbif_request_t *req)
{
    printk(KERN_DEBUG "id = 0x%lx\n"
           "devnum %d\n"
           "endpoint 0x%x\n"
           "direction %d\n"
           "speed %d\n"
           "pipe_type 0x%x\n"
           "transfer_buffer 0x%lx\n"
           "length 0x%lx\n"
           "transfer_flags 0x%lx\n"
           "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n"
           "iso_schedule = 0x%lx\n"
           "num_iso %ld\n",
           req->id, req->devnum, req->endpoint, req->direction, req->speed,
           req->pipe_type, req->transfer_buffer, req->length,
           req->transfer_flags, req->setup[0], req->setup[1], req->setup[2],
           req->setup[3], req->setup[4], req->setup[5], req->setup[6],
           req->setup[7], req->iso_schedule, req->num_iso);
}

/* Dump the interesting fields of a struct urb before/after submission. */
static void dump_urb(struct urb *urb)
{
    printk(KERN_DEBUG "dumping urb @ %p\n", urb);

#define DUMP_URB_FIELD(name, format) \
    printk(KERN_DEBUG " " # name " " format "\n", urb-> name)

    DUMP_URB_FIELD(pipe, "0x%x");
    DUMP_URB_FIELD(status, "%d");
    DUMP_URB_FIELD(transfer_flags, "0x%x");
    DUMP_URB_FIELD(transfer_buffer, "%p");
    DUMP_URB_FIELD(transfer_buffer_length, "%d");
    DUMP_URB_FIELD(actual_length, "%d");
}

/* Dump an outgoing usbif_response_t before pushing it to the ring. */
static void dump_response(usbif_response_t *resp)
{
    printk(KERN_DEBUG "usbback: Sending response:\n"
           " id = 0x%x\n"
           " op = %d\n"
           " status = %d\n"
           " data = %d\n"
           " length = %d\n",
           resp->id, resp->op, resp->status, resp->data, resp->length);
}

#else /* DEBUG */

/* Debugging disabled: compile the dumpers away to no-ops. */
#define dump_port(blah) ((void)0)
#define dump_request(blah) ((void)0)
#define dump_urb(blah) ((void)0)
#define dump_response(blah) ((void)0)

#endif /* DEBUG */
182 /******************************************************************
183 * MEMORY MANAGEMENT
184 */
186 static void fast_flush_area(int idx, int nr_pages)
187 {
188 multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
189 int i;
191 for ( i = 0; i < nr_pages; i++ )
192 {
193 mcl[i].op = __HYPERVISOR_update_va_mapping;
194 mcl[i].args[0] = MMAP_VADDR(idx, i);
195 mcl[i].args[1] = 0;
196 mcl[i].args[2] = 0;
197 }
199 mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
200 if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
201 BUG();
202 }
205 /******************************************************************
206 * USB INTERFACE SCHEDULER LIST MAINTENANCE
207 */
/* Interfaces with queued work, serviced round-robin by the scheduler
 * thread.  Protected by usbio_schedule_list_lock. */
static struct list_head usbio_schedule_list;
static spinlock_t usbio_schedule_list_lock;

/* An interface is on the schedule list iff its list hook is linked in;
 * the hook's 'next' pointer is NULLed whenever the entry is removed. */
static int __on_usbif_list(usbif_priv_t *up)
{
    return up->usbif_list.next != NULL;
}
/* Unlink 'up' from the scheduler list (if present) and drop the reference
 * the list held.  Safe to call when the interface is not queued. */
void remove_from_usbif_list(usbif_priv_t *up)
{
    unsigned long flags;
    /* Cheap unlocked test first; racy, so re-tested under the lock below. */
    if ( !__on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( __on_usbif_list(up) )
    {
        list_del(&up->usbif_list);
        up->usbif_list.next = NULL; /* mark as off-list for __on_usbif_list() */
        usbif_put(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}
/* Queue 'up' at the tail of the scheduler list (if not already queued and
 * still CONNECTED), taking a reference on behalf of the list. */
static void add_to_usbif_list_tail(usbif_priv_t *up)
{
    unsigned long flags;
    /* Cheap unlocked test first; racy, so re-tested under the lock below. */
    if ( __on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( !__on_usbif_list(up) && (up->status == CONNECTED) )
    {
        list_add_tail(&up->usbif_list, &usbio_schedule_list);
        usbif_get(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}
/* Return request slot 'pending_idx' to the free ring; may be called from
 * the URB completion path, hence the irq-safe lock. */
void free_pending(int pending_idx)
{
    unsigned long flags;

    /* Free the pending request. */
    spin_lock_irqsave(&pend_prod_lock, flags);
    pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
    spin_unlock_irqrestore(&pend_prod_lock, flags);
}
254 /******************************************************************
255 * COMPLETION CALLBACK -- Called as urb->complete()
256 */
258 static void maybe_trigger_usbio_schedule(void);
260 static void __end_usb_io_op(struct urb *purb)
261 {
262 pending_req_t *pending_req;
263 int pending_idx;
265 pending_req = purb->context;
267 pending_idx = pending_req - pending_reqs;
269 ASSERT(purb->actual_length <= purb->transfer_buffer_length);
270 ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE);
272 /* An error fails the entire request. */
273 if ( purb->status )
274 {
275 printk(KERN_WARNING "URB @ %p failed. Status %d\n", purb, purb->status);
276 }
278 if ( usb_pipetype(purb->pipe) == 0 )
279 {
280 int i;
281 usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1);
283 ASSERT(sched == pending_req->sched);
285 /* If we're dealing with an iso pipe, we need to copy back the schedule. */
286 for ( i = 0; i < purb->number_of_packets; i++ )
287 {
288 sched[i].length = purb->iso_frame_desc[i].actual_length;
289 ASSERT(sched[i].buffer_offset ==
290 purb->iso_frame_desc[i].offset);
291 sched[i].status = purb->iso_frame_desc[i].status;
292 }
293 }
295 fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages);
297 kfree(purb->setup_packet);
299 make_response(pending_req->usbif_priv, pending_req->id,
300 pending_req->operation, pending_req->status, 0, purb->actual_length);
301 usbif_put(pending_req->usbif_priv);
303 usb_free_urb(purb);
305 free_pending(pending_idx);
307 rmb();
309 /* Check for anything still waiting in the rings, having freed a request... */
310 maybe_trigger_usbio_schedule();
311 }
313 /******************************************************************
314 * SCHEDULER FUNCTIONS
315 */
static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait);

/*
 * Kernel-thread body: the I/O scheduler.  Sleeps until there is both a free
 * pending-request slot and at least one interface queued on
 * usbio_schedule_list, then services a batch of requests from each queued
 * interface in round-robin order.  Never returns.
 */
static int usbio_schedule(void *arg)
{
    DECLARE_WAITQUEUE(wq, current);

    usbif_priv_t *up;
    struct list_head *ent;

    daemonize();

    for ( ; ; )
    {
        /* Wait for work to do.  NB: the task state must be set before the
         * wake-up condition is tested, so a concurrent wake-up isn't lost. */
        add_wait_queue(&usbio_schedule_wait, &wq);
        set_current_state(TASK_INTERRUPTIBLE);
        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
             list_empty(&usbio_schedule_list) )
            schedule();
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&usbio_schedule_wait, &wq);

        /* Queue up a batch of requests. */
        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
                !list_empty(&usbio_schedule_list) )
        {
            ent = usbio_schedule_list.next;
            up = list_entry(ent, usbif_priv_t, usbif_list);
            usbif_get(up); /* hold a ref while we service this interface */
            remove_from_usbif_list(up);
            /* Re-queue the interface if it still has requests outstanding. */
            if ( do_usb_io_op(up, BATCH_PER_DOMAIN) )
                add_to_usbif_list_tail(up);
            usbif_put(up);
        }
    }
}
/* Wake the scheduler thread if any interface has work queued. */
static void maybe_trigger_usbio_schedule(void)
{
    /*
     * Needed so that two processes, who together make the following predicate
     * true, don't both read stale values and evaluate the predicate
     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
     */
    smp_mb();

    if ( !list_empty(&usbio_schedule_list) )
        wake_up(&usbio_schedule_wait);
}
368 /******************************************************************************
369 * NOTIFICATION FROM GUEST OS.
370 */
/* Event-channel interrupt from the frontend: queue the interface for
 * servicing and kick the scheduler thread. */
irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    usbif_priv_t *up = dev_id;

    smp_mb();

    add_to_usbif_list_tail(up);

    /* Will in fact /always/ trigger an io schedule in this case. */
    maybe_trigger_usbio_schedule();

    return IRQ_HANDLED;
}
388 /******************************************************************
389 * DOWNWARD CALLS -- These interface with the usb-device layer proper.
390 */
/*
 * Consume up to 'max_to_do' requests from 'up's shared ring and dispatch
 * them.  Returns non-zero if requests remain on the ring (caller should
 * re-queue the interface), zero once the ring has been drained.
 */
static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
{
    usbif_back_ring_t *usb_ring = &up->usb_ring;
    usbif_request_t *req;
    RING_IDX i, rp;
    int more_to_do = 0;

    rp = usb_ring->sring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */

    /* Take items off the comms ring, taking care not to overflow. */
    for ( i = usb_ring->req_cons;
          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(USBIF_RING, usb_ring, i);
          i++ )
    {
        /* Stop early if the batch quota or the pending-slot pool runs out;
         * report that work is still outstanding. */
        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
        {
            more_to_do = 1;
            break;
        }

        req = RING_GET_REQUEST(USBIF_RING, usb_ring, i);

        switch ( req->operation )
        {
        case USBIF_OP_PROBE:
            dispatch_usb_probe(up, req->id, req->port);
            break;

        case USBIF_OP_IO:
            /* Assemble an appropriate URB. */
            dispatch_usb_io(up, req);
            break;

        case USBIF_OP_RESET:
            dispatch_usb_reset(up, req->port);
            break;

        default:
            DPRINTK("error: unknown USB io operation [%d]\n",
                    req->operation);
            make_response(up, req->id, req->operation, -EINVAL, 0, 0);
            break;
        }
    }

    usb_ring->req_cons = i;

    return more_to_do;
}
443 static owned_port_t *find_guest_port(usbif_priv_t *up, int port)
444 {
445 unsigned long flags;
446 struct list_head *l;
448 spin_lock_irqsave(&owned_ports_lock, flags);
449 list_for_each(l, &owned_ports)
450 {
451 owned_port_t *p = list_entry(l, owned_port_t, list);
452 if(p->usbif_priv == up && p->guest_port == port)
453 {
454 spin_unlock_irqrestore(&owned_ports_lock, flags);
455 return p;
456 }
457 }
458 spin_unlock_irqrestore(&owned_ports_lock, flags);
460 return NULL;
461 }
463 static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid)
464 {
465 owned_port_t *port = find_guest_port(up, portid);
466 int ret = 0;
469 /* Allowing the guest to actually reset the device causes more problems
470 * than it's worth. We just fake it out in software but we will do a real
471 * reset when the interface is destroyed. */
473 dump_port(port);
475 port->guest_address = 0;
476 /* If there's an attached device then the port is now enabled. */
477 if ( port->dev_present )
478 port->enabled = 1;
479 else
480 port->enabled = 0;
482 make_response(up, 0, USBIF_OP_RESET, ret, 0, 0);
483 }
485 static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid)
486 {
487 owned_port_t *port = find_guest_port(up, portid);
488 int ret;
490 if ( port != NULL )
491 ret = port->dev_present;
492 else
493 {
494 ret = -EINVAL;
495 printk(KERN_INFO "dispatch_usb_probe(): invalid port probe request "
496 "(port %ld)\n", portid);
497 }
499 /* Probe result is sent back in-band. Probes don't have an associated id
500 * right now... */
501 make_response(up, id, USBIF_OP_PROBE, ret, portid, 0);
502 }
504 owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req);
506 static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req)
507 {
508 unsigned long buffer_mach;
509 int i = 0, offset = 0,
510 pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
511 pending_req_t *pending_req;
512 unsigned long remap_prot;
513 multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
514 struct urb *purb = NULL;
515 owned_port_t *port;
516 unsigned char *setup;
518 dump_request(req);
520 if ( NR_PENDING_REQS == MAX_PENDING_REQS )
521 {
522 printk(KERN_WARNING "usbback: Max requests already queued. "
523 "Giving up!\n");
525 return;
526 }
528 port = find_port_for_request(up, req);
530 if ( port == NULL )
531 {
532 printk(KERN_WARNING "No such device! (%d)\n", req->devnum);
533 dump_request(req);
535 make_response(up, req->id, req->operation, -ENODEV, 0, 0);
536 return;
537 }
538 else if ( !port->dev_present )
539 {
540 /* In normal operation, we'll only get here if a device is unplugged
541 * and the frontend hasn't noticed yet. */
542 make_response(up, req->id, req->operation, -ENODEV, 0, 0);
543 return;
544 }
547 setup = kmalloc(8, GFP_KERNEL);
549 if ( setup == NULL )
550 goto no_mem;
552 /* Copy request out for safety. */
553 memcpy(setup, req->setup, 8);
555 if( setup[0] == 0x0 && setup[1] == 0x5)
556 {
557 /* To virtualise the USB address space, we need to intercept
558 * set_address messages and emulate. From the USB specification:
559 * bmRequestType = 0x0;
560 * Brequest = SET_ADDRESS (i.e. 0x5)
561 * wValue = device address
562 * wIndex = 0
563 * wLength = 0
564 * data = None
565 */
566 /* Store into the guest transfer buffer using cpu_to_le16 */
567 port->guest_address = le16_to_cpu(*(u16 *)(setup + 2));
568 /* Make a successful response. That was easy! */
570 make_response(up, req->id, req->operation, 0, 0, 0);
572 kfree(setup);
573 return;
574 }
575 else if ( setup[0] == 0x0 && setup[1] == 0x9 )
576 {
577 /* The host kernel needs to know what device configuration is in use
578 * because various error checks get confused otherwise. We just do
579 * configuration settings here, under controlled conditions.
580 */
582 /* Ignore configuration setting and hope that the host kernel
583 did it right. */
584 /* usb_set_configuration(port->dev, setup[2]); */
586 make_response(up, req->id, req->operation, 0, 0, 0);
588 kfree(setup);
589 return;
590 }
592 else if ( setup[0] == 0x1 && setup[1] == 0xB )
593 {
594 /* The host kernel needs to know what device interface is in use
595 * because various error checks get confused otherwise. We just do
596 * configuration settings here, under controlled conditions.
597 */
598 usb_set_interface(port->dev, (setup[4] | setup[5] << 8),
599 (setup[2] | setup[3] << 8) );
601 make_response(up, req->id, req->operation, 0, 0, 0);
603 kfree(setup);
604 return;
605 }
607 if ( ( req->transfer_buffer - (req->transfer_buffer & PAGE_MASK)
608 + req->length )
609 > MMAP_PAGES_PER_REQUEST * PAGE_SIZE )
610 {
611 printk(KERN_WARNING "usbback: request of %lu bytes too large\n",
612 req->length);
613 make_response(up, req->id, req->operation, -EINVAL, 0, 0);
614 kfree(setup);
615 return;
616 }
618 buffer_mach = req->transfer_buffer;
620 if( buffer_mach == 0 )
621 goto no_remap;
623 ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST);
624 ASSERT(buffer_mach);
626 /* Always map writeable for now. */
627 remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
629 for ( i = 0, offset = 0; offset < req->length;
630 i++, offset += PAGE_SIZE )
631 {
632 mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
633 mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
634 mcl[i].args[1] = ((buffer_mach & PAGE_MASK) + offset) | remap_prot;
635 mcl[i].args[2] = 0;
636 mcl[i].args[3] = up->domid;
638 phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
639 FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);
641 ASSERT(virt_to_machine(MMAP_VADDR(pending_idx, i))
642 == buffer_mach + i << PAGE_SHIFT);
643 }
645 if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
646 {
647 /* Map in ISO schedule, if necessary. */
648 mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
649 mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
650 mcl[i].args[1] = (req->iso_schedule & PAGE_MASK) | remap_prot;
651 mcl[i].args[2] = 0;
652 mcl[i].args[3] = up->domid;
654 phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
655 FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);
657 i++;
658 }
660 if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) )
661 BUG();
663 {
664 int j;
665 for ( j = 0; j < i; j++ )
666 {
667 if ( unlikely(mcl[j].args[5] != 0) )
668 {
669 printk(KERN_WARNING
670 "invalid buffer %d -- could not remap it\n", j);
671 fast_flush_area(pending_idx, i);
672 goto bad_descriptor;
673 }
674 }
675 }
677 no_remap:
679 ASSERT(i <= MMAP_PAGES_PER_REQUEST);
680 ASSERT(i * PAGE_SIZE >= req->length);
682 /* We have to do this because some things might complete out of order. */
683 pending_req = &pending_reqs[pending_idx];
684 pending_req->usbif_priv= up;
685 pending_req->id = req->id;
686 pending_req->operation = req->operation;
687 pending_req->nr_pages = i;
689 pending_cons++;
691 usbif_get(up);
693 /* Fill out an actual request for the USB layer. */
694 purb = usb_alloc_urb(req->num_iso);
696 if ( purb == NULL )
697 {
698 usbif_put(up);
699 free_pending(pending_idx);
700 goto no_mem;
701 }
703 purb->dev = port->dev;
704 purb->context = pending_req;
705 purb->transfer_buffer =
706 (void *)MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK);
707 if(buffer_mach == 0)
708 purb->transfer_buffer = NULL;
709 purb->complete = __end_usb_io_op;
710 purb->transfer_buffer_length = req->length;
711 purb->transfer_flags = req->transfer_flags;
713 purb->pipe = 0;
714 purb->pipe |= req->direction << 7;
715 purb->pipe |= port->dev->devnum << 8;
716 purb->pipe |= req->speed << 26;
717 purb->pipe |= req->pipe_type << 30;
718 purb->pipe |= req->endpoint << 15;
720 purb->number_of_packets = req->num_iso;
722 /* Make sure there's always some kind of timeout. */
723 purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000
724 : 1000;
726 purb->setup_packet = setup;
728 if ( req->pipe_type == 0 ) /* ISO */
729 {
730 int j;
731 usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1);
733 /* If we're dealing with an iso pipe, we need to copy in a schedule. */
734 for ( j = 0; j < req->num_iso; j++ )
735 {
736 purb->iso_frame_desc[j].length = iso_sched[j].length;
737 purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset;
738 iso_sched[j].status = 0;
739 }
740 pending_req->iso_sched = iso_sched;
741 }
743 {
744 int ret;
745 ret = usb_submit_urb(purb);
747 dump_urb(purb);
749 if ( ret != 0 )
750 {
751 usbif_put(up);
752 free_pending(pending_idx);
753 goto bad_descriptor;
754 }
755 }
757 return;
759 bad_descriptor:
760 kfree ( setup );
761 if ( purb != NULL )
762 usb_free_urb(purb);
763 make_response(up, req->id, req->operation, -EINVAL, 0, 0);
764 return;
766 no_mem:
767 if ( setup != NULL )
768 kfree(setup);
769 make_response(up, req->id, req->operation, -ENOMEM, 0, 0);
770 return;
771 }
775 /******************************************************************
776 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
777 */
/*
 * Queue a response on 'up's shared ring and notify the frontend domain via
 * its event channel.  'inband' carries operation-specific in-band data
 * (e.g. probe results); 'length' is the actual transfer length.
 */
static void make_response(usbif_priv_t *up, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long length)
{
    usbif_response_t *resp;
    unsigned long flags;
    usbif_back_ring_t *usb_ring = &up->usb_ring;

    /* Place on the response ring for the relevant domain. */
    spin_lock_irqsave(&up->usb_ring_lock, flags);
    resp = RING_GET_RESPONSE(USBIF_RING, usb_ring, usb_ring->rsp_prod_pvt);
    resp->id        = id;
    resp->operation = op;
    resp->status    = st;
    resp->data      = inband;
    resp->length    = length;
    wmb(); /* Ensure other side can see the response fields. */

    dump_response(resp);

    usb_ring->rsp_prod_pvt++;
    RING_PUSH_RESPONSES(USBIF_RING, usb_ring);
    spin_unlock_irqrestore(&up->usb_ring_lock, flags);

    /* Kick the relevant domain. */
    notify_via_evtchn(up->evtchn);
}
808 /**
809 * usbif_claim_port - claim devices on a port on behalf of guest
810 *
811 * Once completed, this will ensure that any device attached to that
812 * port is claimed by this driver for use by the guest.
813 */
814 int usbif_claim_port(usbif_be_claim_port_t *msg)
815 {
816 owned_port_t *o_p;
818 /* Sanity... */
819 if ( usbif_find_port(msg->path) != NULL )
820 {
821 printk(KERN_WARNING "usbback: Attempted to claim USB port "
822 "we already own!\n");
823 return -EINVAL;
824 }
826 /* No need for a slab cache - this should be infrequent. */
827 o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL);
829 if ( o_p == NULL )
830 return -ENOMEM;
832 o_p->enabled = 0;
833 o_p->usbif_priv = usbif_find(msg->domid);
834 o_p->guest_port = msg->usbif_port;
835 o_p->dev_present = 0;
836 o_p->guest_address = 0; /* Default address. */
838 strcpy(o_p->path, msg->path);
840 spin_lock_irq(&owned_ports_lock);
842 list_add(&o_p->list, &owned_ports);
844 spin_unlock_irq(&owned_ports_lock);
846 printk(KERN_INFO "usbback: Claimed USB port (%s) for %d.%d\n", o_p->path,
847 msg->domid, msg->usbif_port);
849 /* Force a reprobe for unclaimed devices. */
850 usb_scan_devices();
852 return 0;
853 }
855 owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req)
856 {
857 unsigned long flags;
858 struct list_head *port;
860 /* I'm assuming this is not called from IRQ context - correct? I think
861 * it's probably only called in response to control messages or plug events
862 * in the USB hub kernel thread, so should be OK. */
863 spin_lock_irqsave(&owned_ports_lock, flags);
864 list_for_each(port, &owned_ports)
865 {
866 owned_port_t *p = list_entry(port, owned_port_t, list);
867 if(p->usbif_priv == up && p->guest_address == req->devnum && p->enabled )
868 {
869 dump_port(p);
871 spin_unlock_irqrestore(&owned_ports_lock, flags);
872 return p;
873 }
874 }
875 spin_unlock_irqrestore(&owned_ports_lock, flags);
877 return NULL;
878 }
880 owned_port_t *__usbif_find_port(char *path)
881 {
882 struct list_head *port;
884 list_for_each(port, &owned_ports)
885 {
886 owned_port_t *p = list_entry(port, owned_port_t, list);
887 if(!strcmp(path, p->path))
888 {
889 return p;
890 }
891 }
893 return NULL;
894 }
/* Locked lookup of an owned port by physical device path; returns NULL if
 * we do not own a port at that path. */
owned_port_t *usbif_find_port(char *path)
{
    owned_port_t *ret;
    unsigned long flags;

    spin_lock_irqsave(&owned_ports_lock, flags);
    ret = __usbif_find_port(path);
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return ret;
}
909 static void *probe(struct usb_device *dev, unsigned iface,
910 const struct usb_device_id *id)
911 {
912 owned_port_t *p;
914 /* We don't care what the device is - if we own the port, we want it. We
915 * don't deal with device-specifics in this driver, so we don't care what
916 * the device actually is ;-) */
917 if ( ( p = usbif_find_port(dev->devpath) ) != NULL )
918 {
919 printk(KERN_INFO "usbback: claimed device attached to owned port\n");
921 p->dev_present = 1;
922 p->dev = dev;
923 set_bit(iface, &p->ifaces);
925 return p->usbif_priv;
926 }
927 else
928 printk(KERN_INFO "usbback: hotplug for non-owned port (%s), ignoring\n",
929 dev->devpath);
932 return NULL;
933 }
935 static void disconnect(struct usb_device *dev, void *usbif)
936 {
937 /* Note the device is removed so we can tell the guest when it probes. */
938 owned_port_t *port = usbif_find_port(dev->devpath);
939 port->dev_present = 0;
940 port->dev = NULL;
941 port->ifaces = 0;
942 }
/* Registration with the USB core: routes probe/disconnect callbacks for
 * devices on claimed ports to this driver.  NULL id_table means we are
 * offered every device. */
struct usb_driver driver =
{
    .owner = THIS_MODULE,
    .name = "Xen USB Backend",
    .probe = probe,
    .disconnect = disconnect,
    .id_table = NULL,
};
/* __usbif_release_port - internal mechanics for releasing a port.
 * Releases every interface we claimed on the attached device, unlinks 'p'
 * from owned_ports and frees it.  Caller must hold owned_ports_lock. */
void __usbif_release_port(owned_port_t *p)
{
    int i;

    /* Walk the interface bitmap until every claimed interface is released. */
    for ( i = 0; p->ifaces != 0; i++)
        if ( p->ifaces & 1 << i )
        {
            usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i));
            clear_bit(i, &p->ifaces);
        }
    list_del(&p->list);

    /* Reset the real device.  We don't simulate disconnect / probe for other
     * drivers in this kernel because we assume the device is completely under
     * the control of ourselves (i.e. the guest!).  This should ensure that the
     * device is in a sane state for the next customer ;-) */

    /* MAW NB: we're not resetting the real device here.  This looks perfectly
     * valid to me but it causes memory corruption.  We seem to get away with not
     * resetting for now, although it'd be nice to have this tracked down. */
    /*    if ( p->dev != NULL) */
    /*        usb_reset_device(p->dev); */

    kfree(p);
}
982 /**
983 * usbif_release_port - stop claiming devices on a port on behalf of guest
984 */
985 void usbif_release_port(usbif_be_release_port_t *msg)
986 {
987 owned_port_t *p;
989 spin_lock_irq(&owned_ports_lock);
990 p = __usbif_find_port(msg->path);
991 __usbif_release_port(p);
992 spin_unlock_irq(&owned_ports_lock);
993 }
995 void usbif_release_ports(usbif_priv_t *up)
996 {
997 struct list_head *port, *tmp;
998 unsigned long flags;
1000 spin_lock_irqsave(&owned_ports_lock, flags);
1001 list_for_each_safe(port, tmp, &owned_ports)
1003 owned_port_t *p = list_entry(port, owned_port_t, list);
1004 if ( p->usbif_priv == up )
1005 __usbif_release_port(p);
1007 spin_unlock_irqrestore(&owned_ports_lock, flags);
1010 static int __init usbif_init(void)
1012 int i;
1014 if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
1015 !(xen_start_info.flags & SIF_USB_BE_DOMAIN) )
1016 return 0;
1018 if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
1019 BUG();
1021 pending_cons = 0;
1022 pending_prod = MAX_PENDING_REQS;
1023 memset(pending_reqs, 0, sizeof(pending_reqs));
1024 for ( i = 0; i < MAX_PENDING_REQS; i++ )
1025 pending_ring[i] = i;
1027 spin_lock_init(&pend_prod_lock);
1029 spin_lock_init(&owned_ports_lock);
1030 INIT_LIST_HEAD(&owned_ports);
1032 spin_lock_init(&usbio_schedule_list_lock);
1033 INIT_LIST_HEAD(&usbio_schedule_list);
1035 if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
1036 BUG();
1038 usbif_interface_init();
1040 usbif_ctrlif_init();
1042 usb_register(&driver);
1044 printk(KERN_INFO "Xen USB Backend Initialised");
1046 return 0;
1049 __initcall(usbif_init);