xen-vtx-unstable

annotate linux-2.6-xen-sparse/drivers/xen/usbfront/usbfront.c @ 6774:4d899a738d59

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 13 15:05:49 2005 +0000 (2005-09-13)
parents 9ead08216805 7d0fb56b4a91
children e7c7196fa329 8ca0f98ba8e2
rev   line source
cl349@4113 1 /*
cl349@4113 2 * Xen Virtual USB Frontend Driver
cl349@4113 3 *
cl349@4113 4 * This file contains the first version of the Xen virtual USB hub
cl349@4113 5 * that I've managed not to delete by mistake (3rd time lucky!).
cl349@4113 6 *
cl349@4113 7 * Based on Linux's uhci.c, original copyright notices are displayed
cl349@4113 8 * below. Portions also (c) 2004 Intel Research Cambridge
cl349@4113 9 * and (c) 2004, 2005 Mark Williamson
cl349@4113 10 *
cl349@4113 11 * Contact <mark.williamson@cl.cam.ac.uk> or
cl349@4113 12 * <xen-devel@lists.sourceforge.net> regarding this code.
cl349@4113 13 *
cl349@4113 14 * Still to be (maybe) implemented:
cl349@4113 15 * - migration / backend restart support?
cl349@4113 16 * - support for building / using as a module
cl349@4113 17 */
cl349@4113 18
cl349@4113 19 /*
cl349@4113 20 * Universal Host Controller Interface driver for USB.
cl349@4113 21 *
cl349@4113 22 * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
cl349@4113 23 *
cl349@4113 24 * (C) Copyright 1999 Linus Torvalds
cl349@4113 25 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
cl349@4113 26 * (C) Copyright 1999 Randy Dunlap
cl349@4113 27 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
cl349@4113 28 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
cl349@4113 29 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
cl349@4113 30 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
cl349@4113 31 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
cl349@4113 32 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
cl349@4113 33 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
cl349@4113 34 *
cl349@4113 35 * Intel documents this fairly well, and as far as I know there
cl349@4113 36 * are no royalties or anything like that, but even so there are
cl349@4113 37 * people who decided that they want to do the same thing in a
cl349@4113 38 * completely different way.
cl349@4113 39 *
cl349@4113 40 * WARNING! The USB documentation is downright evil. Most of it
cl349@4113 41 * is just crap, written by a committee. You're better off ignoring
cl349@4113 42 * most of it, the important stuff is:
cl349@4113 43 * - the low-level protocol (fairly simple but lots of small details)
cl349@4113 44 * - working around the horridness of the rest
cl349@4113 45 */
cl349@4113 46
cl349@4113 47 #include <linux/config.h>
cl349@4113 48 #include <linux/module.h>
cl349@4113 49 #include <linux/kernel.h>
cl349@4113 50 #include <linux/init.h>
cl349@4113 51 #include <linux/sched.h>
cl349@4113 52 #include <linux/delay.h>
cl349@4113 53 #include <linux/slab.h>
cl349@4113 54 #include <linux/smp_lock.h>
cl349@4113 55 #include <linux/errno.h>
cl349@4113 56 #include <linux/interrupt.h>
cl349@4113 57 #include <linux/spinlock.h>
cl349@4113 58 #ifdef CONFIG_USB_DEBUG
cl349@4113 59 #define DEBUG
cl349@4113 60 #else
cl349@4113 61 #undef DEBUG
cl349@4113 62 #endif
cl349@4113 63 #include <linux/usb.h>
cl349@4113 64
cl349@4113 65 #include <asm/irq.h>
cl349@4113 66 #include <asm/system.h>
cl349@4113 67
cl349@4113 68 #include "xhci.h"
cl349@4113 69
cl349@4113 70 #include "../../../../../drivers/usb/hcd.h"
cl349@4113 71
cl349@4113 72 #include <asm-xen/xen-public/io/usbif.h>
cl349@4113 73 #include <asm-xen/xen-public/io/domain_controller.h>
cl349@4113 74
cl349@4113 75 /*
cl349@4113 76 * Version Information
cl349@4113 77 */
cl349@4113 78 #define DRIVER_VERSION "v1.0"
cl349@4113 79 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \
cl349@4113 80 "Randy Dunlap, Georg Acher, Deti Fliegl, " \
cl349@4113 81 "Thomas Sailer, Roman Weissgaerber, Mark Williamson"
cl349@4113 82 #define DRIVER_DESC "Xen Virtual USB Host Controller Interface"
cl349@4113 83
cl349@4113 84 /*
cl349@4113 85 * debug = 0, no debugging messages
cl349@4113 86 * debug = 1, dump failed URB's except for stalls
cl349@4113 87 * debug = 2, dump all failed URB's (including stalls)
cl349@4113 88 */
cl349@4113 89 #ifdef DEBUG
cl349@4113 90 static int debug = 1;
cl349@4113 91 #else
cl349@4113 92 static int debug = 0;
cl349@4113 93 #endif
cl349@4113 94 MODULE_PARM(debug, "i");
cl349@4113 95 MODULE_PARM_DESC(debug, "Debug level");
cl349@4113 96 static char *errbuf;
cl349@4113 97 #define ERRBUF_LEN (PAGE_SIZE * 8)
cl349@4113 98
cl349@4113 99 static int rh_submit_urb(struct urb *urb);
cl349@4113 100 static int rh_unlink_urb(struct urb *urb);
cl349@4113 101 static int xhci_unlink_urb(struct urb *urb);
cl349@4113 102 static void xhci_call_completion(struct urb *urb);
cl349@4113 103 static void xhci_drain_ring(void);
cl349@4113 104 static void xhci_transfer_result(struct xhci *xhci, struct urb *urb);
cl349@4113 105 static void xhci_finish_completion(void);
cl349@4113 106
cl349@4113 107 #define MAX_URB_LOOP 2048 /* Maximum number of linked URB's */
cl349@4113 108
cl349@4113 109 static kmem_cache_t *xhci_up_cachep; /* urb_priv cache */
cl349@4113 110 static struct xhci *xhci; /* XHCI structure for the interface */
cl349@4113 111
cl349@4113 112 /******************************************************************************
cl349@4113 113 * DEBUGGING
cl349@4113 114 */
cl349@4113 115
cl349@4113 116 #ifdef DEBUG
cl349@4113 117
cl349@4113 118 static void dump_urb(struct urb *urb)
cl349@4113 119 {
cl349@4113 120 printk(KERN_DEBUG "dumping urb @ %p\n"
cl349@4113 121 " hcpriv = %p\n"
cl349@4113 122 " next = %p\n"
cl349@4113 123 " dev = %p\n"
cl349@4113 124 " pipe = 0x%lx\n"
cl349@4113 125 " status = %d\n"
cl349@4113 126 " transfer_flags = 0x%lx\n"
cl349@4113 127 " transfer_buffer = %p\n"
cl349@4113 128 " transfer_buffer_length = %d\n"
cl349@4113 129 " actual_length = %d\n"
cl349@4113 130 " bandwidth = %d\n"
cl349@4113 131 " setup_packet = %p\n",
cl349@4113 132 urb, urb->hcpriv, urb->next, urb->dev, urb->pipe, urb->status,
cl349@4113 133 urb->transfer_flags, urb->transfer_buffer,
cl349@4113 134 urb->transfer_buffer_length, urb->actual_length, urb->bandwidth,
cl349@4113 135 urb->setup_packet);
cl349@4113 136 if ( urb->setup_packet != NULL )
cl349@4113 137 printk(KERN_DEBUG
cl349@4113 138 "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
cl349@4113 139 urb->setup_packet[0], urb->setup_packet[1],
cl349@4113 140 urb->setup_packet[2], urb->setup_packet[3],
cl349@4113 141 urb->setup_packet[4], urb->setup_packet[5],
cl349@4113 142 urb->setup_packet[6], urb->setup_packet[7]);
cl349@4113 143 printk(KERN_DEBUG "complete = %p\n"
cl349@4113 144 "interval = %d\n", urb->complete, urb->interval);
cl349@4113 145
cl349@4113 146 }
cl349@4113 147
cl349@4113 148 static void xhci_show_resp(usbif_response_t *r)
cl349@4113 149 {
cl349@4113 150 printk(KERN_DEBUG "dumping response @ %p\n"
cl349@4113 151 " id=0x%lx\n"
cl349@4113 152 " op=0x%x\n"
cl349@4113 153 " data=0x%x\n"
cl349@4113 154 " status=0x%x\n"
cl349@4113 155 " length=0x%lx\n",
cl349@4113 156 r->id, r->operation, r->data, r->status, r->length);
cl349@4113 157 }
cl349@4113 158
/* Debug-build DPRINTK: forward straight to printk at KERN_DEBUG level.
 * BUGFIX: the original misspelled this "DPRINK", so every DPRINTK() call
 * site failed to compile whenever DEBUG was defined (the non-debug branch
 * below defines the correctly spelled no-op). */
#define DPRINTK(...) printk(KERN_DEBUG __VA_ARGS__)
cl349@4113 160
cl349@4113 161 #else /* DEBUG */
cl349@4113 162
cl349@4113 163 #define dump_urb(blah) ((void)0)
cl349@4113 164 #define xhci_show_resp(blah) ((void)0)
cl349@4113 165 #define DPRINTK(blah,...) ((void)0)
cl349@4113 166
cl349@4113 167 #endif /* DEBUG */
cl349@4113 168
cl349@4113 169 /******************************************************************************
cl349@4113 170 * RING REQUEST HANDLING
cl349@4113 171 */
cl349@4113 172
/* A ring is "plugged" when no further requests may be queued: either the
 * shared ring itself is full or the device is undergoing backend recovery.
 * The argument is parenthesized for macro hygiene (the original expanded
 * `_hc` bare, which would mis-bind for non-trivial argument expressions). */
#define RING_PLUGGED(_hc) ( RING_FULL(&(_hc)->usb_ring) || (_hc)->recovery )
mwilli2@4374 174
cl349@4113 175 /**
cl349@4113 176 * xhci_construct_isoc - add isochronous information to a request
cl349@4113 177 */
cl349@4113 178 static int xhci_construct_isoc(usbif_request_t *req, struct urb *urb)
cl349@4113 179 {
cl349@4113 180 usbif_iso_t *schedule;
cl349@4113 181 int i;
cl349@4113 182 struct urb_priv *urb_priv = urb->hcpriv;
cl349@4113 183
cl349@4113 184 req->num_iso = urb->number_of_packets;
cl349@4113 185 schedule = (usbif_iso_t *)__get_free_page(GFP_KERNEL);
cl349@4113 186
cl349@4113 187 if ( schedule == NULL )
cl349@4113 188 return -ENOMEM;
cl349@4113 189
cl349@4113 190 for ( i = 0; i < req->num_iso; i++ )
cl349@4113 191 {
cl349@4113 192 schedule[i].buffer_offset = urb->iso_frame_desc[i].offset;
cl349@4113 193 schedule[i].length = urb->iso_frame_desc[i].length;
cl349@4113 194 }
cl349@4113 195
cl349@4113 196 urb_priv->schedule = schedule;
kaf24@6279 197 req->iso_schedule = virt_to_mfn(schedule) << PAGE_SHIFT;
cl349@4113 198
cl349@4113 199 return 0;
cl349@4113 200 }
cl349@4113 201
cl349@4113 202 /**
cl349@4113 203 * xhci_queue_req - construct and queue request for an URB
cl349@4113 204 */
cl349@4113 205 static int xhci_queue_req(struct urb *urb)
cl349@4113 206 {
mwilli2@4117 207 unsigned long flags;
cl349@4113 208 usbif_request_t *req;
cl349@4113 209 usbif_front_ring_t *usb_ring = &xhci->usb_ring;
cl349@4113 210
cl349@4113 211 #if DEBUG
cl349@4113 212 printk(KERN_DEBUG
cl349@4113 213 "usbif = %p, req_prod = %d (@ 0x%lx), resp_prod = %d, resp_cons = %d\n",
kaf24@6279 214 usbif, usbif->req_prod, virt_to_mfn(&usbif->req_prod),
cl349@4113 215 usbif->resp_prod, xhci->usb_resp_cons);
cl349@4113 216 #endif
cl349@4113 217
mwilli2@4117 218 spin_lock_irqsave(&xhci->ring_lock, flags);
cl349@4113 219
mwilli2@4374 220 if ( RING_PLUGGED(xhci) )
cl349@4113 221 {
cl349@4113 222 printk(KERN_WARNING
mwilli2@4374 223 "xhci_queue_req(): USB ring plugged, not queuing request\n");
mwilli2@4117 224 spin_unlock_irqrestore(&xhci->ring_lock, flags);
cl349@4113 225 return -ENOBUFS;
cl349@4113 226 }
cl349@4113 227
cl349@4113 228 /* Stick something in the shared communications ring. */
cl349@4113 229 req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
cl349@4113 230
cl349@4113 231 req->operation = USBIF_OP_IO;
cl349@4113 232 req->port = 0; /* We don't care what the port is. */
cl349@4113 233 req->id = (unsigned long) urb->hcpriv;
kaf24@6279 234 req->transfer_buffer = virt_to_mfn(urb->transfer_buffer) << PAGE_SHIFT;
cl349@4113 235 req->devnum = usb_pipedevice(urb->pipe);
cl349@4113 236 req->direction = usb_pipein(urb->pipe);
cl349@4113 237 req->speed = usb_pipeslow(urb->pipe);
cl349@4113 238 req->pipe_type = usb_pipetype(urb->pipe);
cl349@4113 239 req->length = urb->transfer_buffer_length;
cl349@4113 240 req->transfer_flags = urb->transfer_flags;
cl349@4113 241 req->endpoint = usb_pipeendpoint(urb->pipe);
cl349@4113 242 req->speed = usb_pipeslow(urb->pipe);
cl349@4113 243 req->timeout = urb->timeout * (1000 / HZ);
cl349@4113 244
cl349@4113 245 if ( usb_pipetype(urb->pipe) == 0 ) /* ISO */
cl349@4113 246 {
cl349@4113 247 int ret = xhci_construct_isoc(req, urb);
cl349@4113 248 if ( ret != 0 )
cl349@4113 249 return ret;
cl349@4113 250 }
cl349@4113 251
cl349@4113 252 if(urb->setup_packet != NULL)
cl349@4113 253 memcpy(req->setup, urb->setup_packet, 8);
cl349@4113 254 else
cl349@4113 255 memset(req->setup, 0, 8);
cl349@4113 256
cl349@4113 257 usb_ring->req_prod_pvt++;
cl349@4113 258 RING_PUSH_REQUESTS(usb_ring);
cl349@4113 259
mwilli2@4117 260 spin_unlock_irqrestore(&xhci->ring_lock, flags);
mwilli2@4117 261
cl349@4113 262 notify_via_evtchn(xhci->evtchn);
cl349@4113 263
cl349@4113 264 DPRINTK("Queued request for an URB.\n");
cl349@4113 265 dump_urb(urb);
cl349@4113 266
cl349@4113 267 return -EINPROGRESS;
cl349@4113 268 }
cl349@4113 269
cl349@4113 270 /**
cl349@4113 271 * xhci_queue_probe - queue a probe request for a particular port
cl349@4113 272 */
cl349@4113 273 static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port)
cl349@4113 274 {
cl349@4113 275 usbif_request_t *req;
cl349@4113 276 usbif_front_ring_t *usb_ring = &xhci->usb_ring;
cl349@4113 277
cl349@4113 278 #if DEBUG
cl349@4113 279 printk(KERN_DEBUG
cl349@4113 280 "queuing probe: req_prod = %d (@ 0x%lx), resp_prod = %d, "
cl349@4113 281 "resp_cons = %d\n", usbif->req_prod,
kaf24@6279 282 virt_to_mfn(&usbif->req_prod),
cl349@4113 283 usbif->resp_prod, xhci->usb_resp_cons);
cl349@4113 284 #endif
mwilli2@4117 285
mwilli2@4117 286 /* This is always called from the timer interrupt. */
mwilli2@4117 287 spin_lock(&xhci->ring_lock);
mwilli2@4117 288
mwilli2@4374 289 if ( RING_PLUGGED(xhci) )
cl349@4113 290 {
cl349@4113 291 printk(KERN_WARNING
cl349@4113 292 "xhci_queue_probe(): ring full, not queuing request\n");
mwilli2@4117 293 spin_unlock(&xhci->ring_lock);
cl349@4113 294 return NULL;
cl349@4113 295 }
cl349@4113 296
cl349@4113 297 /* Stick something in the shared communications ring. */
cl349@4113 298 req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
cl349@4113 299
mwilli2@4374 300 memset(req, 0, sizeof(*req));
cl349@4113 301
cl349@4113 302 req->operation = USBIF_OP_PROBE;
cl349@4113 303 req->port = port;
cl349@4113 304
cl349@4113 305 usb_ring->req_prod_pvt++;
cl349@4113 306 RING_PUSH_REQUESTS(usb_ring);
cl349@4113 307
mwilli2@4117 308 spin_unlock(&xhci->ring_lock);
mwilli2@4117 309
cl349@4113 310 notify_via_evtchn(xhci->evtchn);
cl349@4113 311
cl349@4113 312 return req;
cl349@4113 313 }
cl349@4113 314
cl349@4113 315 /**
cl349@4113 316 * xhci_port_reset - queue a reset request for a particular port
cl349@4113 317 */
cl349@4113 318 static int xhci_port_reset(usbif_vdev_t port)
cl349@4113 319 {
cl349@4113 320 usbif_request_t *req;
cl349@4113 321 usbif_front_ring_t *usb_ring = &xhci->usb_ring;
cl349@4113 322
mwilli2@4117 323 /* Only ever happens from process context (hub thread). */
mwilli2@4117 324 spin_lock_irq(&xhci->ring_lock);
mwilli2@4117 325
mwilli2@4374 326 if ( RING_PLUGGED(xhci) )
mwilli2@4117 327 {
mwilli2@4117 328 printk(KERN_WARNING
mwilli2@4374 329 "xhci_port_reset(): ring plugged, not queuing request\n");
mwilli2@4117 330 spin_unlock_irq(&xhci->ring_lock);
mwilli2@4117 331 return -ENOBUFS;
mwilli2@4117 332 }
mwilli2@4117 333
cl349@4113 334 /* We only reset one port at a time, so we only need one variable per
cl349@4113 335 * hub. */
cl349@4113 336 xhci->awaiting_reset = 1;
cl349@4113 337
cl349@4113 338 /* Stick something in the shared communications ring. */
cl349@4113 339 req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
cl349@4113 340
mwilli2@4374 341 memset(req, 0, sizeof(*req));
cl349@4113 342
cl349@4113 343 req->operation = USBIF_OP_RESET;
cl349@4113 344 req->port = port;
cl349@4113 345
cl349@4113 346 usb_ring->req_prod_pvt++;
cl349@4113 347 RING_PUSH_REQUESTS(usb_ring);
cl349@4113 348
mwilli2@4117 349 spin_unlock_irq(&xhci->ring_lock);
mwilli2@4117 350
cl349@4113 351 notify_via_evtchn(xhci->evtchn);
cl349@4113 352
cl349@4113 353 while ( xhci->awaiting_reset > 0 )
cl349@4113 354 {
cl349@4113 355 mdelay(1);
cl349@4113 356 xhci_drain_ring();
cl349@4113 357 }
cl349@4113 358
cl349@4113 359 xhci->rh.ports[port].pe = 1;
cl349@4113 360 xhci->rh.ports[port].pe_chg = 1;
cl349@4113 361
cl349@4113 362 return xhci->awaiting_reset;
cl349@4113 363 }
cl349@4113 364
cl349@4113 365
cl349@4113 366 /******************************************************************************
cl349@4113 367 * RING RESPONSE HANDLING
cl349@4113 368 */
cl349@4113 369
cl349@4113 370 static void receive_usb_reset(usbif_response_t *resp)
cl349@4113 371 {
cl349@4113 372 xhci->awaiting_reset = resp->status;
cl349@4113 373 rmb();
cl349@4113 374
cl349@4113 375 }
cl349@4113 376
cl349@4113 377 static void receive_usb_probe(usbif_response_t *resp)
cl349@4113 378 {
cl349@4113 379 spin_lock(&xhci->rh.port_state_lock);
cl349@4113 380
mwilli2@4374 381 if ( resp->status >= 0 )
cl349@4113 382 {
cl349@4113 383 if ( resp->status == 1 )
cl349@4113 384 {
cl349@4113 385 /* If theres a device there and there wasn't one before there must
cl349@4113 386 * have been a connection status change. */
cl349@4113 387 if( xhci->rh.ports[resp->data].cs == 0 )
cl349@4113 388 {
cl349@4113 389 xhci->rh.ports[resp->data].cs = 1;
cl349@4113 390 xhci->rh.ports[resp->data].cs_chg = 1;
cl349@4113 391 }
cl349@4113 392 }
mwilli2@4374 393 else if ( resp->status == 0 )
mwilli2@4374 394 {
mwilli2@4374 395 if(xhci->rh.ports[resp->data].cs == 1 )
mwilli2@4374 396 {
mwilli2@4374 397 xhci->rh.ports[resp->data].cs = 0;
mwilli2@4374 398 xhci->rh.ports[resp->data].cs_chg = 1;
mwilli2@4374 399 xhci->rh.ports[resp->data].pe = 0;
mwilli2@4374 400 /* According to USB Spec v2.0, 11.24.2.7.2.2, we don't need
mwilli2@4374 401 * to set pe_chg since an error has not occurred. */
mwilli2@4374 402 }
mwilli2@4374 403 }
cl349@4113 404 else
cl349@4113 405 printk(KERN_WARNING "receive_usb_probe(): unexpected status %d "
cl349@4113 406 "for port %d\n", resp->status, resp->data);
cl349@4113 407 }
cl349@4113 408 else if ( resp->status < 0)
cl349@4113 409 printk(KERN_WARNING "receive_usb_probe(): got error status %d\n",
cl349@4113 410 resp->status);
cl349@4113 411
cl349@4113 412 spin_unlock(&xhci->rh.port_state_lock);
cl349@4113 413 }
cl349@4113 414
cl349@4113 415 static void receive_usb_io(usbif_response_t *resp)
cl349@4113 416 {
cl349@4113 417 struct urb_priv *urbp = (struct urb_priv *)resp->id;
cl349@4113 418 struct urb *urb = urbp->urb;
cl349@4113 419
cl349@4113 420 urb->actual_length = resp->length;
cl349@4113 421 urbp->in_progress = 0;
cl349@4113 422
cl349@4113 423 if( usb_pipetype(urb->pipe) == 0 ) /* ISO */
cl349@4113 424 {
cl349@4113 425 int i;
cl349@4113 426
cl349@4113 427 /* Copy ISO schedule results back in. */
cl349@4113 428 for ( i = 0; i < urb->number_of_packets; i++ )
cl349@4113 429 {
cl349@4113 430 urb->iso_frame_desc[i].status
cl349@4113 431 = urbp->schedule[i].status;
cl349@4113 432 urb->iso_frame_desc[i].actual_length
cl349@4113 433 = urbp->schedule[i].length;
cl349@4113 434 }
cl349@4113 435 free_page((unsigned long)urbp->schedule);
cl349@4113 436 }
cl349@4113 437
cl349@4113 438 /* Only set status if it's not been changed since submission. It might
cl349@4113 439 * have been changed if the URB has been unlinked asynchronously, for
cl349@4113 440 * instance. */
cl349@4113 441 if ( urb->status == -EINPROGRESS )
cl349@4113 442 urbp->status = urb->status = resp->status;
cl349@4113 443 }
cl349@4113 444
cl349@4113 445 /**
cl349@4113 446 * xhci_drain_ring - drain responses from the ring, calling handlers
cl349@4113 447 *
cl349@4113 448 * This may be called from interrupt context when an event is received from the
cl349@4113 449 * backend domain, or sometimes in process context whilst waiting for a port
cl349@4113 450 * reset or URB completion.
cl349@4113 451 */
cl349@4113 452 static void xhci_drain_ring(void)
cl349@4113 453 {
cl349@4113 454 struct list_head *tmp, *head;
cl349@4113 455 usbif_front_ring_t *usb_ring = &xhci->usb_ring;
cl349@4113 456 usbif_response_t *resp;
cl349@4113 457 RING_IDX i, rp;
cl349@4113 458
cl349@4113 459 /* Walk the ring here to get responses, updating URBs to show what
cl349@4113 460 * completed. */
cl349@4113 461
cl349@4113 462 rp = usb_ring->sring->rsp_prod;
cl349@4113 463 rmb(); /* Ensure we see queued requests up to 'rp'. */
cl349@4113 464
cl349@4113 465 /* Take items off the comms ring, taking care not to overflow. */
cl349@4113 466 for ( i = usb_ring->rsp_cons; i != rp; i++ )
cl349@4113 467 {
cl349@4113 468 resp = RING_GET_RESPONSE(usb_ring, i);
cl349@4113 469
cl349@4113 470 /* May need to deal with batching and with putting a ceiling on
cl349@4113 471 the number dispatched for performance and anti-dos reasons */
cl349@4113 472
cl349@4113 473 xhci_show_resp(resp);
cl349@4113 474
cl349@4113 475 switch ( resp->operation )
cl349@4113 476 {
cl349@4113 477 case USBIF_OP_PROBE:
cl349@4113 478 receive_usb_probe(resp);
cl349@4113 479 break;
cl349@4113 480
cl349@4113 481 case USBIF_OP_IO:
cl349@4113 482 receive_usb_io(resp);
cl349@4113 483 break;
cl349@4113 484
cl349@4113 485 case USBIF_OP_RESET:
cl349@4113 486 receive_usb_reset(resp);
cl349@4113 487 break;
cl349@4113 488
cl349@4113 489 default:
cl349@4113 490 printk(KERN_WARNING
cl349@4113 491 "error: unknown USB io operation response [%d]\n",
cl349@4113 492 resp->operation);
cl349@4113 493 break;
cl349@4113 494 }
cl349@4113 495 }
cl349@4113 496
cl349@4113 497 usb_ring->rsp_cons = i;
cl349@4113 498
cl349@4113 499 /* Walk the list of pending URB's to see which ones completed and do
cl349@4113 500 * callbacks, etc. */
cl349@4113 501 spin_lock(&xhci->urb_list_lock);
cl349@4113 502 head = &xhci->urb_list;
cl349@4113 503 tmp = head->next;
cl349@4113 504 while (tmp != head) {
cl349@4113 505 struct urb *urb = list_entry(tmp, struct urb, urb_list);
cl349@4113 506
cl349@4113 507 tmp = tmp->next;
cl349@4113 508
cl349@4113 509 /* Checks the status and does all of the magic necessary */
cl349@4113 510 xhci_transfer_result(xhci, urb);
cl349@4113 511 }
cl349@4113 512 spin_unlock(&xhci->urb_list_lock);
cl349@4113 513
cl349@4113 514 xhci_finish_completion();
cl349@4113 515 }
cl349@4113 516
cl349@4113 517
/* Event-channel interrupt handler: all real work happens in the drain. */
static void xhci_interrupt(int irq, void *__xhci, struct pt_regs *regs)
{
    xhci_drain_ring();
}
cl349@4113 522
cl349@4113 523 /******************************************************************************
cl349@4113 524 * HOST CONTROLLER FUNCTIONALITY
cl349@4113 525 */
cl349@4113 526
/**
 * No-op implementation of the private device alloc / free hooks: this
 * driver keeps no per-device private state, so both just report success.
 */
static int xhci_do_nothing_dev(struct usb_device *dev)
{
    return 0;
}
cl349@4113 534
cl349@4113 535 static inline void xhci_add_complete(struct urb *urb)
cl349@4113 536 {
cl349@4113 537 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
cl349@4113 538 unsigned long flags;
cl349@4113 539
cl349@4113 540 spin_lock_irqsave(&xhci->complete_list_lock, flags);
cl349@4113 541 list_add_tail(&urbp->complete_list, &xhci->complete_list);
cl349@4113 542 spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
cl349@4113 543 }
cl349@4113 544
cl349@4113 545 /* When this returns, the owner of the URB may free its
cl349@4113 546 * storage.
cl349@4113 547 *
cl349@4113 548 * We spin and wait for the URB to complete before returning.
cl349@4113 549 *
cl349@4113 550 * Call with urb->lock acquired.
cl349@4113 551 */
cl349@4113 552 static void xhci_delete_urb(struct urb *urb)
cl349@4113 553 {
cl349@4113 554 struct urb_priv *urbp;
cl349@4113 555
cl349@4113 556 urbp = urb->hcpriv;
cl349@4113 557
cl349@4113 558 /* If there's no urb_priv structure for this URB then it can't have
cl349@4113 559 * been submitted at all. */
cl349@4113 560 if ( urbp == NULL )
cl349@4113 561 return;
cl349@4113 562
cl349@4113 563 /* For now we just spin until the URB completes. It shouldn't take too
cl349@4113 564 * long and we don't expect to have to do this very often. */
cl349@4113 565 while ( urb->status == -EINPROGRESS )
cl349@4113 566 {
cl349@4113 567 xhci_drain_ring();
cl349@4113 568 mdelay(1);
cl349@4113 569 }
cl349@4113 570
cl349@4113 571 /* Now we know that further transfers to the buffer won't
cl349@4113 572 * occur, so we can safely return. */
cl349@4113 573 }
cl349@4113 574
cl349@4113 575 static struct urb_priv *xhci_alloc_urb_priv(struct urb *urb)
cl349@4113 576 {
cl349@4113 577 struct urb_priv *urbp;
cl349@4113 578
cl349@4113 579 urbp = kmem_cache_alloc(xhci_up_cachep, SLAB_ATOMIC);
cl349@4113 580 if (!urbp) {
cl349@4113 581 err("xhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
cl349@4113 582 return NULL;
cl349@4113 583 }
cl349@4113 584
cl349@4113 585 memset((void *)urbp, 0, sizeof(*urbp));
cl349@4113 586
cl349@4113 587 urbp->inserttime = jiffies;
cl349@4113 588 urbp->urb = urb;
cl349@4113 589 urbp->dev = urb->dev;
cl349@4113 590
cl349@4113 591 INIT_LIST_HEAD(&urbp->complete_list);
cl349@4113 592
cl349@4113 593 urb->hcpriv = urbp;
cl349@4113 594
cl349@4113 595 return urbp;
cl349@4113 596 }
cl349@4113 597
cl349@4113 598 /*
cl349@4113 599 * MUST be called with urb->lock acquired
cl349@4113 600 */
cl349@4113 601 /* When is this called? Do we need to stop the transfer (as we
cl349@4113 602 * currently do)? */
cl349@4113 603 static void xhci_destroy_urb_priv(struct urb *urb)
cl349@4113 604 {
cl349@4113 605 struct urb_priv *urbp;
cl349@4113 606
cl349@4113 607 urbp = (struct urb_priv *)urb->hcpriv;
cl349@4113 608 if (!urbp)
cl349@4113 609 return;
cl349@4113 610
cl349@4113 611 if (!list_empty(&urb->urb_list))
cl349@4113 612 warn("xhci_destroy_urb_priv: urb %p still on xhci->urb_list", urb);
cl349@4113 613
cl349@4113 614 if (!list_empty(&urbp->complete_list))
cl349@4113 615 warn("xhci_destroy_urb_priv: urb %p still on xhci->complete_list", urb);
cl349@4113 616
cl349@4113 617 kmem_cache_free(xhci_up_cachep, urb->hcpriv);
cl349@4113 618
cl349@4113 619 urb->hcpriv = NULL;
cl349@4113 620 }
cl349@4113 621
cl349@4113 622 /**
cl349@4113 623 * Try to find URBs in progress on the same pipe to the same device.
cl349@4113 624 *
cl349@4113 625 * MUST be called with xhci->urb_list_lock acquired
cl349@4113 626 */
cl349@4113 627 static struct urb *xhci_find_urb_ep(struct xhci *xhci, struct urb *urb)
cl349@4113 628 {
cl349@4113 629 struct list_head *tmp, *head;
cl349@4113 630
cl349@4113 631 /* We don't match Isoc transfers since they are special */
cl349@4113 632 if (usb_pipeisoc(urb->pipe))
cl349@4113 633 return NULL;
cl349@4113 634
cl349@4113 635 head = &xhci->urb_list;
cl349@4113 636 tmp = head->next;
cl349@4113 637 while (tmp != head) {
cl349@4113 638 struct urb *u = list_entry(tmp, struct urb, urb_list);
cl349@4113 639
cl349@4113 640 tmp = tmp->next;
cl349@4113 641
cl349@4113 642 if (u->dev == urb->dev && u->pipe == urb->pipe &&
cl349@4113 643 u->status == -EINPROGRESS)
cl349@4113 644 return u;
cl349@4113 645 }
cl349@4113 646
cl349@4113 647 return NULL;
cl349@4113 648 }
cl349@4113 649
cl349@4113 650 static int xhci_submit_urb(struct urb *urb)
cl349@4113 651 {
cl349@4113 652 int ret = -EINVAL;
cl349@4113 653 unsigned long flags;
cl349@4113 654 struct urb *eurb;
cl349@4113 655 int bustime;
cl349@4113 656
cl349@4113 657 DPRINTK("URB submitted to XHCI driver.\n");
cl349@4113 658 dump_urb(urb);
cl349@4113 659
cl349@4113 660 if (!urb)
cl349@4113 661 return -EINVAL;
cl349@4113 662
cl349@4113 663 if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) {
cl349@4113 664 warn("xhci_submit_urb: urb %p belongs to disconnected device or bus?", urb);
cl349@4113 665 return -ENODEV;
cl349@4113 666 }
cl349@4113 667
cl349@4113 668 if ( urb->dev->devpath == NULL )
cl349@4113 669 BUG();
cl349@4113 670
cl349@4113 671 usb_inc_dev_use(urb->dev);
cl349@4113 672
cl349@4113 673 spin_lock_irqsave(&xhci->urb_list_lock, flags);
cl349@4113 674 spin_lock(&urb->lock);
cl349@4113 675
cl349@4113 676 if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET ||
cl349@4113 677 urb->status == -ECONNABORTED) {
cl349@4113 678 dbg("xhci_submit_urb: urb not available to submit (status = %d)", urb->status);
cl349@4113 679 /* Since we can have problems on the out path */
cl349@4113 680 spin_unlock(&urb->lock);
cl349@4113 681 spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
cl349@4113 682 usb_dec_dev_use(urb->dev);
cl349@4113 683
cl349@4113 684 return ret;
cl349@4113 685 }
cl349@4113 686
cl349@4113 687 INIT_LIST_HEAD(&urb->urb_list);
cl349@4113 688 if (!xhci_alloc_urb_priv(urb)) {
cl349@4113 689 ret = -ENOMEM;
cl349@4113 690
cl349@4113 691 goto out;
cl349@4113 692 }
cl349@4113 693
cl349@4113 694 ( (struct urb_priv *)urb->hcpriv )->in_progress = 1;
cl349@4113 695
cl349@4113 696 eurb = xhci_find_urb_ep(xhci, urb);
cl349@4113 697 if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
cl349@4113 698 ret = -ENXIO;
cl349@4113 699
cl349@4113 700 goto out;
cl349@4113 701 }
cl349@4113 702
cl349@4113 703 /* Short circuit the virtual root hub */
cl349@4113 704 if (urb->dev == xhci->rh.dev) {
cl349@4113 705 ret = rh_submit_urb(urb);
cl349@4113 706
cl349@4113 707 goto out;
cl349@4113 708 }
cl349@4113 709
cl349@4113 710 switch (usb_pipetype(urb->pipe)) {
cl349@4113 711 case PIPE_CONTROL:
cl349@4113 712 case PIPE_BULK:
cl349@4113 713 ret = xhci_queue_req(urb);
cl349@4113 714 break;
cl349@4113 715
cl349@4113 716 case PIPE_INTERRUPT:
cl349@4113 717 if (urb->bandwidth == 0) { /* not yet checked/allocated */
cl349@4113 718 bustime = usb_check_bandwidth(urb->dev, urb);
cl349@4113 719 if (bustime < 0)
cl349@4113 720 ret = bustime;
cl349@4113 721 else {
cl349@4113 722 ret = xhci_queue_req(urb);
cl349@4113 723 if (ret == -EINPROGRESS)
cl349@4113 724 usb_claim_bandwidth(urb->dev, urb,
cl349@4113 725 bustime, 0);
cl349@4113 726 }
cl349@4113 727 } else /* bandwidth is already set */
cl349@4113 728 ret = xhci_queue_req(urb);
cl349@4113 729 break;
cl349@4113 730
cl349@4113 731 case PIPE_ISOCHRONOUS:
cl349@4113 732 if (urb->bandwidth == 0) { /* not yet checked/allocated */
cl349@4113 733 if (urb->number_of_packets <= 0) {
cl349@4113 734 ret = -EINVAL;
cl349@4113 735 break;
cl349@4113 736 }
cl349@4113 737 bustime = usb_check_bandwidth(urb->dev, urb);
cl349@4113 738 if (bustime < 0) {
cl349@4113 739 ret = bustime;
cl349@4113 740 break;
cl349@4113 741 }
cl349@4113 742
cl349@4113 743 ret = xhci_queue_req(urb);
cl349@4113 744 if (ret == -EINPROGRESS)
cl349@4113 745 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
cl349@4113 746 } else /* bandwidth is already set */
cl349@4113 747 ret = xhci_queue_req(urb);
cl349@4113 748 break;
cl349@4113 749 }
cl349@4113 750 out:
cl349@4113 751 urb->status = ret;
cl349@4113 752
cl349@4113 753 if (ret == -EINPROGRESS) {
cl349@4113 754 /* We use _tail to make find_urb_ep more efficient */
cl349@4113 755 list_add_tail(&urb->urb_list, &xhci->urb_list);
cl349@4113 756
cl349@4113 757 spin_unlock(&urb->lock);
cl349@4113 758 spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
cl349@4113 759
cl349@4113 760 return 0;
cl349@4113 761 }
cl349@4113 762
cl349@4113 763 xhci_delete_urb(urb);
cl349@4113 764
cl349@4113 765 spin_unlock(&urb->lock);
cl349@4113 766 spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
cl349@4113 767
cl349@4113 768 /* Only call completion if it was successful */
cl349@4113 769 if (!ret)
cl349@4113 770 xhci_call_completion(urb);
cl349@4113 771
cl349@4113 772 return ret;
cl349@4113 773 }
cl349@4113 774
cl349@4113 775 /*
cl349@4113 776 * Return the result of a transfer
cl349@4113 777 *
cl349@4113 778 * MUST be called with urb_list_lock acquired
cl349@4113 779 */
cl349@4113 780 static void xhci_transfer_result(struct xhci *xhci, struct urb *urb)
cl349@4113 781 {
cl349@4113 782 int ret = 0;
cl349@4113 783 unsigned long flags;
cl349@4113 784 struct urb_priv *urbp;
cl349@4113 785
cl349@4113 786 /* The root hub is special */
cl349@4113 787 if (urb->dev == xhci->rh.dev)
cl349@4113 788 return;
cl349@4113 789
cl349@4113 790 spin_lock_irqsave(&urb->lock, flags);
cl349@4113 791
cl349@4113 792 urbp = (struct urb_priv *)urb->hcpriv;
cl349@4113 793
cl349@4113 794 if ( ( (struct urb_priv *)urb->hcpriv )->in_progress )
cl349@4113 795 ret = -EINPROGRESS;
cl349@4113 796
cl349@4113 797 if (urb->actual_length < urb->transfer_buffer_length) {
cl349@4113 798 if (urb->transfer_flags & USB_DISABLE_SPD) {
cl349@4113 799 ret = -EREMOTEIO;
cl349@4113 800 }
cl349@4113 801 }
cl349@4113 802
cl349@4113 803 if (urb->status == -EPIPE)
cl349@4113 804 {
cl349@4113 805 ret = urb->status;
cl349@4113 806 /* endpoint has stalled - mark it halted */
cl349@4113 807 usb_endpoint_halt(urb->dev, usb_pipeendpoint(urb->pipe),
cl349@4113 808 usb_pipeout(urb->pipe));
cl349@4113 809 }
cl349@4113 810
cl349@4113 811 if ((debug == 1 && ret != 0 && ret != -EPIPE) ||
cl349@4113 812 (ret != 0 && debug > 1)) {
cl349@4113 813 /* Some debugging code */
cl349@4113 814 dbg("xhci_result_interrupt/bulk() failed with status %x",
cl349@4113 815 status);
cl349@4113 816 }
cl349@4113 817
cl349@4113 818 if (ret == -EINPROGRESS)
cl349@4113 819 goto out;
cl349@4113 820
cl349@4113 821 switch (usb_pipetype(urb->pipe)) {
cl349@4113 822 case PIPE_CONTROL:
cl349@4113 823 case PIPE_BULK:
cl349@4113 824 case PIPE_ISOCHRONOUS:
cl349@4113 825 /* Release bandwidth for Interrupt or Isoc. transfers */
cl349@4113 826 /* Spinlock needed ? */
cl349@4113 827 if (urb->bandwidth)
cl349@4113 828 usb_release_bandwidth(urb->dev, urb, 1);
cl349@4113 829 xhci_delete_urb(urb);
cl349@4113 830 break;
cl349@4113 831 case PIPE_INTERRUPT:
cl349@4113 832 /* Interrupts are an exception */
cl349@4113 833 if (urb->interval)
cl349@4113 834 goto out_complete;
cl349@4113 835
cl349@4113 836 /* Release bandwidth for Interrupt or Isoc. transfers */
cl349@4113 837 /* Spinlock needed ? */
cl349@4113 838 if (urb->bandwidth)
cl349@4113 839 usb_release_bandwidth(urb->dev, urb, 0);
cl349@4113 840 xhci_delete_urb(urb);
cl349@4113 841 break;
cl349@4113 842 default:
cl349@4113 843 info("xhci_transfer_result: unknown pipe type %d for urb %p\n",
cl349@4113 844 usb_pipetype(urb->pipe), urb);
cl349@4113 845 }
cl349@4113 846
cl349@4113 847 /* Remove it from xhci->urb_list */
cl349@4113 848 list_del_init(&urb->urb_list);
cl349@4113 849
cl349@4113 850 out_complete:
cl349@4113 851 xhci_add_complete(urb);
cl349@4113 852
cl349@4113 853 out:
cl349@4113 854 spin_unlock_irqrestore(&urb->lock, flags);
cl349@4113 855 }
cl349@4113 856
cl349@4113 857 static int xhci_unlink_urb(struct urb *urb)
cl349@4113 858 {
cl349@4113 859 unsigned long flags;
cl349@4113 860 struct urb_priv *urbp = urb->hcpriv;
cl349@4113 861
cl349@4113 862 if (!urb)
cl349@4113 863 return -EINVAL;
cl349@4113 864
cl349@4113 865 if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
cl349@4113 866 return -ENODEV;
cl349@4113 867
cl349@4113 868 spin_lock_irqsave(&xhci->urb_list_lock, flags);
cl349@4113 869 spin_lock(&urb->lock);
cl349@4113 870
cl349@4113 871 /* Release bandwidth for Interrupt or Isoc. transfers */
cl349@4113 872 /* Spinlock needed ? */
cl349@4113 873 if (urb->bandwidth) {
cl349@4113 874 switch (usb_pipetype(urb->pipe)) {
cl349@4113 875 case PIPE_INTERRUPT:
cl349@4113 876 usb_release_bandwidth(urb->dev, urb, 0);
cl349@4113 877 break;
cl349@4113 878 case PIPE_ISOCHRONOUS:
cl349@4113 879 usb_release_bandwidth(urb->dev, urb, 1);
cl349@4113 880 break;
cl349@4113 881 default:
cl349@4113 882 break;
cl349@4113 883 }
cl349@4113 884 }
cl349@4113 885
cl349@4113 886 if (urb->status != -EINPROGRESS) {
cl349@4113 887 spin_unlock(&urb->lock);
cl349@4113 888 spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
cl349@4113 889 return 0;
cl349@4113 890 }
cl349@4113 891
cl349@4113 892 list_del_init(&urb->urb_list);
cl349@4113 893
cl349@4113 894 /* Short circuit the virtual root hub */
cl349@4113 895 if (urb->dev == xhci->rh.dev) {
cl349@4113 896 rh_unlink_urb(urb);
cl349@4113 897
cl349@4113 898 spin_unlock(&urb->lock);
cl349@4113 899 spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
cl349@4113 900
cl349@4113 901 xhci_call_completion(urb);
cl349@4113 902 } else {
cl349@4113 903 if (urb->transfer_flags & USB_ASYNC_UNLINK) {
cl349@4113 904 /* We currently don't currently attempt to cancel URBs
cl349@4113 905 * that have been queued in the ring. We handle async
cl349@4113 906 * unlinked URBs when they complete. */
cl349@4113 907 urbp->status = urb->status = -ECONNABORTED;
cl349@4113 908 spin_unlock(&urb->lock);
cl349@4113 909 spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
cl349@4113 910 } else {
cl349@4113 911 urb->status = -ENOENT;
cl349@4113 912
cl349@4113 913 spin_unlock(&urb->lock);
cl349@4113 914 spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
cl349@4113 915
cl349@4113 916 if (in_interrupt()) { /* wait at least 1 frame */
cl349@4113 917 static int errorcount = 10;
cl349@4113 918
cl349@4113 919 if (errorcount--)
cl349@4113 920 dbg("xhci_unlink_urb called from interrupt for urb %p", urb);
cl349@4113 921 udelay(1000);
cl349@4113 922 } else
cl349@4113 923 schedule_timeout(1+1*HZ/1000);
cl349@4113 924
cl349@4113 925 xhci_delete_urb(urb);
cl349@4113 926
cl349@4113 927 xhci_call_completion(urb);
cl349@4113 928 }
cl349@4113 929 }
cl349@4113 930
cl349@4113 931 return 0;
cl349@4113 932 }
cl349@4113 933
/*
 * Invoke the completion callback for a finished URB and handle any
 * follow-up work: resubmitting periodic interrupt URBs, restarting a
 * ring of linked URBs, or dropping the device reference.
 *
 * Takes and releases urb->lock around the bookkeeping; the completion
 * handler itself runs unlocked.
 */
static void xhci_call_completion(struct urb *urb)
{
	struct urb_priv *urbp;
	struct usb_device *dev = urb->dev;
	int is_ring = 0, killed, resubmit_interrupt, status;
	struct urb *nurb;
	unsigned long flags;

	spin_lock_irqsave(&urb->lock, flags);

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp || !urb->dev) {
		spin_unlock_irqrestore(&urb->lock, flags);
		return;
	}

	/* An URB unlinked/aborted/reset by the caller must not be requeued. */
	killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
			urb->status == -ECONNRESET);
	resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
			urb->interval);

	/* Walk the urb->next chain: if any member was killed, kill the lot;
	 * bounded by MAX_URB_LOOP in case the chain is corrupt. */
	nurb = urb->next;
	if (nurb && !killed) {
		int count = 0;

		while (nurb && nurb != urb && count < MAX_URB_LOOP) {
			if (nurb->status == -ENOENT ||
			    nurb->status == -ECONNABORTED ||
			    nurb->status == -ECONNRESET) {
				killed = 1;
				break;
			}

			nurb = nurb->next;
			count++;
		}

		if (count == MAX_URB_LOOP)
			err("xhci_call_completion: too many linked URB's, loop? (first loop)");

		/* Check to see if chain is a ring */
		is_ring = (nurb == urb);
	}

	status = urbp->status;
	if (!resubmit_interrupt || killed)
		/* We don't need urb_priv anymore */
		xhci_destroy_urb_priv(urb);

	if (!killed)
		urb->status = status;

	spin_unlock_irqrestore(&urb->lock, flags);

	if (urb->complete)
		urb->complete(urb);

	if (resubmit_interrupt)
		/* Recheck the status. The completion handler may have */
		/* unlinked the resubmitting interrupt URB */
		killed = (urb->status == -ENOENT ||
			  urb->status == -ECONNABORTED ||
			  urb->status == -ECONNRESET);

	if (resubmit_interrupt && !killed) {
		/* Periodic interrupt transfer: requeue it on the ring. */
		if ( urb->dev != xhci->rh.dev )
			xhci_queue_req(urb); /* XXX What if this fails? */
		/* Don't need to resubmit URBs for the virtual root dev. */
	} else {
		if (is_ring && !killed) {
			/* Ring of linked URBs: restart from this one. */
			urb->dev = dev;
			xhci_submit_urb(urb);
		} else {
			/* We decrement the usage count after we're done */
			/* with everything */
			usb_dec_dev_use(dev);
		}
	}
}
cl349@4113 1013
cl349@4113 1014 static void xhci_finish_completion(void)
cl349@4113 1015 {
cl349@4113 1016 struct list_head *tmp, *head;
cl349@4113 1017 unsigned long flags;
cl349@4113 1018
cl349@4113 1019 spin_lock_irqsave(&xhci->complete_list_lock, flags);
cl349@4113 1020 head = &xhci->complete_list;
cl349@4113 1021 tmp = head->next;
cl349@4113 1022 while (tmp != head) {
cl349@4113 1023 struct urb_priv *urbp = list_entry(tmp, struct urb_priv,
cl349@4113 1024 complete_list);
cl349@4113 1025 struct urb *urb = urbp->urb;
cl349@4113 1026
cl349@4113 1027 list_del_init(&urbp->complete_list);
cl349@4113 1028 spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
cl349@4113 1029
cl349@4113 1030 xhci_call_completion(urb);
cl349@4113 1031
cl349@4113 1032 spin_lock_irqsave(&xhci->complete_list_lock, flags);
cl349@4113 1033 head = &xhci->complete_list;
cl349@4113 1034 tmp = head->next;
cl349@4113 1035 }
cl349@4113 1036 spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
cl349@4113 1037 }
cl349@4113 1038
/* Host-controller operations exported to the Linux USB core. */
static struct usb_operations xhci_device_operations = {
	.allocate = xhci_do_nothing_dev,
	.deallocate = xhci_do_nothing_dev,
	/* It doesn't look like any drivers actually care what the frame number
	 * is at the moment!  If necessary, we could approximate the current
	 * frame number by passing it from the backend in response messages. */
	.get_frame_number = NULL,
	.submit_urb = xhci_submit_urb,
	.unlink_urb = xhci_unlink_urb
};
cl349@4113 1049
cl349@4113 1050 /******************************************************************************
cl349@4113 1051 * VIRTUAL ROOT HUB EMULATION
cl349@4113 1052 */
cl349@4113 1053
/* USB device descriptor returned for the emulated root hub (USB 1.0 hub). */
static __u8 root_hub_dev_des[] =
{
	0x12,			/*  __u8  bLength; */
	0x01,			/*  __u8  bDescriptorType; Device */
	0x00,			/*  __u16 bcdUSB; v1.0 */
	0x01,
	0x09,			/*  __u8  bDeviceClass; HUB_CLASSCODE */
	0x00,			/*  __u8  bDeviceSubClass; */
	0x00,			/*  __u8  bDeviceProtocol; */
	0x08,			/*  __u8  bMaxPacketSize0; 8 Bytes */
	0x00,			/*  __u16 idVendor; */
	0x00,
	0x00,			/*  __u16 idProduct; */
	0x00,
	0x00,			/*  __u16 bcdDevice; */
	0x00,
	0x00,			/*  __u8  iManufacturer; */
	0x02,			/*  __u8  iProduct; */
	0x01,			/*  __u8  iSerialNumber; */
	0x01			/*  __u8  bNumConfigurations; */
};
cl349@4113 1075
cl349@4113 1076
/* Configuration descriptor for the emulated root hub: one interface with a
 * single interrupt IN endpoint (the hub status-change endpoint). */
static __u8 root_hub_config_des[] =
{
	0x09,			/*  __u8  bLength; */
	0x02,			/*  __u8  bDescriptorType; Configuration */
	0x19,			/*  __u16 wTotalLength; */
	0x00,
	0x01,			/*  __u8  bNumInterfaces; */
	0x01,			/*  __u8  bConfigurationValue; */
	0x00,			/*  __u8  iConfiguration; */
	0x40,			/*  __u8  bmAttributes;
				    Bit 7: Bus-powered, 6: Self-powered,
				    Bit 5 Remote-wakeup, 4..0: resvd */
	0x00,			/*  __u8  MaxPower; */

	/* interface */
	0x09,			/*  __u8  if_bLength; */
	0x04,			/*  __u8  if_bDescriptorType; Interface */
	0x00,			/*  __u8  if_bInterfaceNumber; */
	0x00,			/*  __u8  if_bAlternateSetting; */
	0x01,			/*  __u8  if_bNumEndpoints; */
	0x09,			/*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
	0x00,			/*  __u8  if_bInterfaceSubClass; */
	0x00,			/*  __u8  if_bInterfaceProtocol; */
	0x00,			/*  __u8  if_iInterface; */

	/* endpoint */
	0x07,			/*  __u8  ep_bLength; */
	0x05,			/*  __u8  ep_bDescriptorType; Endpoint */
	0x81,			/*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
	0x03,			/*  __u8  ep_bmAttributes; Interrupt */
	0x08,			/*  __u16 ep_wMaxPacketSize; 8 Bytes */
	0x00,
	0xff			/*  __u8  ep_bInterval; 255 ms */
};
cl349@4113 1112
/* Hub class descriptor template; bNbrPorts (byte 2) is patched at request
 * time in rh_submit_urb() with the real virtual port count. */
static __u8 root_hub_hub_des[] =
{
	0x09,			/*  __u8  bLength; */
	0x29,			/*  __u8  bDescriptorType; Hub-descriptor */
	0x02,			/*  __u8  bNbrPorts; */
	0x00,			/* __u16  wHubCharacteristics; */
	0x00,
	0x01,			/*  __u8  bPwrOn2pwrGood; 2ms */
	0x00,			/*  __u8  bHubContrCurrent; 0 mA */
	0x00,			/*  __u8  DeviceRemovable; *** 7 Ports max *** */
	0xff			/*  __u8  PortPwrCtrlMask; *** 7 ports max *** */
};
cl349@4113 1125
cl349@4113 1126 /* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
cl349@4113 1127 static int rh_send_irq(struct urb *urb)
cl349@4113 1128 {
cl349@4113 1129 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
cl349@4113 1130 xhci_port_t *ports = xhci->rh.ports;
cl349@4113 1131 unsigned long flags;
cl349@4113 1132 int i, len = 1;
cl349@4113 1133 __u16 data = 0;
cl349@4113 1134
cl349@4113 1135 spin_lock_irqsave(&urb->lock, flags);
cl349@4113 1136 for (i = 0; i < xhci->rh.numports; i++) {
cl349@4113 1137 /* Set a bit if anything at all has changed on the port, as per
cl349@4113 1138 * USB spec 11.12 */
cl349@4113 1139 data |= (ports[i].cs_chg || ports[i].pe_chg )
cl349@4113 1140 ? (1 << (i + 1))
cl349@4113 1141 : 0;
cl349@4113 1142
cl349@4113 1143 len = (i + 1) / 8 + 1;
cl349@4113 1144 }
cl349@4113 1145
cl349@4113 1146 *(__u16 *) urb->transfer_buffer = cpu_to_le16(data);
cl349@4113 1147 urb->actual_length = len;
cl349@4113 1148 urbp->status = 0;
cl349@4113 1149
cl349@4113 1150 spin_unlock_irqrestore(&urb->lock, flags);
cl349@4113 1151
cl349@4113 1152 if ((data > 0) && (xhci->rh.send != 0)) {
cl349@4113 1153 dbg("root-hub INT complete: data: %x", data);
cl349@4113 1154 xhci_call_completion(urb);
cl349@4113 1155 }
cl349@4113 1156
cl349@4113 1157 return 0;
cl349@4113 1158 }
cl349@4113 1159
cl349@4113 1160 /* Virtual Root Hub INTs are polled by this timer every "interval" ms */
cl349@4113 1161 static int rh_init_int_timer(struct urb *urb);
cl349@4113 1162
/*
 * Periodic root-hub timer: probes ports, delivers hub status-change
 * interrupts, harvests timed-out URBs, and re-arms itself.
 *
 * Timed-out URBs are first moved onto a private list under
 * urb_list_lock, then unlinked after the lock is dropped, because
 * xhci_unlink_urb() takes urb_list_lock itself.
 */
static void rh_int_timer_do(unsigned long ptr)
{
	struct urb *urb = (struct urb *)ptr;
	struct list_head list, *tmp, *head;
	unsigned long flags;
	int i;

	/* Ask the backend whether anything appeared/vanished on each port. */
	for ( i = 0; i < xhci->rh.numports; i++)
		xhci_queue_probe(i);

	if (xhci->rh.send)
		rh_send_irq(urb);

	INIT_LIST_HEAD(&list);

	/* Phase 1: collect timed-out URBs onto the private list. */
	spin_lock_irqsave(&xhci->urb_list_lock, flags);
	head = &xhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb *u = list_entry(tmp, struct urb, urb_list);
		struct urb_priv *up = (struct urb_priv *)u->hcpriv;

		/* Advance before possibly unlinking the current entry. */
		tmp = tmp->next;

		spin_lock(&u->lock);

		/* Check if the URB timed out */
		if (u->timeout && time_after_eq(jiffies,
			up->inserttime + u->timeout)) {
			list_del(&u->urb_list);
			list_add_tail(&u->urb_list, &list);
		}

		spin_unlock(&u->lock);
	}
	spin_unlock_irqrestore(&xhci->urb_list_lock, flags);

	/* Phase 2: unlink them without holding urb_list_lock. */
	head = &list;
	tmp = head->next;
	while (tmp != head) {
		struct urb *u = list_entry(tmp, struct urb, urb_list);

		tmp = tmp->next;

		u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
		xhci_unlink_urb(u);
	}

	/* Re-arm for the next polling interval. */
	rh_init_int_timer(urb);
}
cl349@4113 1213
cl349@4113 1214 /* Root Hub INTs are polled by this timer */
cl349@4113 1215 static int rh_init_int_timer(struct urb *urb)
cl349@4113 1216 {
cl349@4113 1217 xhci->rh.interval = urb->interval;
cl349@4113 1218 init_timer(&xhci->rh.rh_int_timer);
cl349@4113 1219 xhci->rh.rh_int_timer.function = rh_int_timer_do;
cl349@4113 1220 xhci->rh.rh_int_timer.data = (unsigned long)urb;
cl349@4113 1221 xhci->rh.rh_int_timer.expires = jiffies
cl349@4113 1222 + (HZ * (urb->interval < 30 ? 30 : urb->interval)) / 1000;
cl349@4113 1223 add_timer(&xhci->rh.rh_int_timer);
cl349@4113 1224
cl349@4113 1225 return 0;
cl349@4113 1226 }
cl349@4113 1227
cl349@4113 1228 #define OK(x) len = (x); break
cl349@4113 1229
/* Root Hub Control Pipe */
/*
 * Emulate control (and interrupt) requests addressed to the virtual root
 * hub.  Interrupt URBs arm the polling timer and stay -EINPROGRESS; control
 * requests are decoded from the setup packet and answered synchronously.
 *
 * The OK(x) macro sets `len' and breaks out of the enclosing switch, so the
 * many fall-through-looking cases below are intentional.
 *
 * Returns 0 on success, -EPIPE for unsupported requests, or the error from
 * a failed port reset.
 */
static int rh_submit_urb(struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct usb_ctrlrequest *cmd =
		(struct usb_ctrlrequest *)urb->setup_packet;
	void *data = urb->transfer_buffer;
	int leni = urb->transfer_buffer_length;
	int len = 0;
	xhci_port_t *status;
	int stat = 0;
	int i;
	int retstatus;
	unsigned long flags;

	__u16 cstatus;
	__u16 bmRType_bReq;
	__u16 wValue;
	__u16 wIndex;
	__u16 wLength;

	/* Interrupt pipe: register the status-change URB and start polling. */
	if (usb_pipetype(pipe) == PIPE_INTERRUPT) {
		xhci->rh.urb = urb;
		xhci->rh.send = 1;
		xhci->rh.interval = urb->interval;
		rh_init_int_timer(urb);

		return -EINPROGRESS;
	}

	/* Pack request-type and request into one switchable value. */
	bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
	wValue = le16_to_cpu(cmd->wValue);
	wIndex = le16_to_cpu(cmd->wIndex);
	wLength = le16_to_cpu(cmd->wLength);

	for (i = 0; i < 8; i++)
		xhci->rh.c_p_r[i] = 0;

	/* NOTE(review): this pointer is computed before wIndex is validated;
	 * for non-port requests wIndex is typically 0, so `status' points
	 * one element before the array and must not be dereferenced by
	 * those cases -- confirm no case below does. */
	status = &xhci->rh.ports[wIndex - 1];

	spin_lock_irqsave(&xhci->rh.port_state_lock, flags);

	switch (bmRType_bReq) {
		/* Request Destination:
		   without flags: Device,
		   RH_INTERFACE: interface,
		   RH_ENDPOINT: endpoint,
		   RH_CLASS means HUB here,
		   RH_OTHER | RH_CLASS  almost ever means HUB_PORT here
		*/

	case RH_GET_STATUS:
		*(__u16 *)data = cpu_to_le16(1);
		OK(2);
	case RH_GET_STATUS | RH_INTERFACE:
		*(__u16 *)data = cpu_to_le16(0);
		OK(2);
	case RH_GET_STATUS | RH_ENDPOINT:
		*(__u16 *)data = cpu_to_le16(0);
		OK(2);
	case RH_GET_STATUS | RH_CLASS:
		*(__u32 *)data = cpu_to_le32(0);
		OK(4);		/* hub power */
	case RH_GET_STATUS | RH_OTHER | RH_CLASS:
		/* Port status + change bits, per USB hub class layout. */
		cstatus = (status->cs_chg) |
			(status->pe_chg << 1) |
			(xhci->rh.c_p_r[wIndex - 1] << 4);
		retstatus = (status->cs) |
			(status->pe << 1) |
			(status->susp << 2) |
			(1 << 8) |	/* power on */
			(status->lsda << 9);
		*(__u16 *)data = cpu_to_le16(retstatus);
		*(__u16 *)(data + 2) = cpu_to_le16(cstatus);
		OK(4);
	case RH_CLEAR_FEATURE | RH_ENDPOINT:
		switch (wValue) {
		case RH_ENDPOINT_STALL:
			OK(0);
		}
		break;
	case RH_CLEAR_FEATURE | RH_CLASS:
		switch (wValue) {
		case RH_C_HUB_OVER_CURRENT:
			OK(0);	/* hub power over current */
		}
		break;
	case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
		switch (wValue) {
		case RH_PORT_ENABLE:
			status->pe = 0;
			OK(0);
		case RH_PORT_SUSPEND:
			status->susp = 0;
			OK(0);
		case RH_PORT_POWER:
			OK(0);	/* port power */
		case RH_C_PORT_CONNECTION:
			status->cs_chg = 0;
			OK(0);
		case RH_C_PORT_ENABLE:
			status->pe_chg = 0;
			OK(0);
		case RH_C_PORT_SUSPEND:
			/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
			OK(0);
		case RH_C_PORT_OVER_CURRENT:
			OK(0);	/* port power over current */
		case RH_C_PORT_RESET:
			xhci->rh.c_p_r[wIndex - 1] = 0;
			OK(0);
		}
		break;
	case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
		switch (wValue) {
		case RH_PORT_SUSPEND:
			status->susp = 1;
			OK(0);
		case RH_PORT_RESET:
		{
			int ret;
			/* Record a pending reset-change and ask the backend
			 * to actually reset the physical port. */
			xhci->rh.c_p_r[wIndex - 1] = 1;
			status->pr = 0;
			status->pe = 1;
			ret = xhci_port_reset(wIndex - 1);
			/* XXX MAW: should probably cancel queued transfers during reset... *\/ */
			if ( ret == 0 ) { OK(0); }
			else { return ret; }
		}
			break;
		case RH_PORT_POWER:
			OK(0);	/* port power ** */
		case RH_PORT_ENABLE:
			status->pe = 1;
			OK(0);
		}
		break;
	case RH_SET_ADDRESS:
		xhci->rh.devnum = wValue;
		OK(0);
	case RH_GET_DESCRIPTOR:
		switch ((wValue & 0xff00) >> 8) {
		case 0x01:	/* device descriptor */
			len = min_t(unsigned int, leni,
				  min_t(unsigned int,
				      sizeof(root_hub_dev_des), wLength));
			memcpy(data, root_hub_dev_des, len);
			OK(len);
		case 0x02:	/* configuration descriptor */
			len = min_t(unsigned int, leni,
				  min_t(unsigned int,
				      sizeof(root_hub_config_des), wLength));
			memcpy (data, root_hub_config_des, len);
			OK(len);
		case 0x03:	/* string descriptors */
			len = usb_root_hub_string (wValue & 0xff,
				0, "XHCI-alt",
				data, wLength);
			if (len > 0) {
				OK(min_t(int, leni, len));
			} else
				stat = -EPIPE;
		}
		break;
	case RH_GET_DESCRIPTOR | RH_CLASS:
		/* Patch in the real port count before copying. */
		root_hub_hub_des[2] = xhci->rh.numports;
		len = min_t(unsigned int, leni,
			  min_t(unsigned int, sizeof(root_hub_hub_des), wLength));
		memcpy(data, root_hub_hub_des, len);
		OK(len);
	case RH_GET_CONFIGURATION:
		*(__u8 *)data = 0x01;
		OK(1);
	case RH_SET_CONFIGURATION:
		OK(0);
	case RH_GET_INTERFACE | RH_INTERFACE:
		*(__u8 *)data = 0x00;
		OK(1);
	case RH_SET_INTERFACE | RH_INTERFACE:
		OK(0);
	default:
		stat = -EPIPE;
	}

	spin_unlock_irqrestore(&xhci->rh.port_state_lock, flags);

	urb->actual_length = len;

	return stat;
}
cl349@4113 1420
cl349@4113 1421 /*
cl349@4113 1422 * MUST be called with urb->lock acquired
cl349@4113 1423 */
cl349@4113 1424 static int rh_unlink_urb(struct urb *urb)
cl349@4113 1425 {
cl349@4113 1426 if (xhci->rh.urb == urb) {
cl349@4113 1427 urb->status = -ENOENT;
cl349@4113 1428 xhci->rh.send = 0;
cl349@4113 1429 xhci->rh.urb = NULL;
cl349@4113 1430 del_timer(&xhci->rh.rh_int_timer);
cl349@4113 1431 }
cl349@4113 1432 return 0;
cl349@4113 1433 }
cl349@4113 1434
cl349@4113 1435 /******************************************************************************
cl349@4113 1436 * CONTROL PLANE FUNCTIONALITY
cl349@4113 1437 */
cl349@4113 1438
cl349@4113 1439 /**
cl349@4113 1440 * alloc_xhci - initialise a new virtual root hub for a new USB device channel
cl349@4113 1441 */
cl349@4113 1442 static int alloc_xhci(void)
cl349@4113 1443 {
cl349@4113 1444 int retval;
cl349@4113 1445 struct usb_bus *bus;
cl349@4113 1446
cl349@4113 1447 retval = -EBUSY;
cl349@4113 1448
cl349@4113 1449 xhci = kmalloc(sizeof(*xhci), GFP_KERNEL);
cl349@4113 1450 if (!xhci) {
cl349@4113 1451 err("couldn't allocate xhci structure");
cl349@4113 1452 retval = -ENOMEM;
cl349@4113 1453 goto err_alloc_xhci;
cl349@4113 1454 }
cl349@4113 1455
cl349@4113 1456 xhci->state = USBIF_STATE_CLOSED;
cl349@4113 1457
cl349@4113 1458 spin_lock_init(&xhci->urb_list_lock);
cl349@4113 1459 INIT_LIST_HEAD(&xhci->urb_list);
cl349@4113 1460
cl349@4113 1461 spin_lock_init(&xhci->complete_list_lock);
cl349@4113 1462 INIT_LIST_HEAD(&xhci->complete_list);
cl349@4113 1463
cl349@4113 1464 spin_lock_init(&xhci->frame_list_lock);
cl349@4113 1465
cl349@4113 1466 bus = usb_alloc_bus(&xhci_device_operations);
cl349@4113 1467
cl349@4113 1468 if (!bus) {
cl349@4113 1469 err("unable to allocate bus");
cl349@4113 1470 goto err_alloc_bus;
cl349@4113 1471 }
cl349@4113 1472
cl349@4113 1473 xhci->bus = bus;
cl349@4113 1474 bus->bus_name = "XHCI";
cl349@4113 1475 bus->hcpriv = xhci;
cl349@4113 1476
cl349@4113 1477 usb_register_bus(xhci->bus);
cl349@4113 1478
cl349@4113 1479 /* Initialize the root hub */
cl349@4113 1480
cl349@4113 1481 xhci->rh.numports = 0;
cl349@4113 1482
cl349@4113 1483 xhci->bus->root_hub = xhci->rh.dev = usb_alloc_dev(NULL, xhci->bus);
cl349@4113 1484 if (!xhci->rh.dev) {
cl349@4113 1485 err("unable to allocate root hub");
cl349@4113 1486 goto err_alloc_root_hub;
cl349@4113 1487 }
cl349@4113 1488
cl349@4113 1489 xhci->state = 0;
cl349@4113 1490
cl349@4113 1491 return 0;
cl349@4113 1492
cl349@4113 1493 /*
cl349@4113 1494 * error exits:
cl349@4113 1495 */
cl349@4113 1496 err_alloc_root_hub:
cl349@4113 1497 usb_deregister_bus(xhci->bus);
cl349@4113 1498 usb_free_bus(xhci->bus);
cl349@4113 1499 xhci->bus = NULL;
cl349@4113 1500
cl349@4113 1501 err_alloc_bus:
cl349@4113 1502 kfree(xhci);
cl349@4113 1503
cl349@4113 1504 err_alloc_xhci:
cl349@4113 1505 return retval;
cl349@4113 1506 }
cl349@4113 1507
cl349@4113 1508 /**
cl349@4113 1509 * usbif_status_change - deal with an incoming USB_INTERFACE_STATUS_ message
cl349@4113 1510 */
/**
 * usbif_status_change - deal with an incoming USB_INTERFACE_STATUS_ message
 *
 * Drives the frontend connection state machine:
 *   DESTROYED    -> unexpected, just warn;
 *   DISCONNECTED -> allocate and share the ring, request a connect;
 *   CONNECTED    -> bind the event channel and bring up the root hub.
 */
static void usbif_status_change(usbif_fe_interface_status_changed_t *status)
{
    ctrl_msg_t cmsg;
    usbif_fe_interface_connect_t up;
    long rc;
    usbif_sring_t *sring;

    switch ( status->status )
    {
    case USBIF_INTERFACE_STATUS_DESTROYED:
        printk(KERN_WARNING "Unexpected usbif-DESTROYED message in state %d\n",
               xhci->state);
        break;

    case USBIF_INTERFACE_STATUS_DISCONNECTED:
        if ( xhci->state != USBIF_STATE_CLOSED )
        {
            printk(KERN_WARNING "Unexpected usbif-DISCONNECTED message"
                   " in state %d\n", xhci->state);
            break;
            /* Not bothering to do recovery here for now.  Keep things
             * simple. */
            /* NOTE(review): everything from here to the end of this block
             * is deliberately dead code -- the skeleton of a never-enabled
             * recovery path, left behind the unconditional break above. */

            spin_lock_irq(&xhci->ring_lock);

            /* Clean up resources. */
            free_page((unsigned long)xhci->usb_ring.sring);
            unbind_evtchn_from_irqhandler(xhci->evtchn, xhci);

            /* Plug the ring. */
            xhci->recovery = 1;
            wmb();

            spin_unlock_irq(&xhci->ring_lock);
        }

        /* Move from CLOSED to DISCONNECTED state. */
        sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&xhci->usb_ring, sring, PAGE_SIZE);
        xhci->state = USBIF_STATE_DISCONNECTED;

        /* Construct an interface-CONNECT message for the domain controller. */
        cmsg.type = CMSG_USBIF_FE;
        cmsg.subtype = CMSG_USBIF_FE_INTERFACE_CONNECT;
        cmsg.length = sizeof(usbif_fe_interface_connect_t);
        up.shmem_frame = virt_to_mfn(sring);
        memcpy(cmsg.msg, &up, sizeof(up));

        /* Tell the controller to bring up the interface. */
        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
        break;

    case USBIF_INTERFACE_STATUS_CONNECTED:
        if ( xhci->state == USBIF_STATE_CLOSED )
        {
            printk(KERN_WARNING "Unexpected usbif-CONNECTED message"
                   " in state %d\n", xhci->state);
            break;
        }

        xhci->evtchn = status->evtchn;
        xhci->bandwidth = status->bandwidth;
        xhci->rh.numports = status->num_ports;

        /* One xhci_port_t of state per advertised virtual port. */
        xhci->rh.ports = kmalloc (sizeof(xhci_port_t) * xhci->rh.numports, GFP_KERNEL);

        if ( xhci->rh.ports == NULL )
            goto alloc_ports_nomem;

        memset(xhci->rh.ports, 0, sizeof(xhci_port_t) * xhci->rh.numports);

        usb_connect(xhci->rh.dev);

        if (usb_new_device(xhci->rh.dev) != 0) {
            err("unable to start root hub");
        }

        /* Allocate the appropriate USB bandwidth here...  Need to
         * somehow know what the total available is thought to be so we
         * can calculate the reservation correctly. */
        usb_claim_bandwidth(xhci->rh.dev, xhci->rh.urb,
                            1000 - xhci->bandwidth, 0);

        if ( (rc = bind_evtchn_to_irqhandler(xhci->evtchn, xhci_interrupt,
                                             SA_SAMPLE_RANDOM, "usbif", xhci)) )
            printk(KERN_ALERT"usbfront request_irq failed (%ld)\n",rc);

        DPRINTK(KERN_INFO __FILE__
                ": USB XHCI: SHM at %p (0x%lx), EVTCHN %d\n",
                xhci->usb_ring.sring, virt_to_mfn(xhci->usbif),
                xhci->evtchn);

        xhci->state = USBIF_STATE_CONNECTED;

        break;

    default:
        printk(KERN_WARNING "Status change to unknown value %d\n",
               status->status);
        break;
    }

    return;

 alloc_ports_nomem:
    printk(KERN_WARNING "Failed to allocate port memory, XHCI failed to connect.\n");
    return;
}
cl349@4113 1620
cl349@4113 1621 /**
cl349@4113 1622 * usbif_ctrlif_rx - demux control messages by subtype
cl349@4113 1623 */
cl349@4113 1624 static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
cl349@4113 1625 {
cl349@4113 1626 switch ( msg->subtype )
cl349@4113 1627 {
cl349@4113 1628 case CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED:
cl349@4113 1629 usbif_status_change((usbif_fe_interface_status_changed_t *)
cl349@4113 1630 &msg->msg[0]);
kaf24@4402 1631 break;
cl349@4113 1632
cl349@4113 1633 /* New interface...? */
cl349@4113 1634 default:
kaf24@4402 1635 msg->length = 0;
kaf24@4402 1636 break;
cl349@4113 1637 }
cl349@4113 1638
cl349@4113 1639 ctrl_if_send_response(msg);
cl349@4113 1640 }
cl349@4113 1641
mwilli2@4374 1642 static void send_driver_up(void)
mwilli2@4374 1643 {
mwilli2@4374 1644 control_msg_t cmsg;
mwilli2@4374 1645 usbif_fe_interface_status_changed_t st;
mwilli2@4374 1646
mwilli2@4374 1647 /* Send a driver-UP notification to the domain controller. */
mwilli2@4374 1648 cmsg.type = CMSG_USBIF_FE;
mwilli2@4374 1649 cmsg.subtype = CMSG_USBIF_FE_DRIVER_STATUS_CHANGED;
mwilli2@4374 1650 cmsg.length = sizeof(usbif_fe_driver_status_changed_t);
mwilli2@4374 1651 st.status = USBIF_DRIVER_STATUS_UP;
mwilli2@4374 1652 memcpy(cmsg.msg, &st, sizeof(st));
mwilli2@4374 1653 ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
mwilli2@4374 1654 }
mwilli2@4374 1655
mwilli2@4374 1656 void usbif_resume(void)
mwilli2@4374 1657 {
mwilli2@4374 1658 int i;
mwilli2@4374 1659
mwilli2@4374 1660 /* Fake disconnection on all virtual USB ports (suspending / migrating
mwilli2@4374 1661 * will destroy hard state associated will the USB devices anyhow). */
mwilli2@4374 1662 /* No need to lock here. */
mwilli2@4374 1663 for ( i = 0; i < xhci->rh.numports; i++ )
mwilli2@4374 1664 {
mwilli2@4374 1665 xhci->rh.ports[i].cs = 0;
mwilli2@4374 1666 xhci->rh.ports[i].cs_chg = 1;
mwilli2@4374 1667 xhci->rh.ports[i].pe = 0;
mwilli2@4374 1668 }
mwilli2@4374 1669
mwilli2@4374 1670 send_driver_up();
mwilli2@4374 1671 }
cl349@4113 1672
cl349@4113 1673 static int __init xhci_hcd_init(void)
cl349@4113 1674 {
cl349@4113 1675 int retval = -ENOMEM, i;
cl349@4113 1676
cl349@6618 1677 if ( (xen_start_info->flags & SIF_INITDOMAIN) ||
cl349@6618 1678 (xen_start_info->flags & SIF_USB_BE_DOMAIN) )
cl349@4113 1679 return 0;
cl349@4113 1680
cl349@4113 1681 info(DRIVER_DESC " " DRIVER_VERSION);
cl349@4113 1682
cl349@4113 1683 if (debug) {
cl349@4113 1684 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
cl349@4113 1685 if (!errbuf)
cl349@4113 1686 goto errbuf_failed;
cl349@4113 1687 }
cl349@4113 1688
cl349@4113 1689 xhci_up_cachep = kmem_cache_create("xhci_urb_priv",
cl349@4113 1690 sizeof(struct urb_priv), 0, 0, NULL, NULL);
cl349@4113 1691 if (!xhci_up_cachep)
cl349@4113 1692 goto up_failed;
cl349@4113 1693
cl349@4113 1694 /* Let the domain controller know we're here. For now we wait until
cl349@4113 1695 * connection, as for the block and net drivers. This is only strictly
cl349@4113 1696 * necessary if we're going to boot off a USB device. */
cl349@4113 1697 printk(KERN_INFO "Initialising Xen virtual USB hub\n");
cl349@4113 1698
cl349@4113 1699 (void)ctrl_if_register_receiver(CMSG_USBIF_FE, usbif_ctrlif_rx,
cl349@4113 1700 CALLBACK_IN_BLOCKING_CONTEXT);
cl349@4113 1701
cl349@4113 1702 alloc_xhci();
cl349@4113 1703
mwilli2@4374 1704 send_driver_up();
mwilli2@4374 1705
cl349@4113 1706 /*
cl349@4113 1707 * We should read 'nr_interfaces' from response message and wait
cl349@4113 1708 * for notifications before proceeding. For now we assume that we
cl349@4113 1709 * will be notified of exactly one interface.
cl349@4113 1710 */
cl349@4113 1711 for ( i=0; (xhci->state != USBIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
cl349@4113 1712 {
cl349@4113 1713 set_current_state(TASK_INTERRUPTIBLE);
cl349@4113 1714 schedule_timeout(1);
cl349@4113 1715 }
cl349@4113 1716
cl349@4113 1717 if (xhci->state != USBIF_STATE_CONNECTED)
cl349@4113 1718 printk(KERN_WARNING "Timeout connecting USB frontend driver!\n");
cl349@4113 1719
cl349@4113 1720 return 0;
cl349@4113 1721
cl349@4113 1722 up_failed:
cl349@4113 1723 if (errbuf)
cl349@4113 1724 kfree(errbuf);
cl349@4113 1725
cl349@4113 1726 errbuf_failed:
cl349@4113 1727 return retval;
cl349@4113 1728 }
cl349@4113 1729
/* Module entry point and metadata.  NOTE(review): the file header lists
 * "support for building / using as a module" as still to be implemented,
 * so this driver is presumably built-in only -- confirm against Kconfig. */
module_init(xhci_hcd_init);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
