| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-10 15:05:02 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-10 15:05:02 -0700 |
| commit | 9895850b23886e030cd1e7241d5529a57e969c3d (patch) | |
| tree | 1061626db450aeb72dcfcd247c24b33e5238c8c4 /drivers/usb/host/xhci.c | |
| parent | fc385c313275b114bc6ad36e60c5177d63250548 (diff) | |
| parent | b58af4066d240b18b43f202e07b9ec7461d90b17 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (148 commits)
USB: serial: fix stalled writes
USB: remove fake "address-of" expressions
USB: fix thread-unsafe anchor utility routines
USB: usbtest: support test device with only one iso-in or iso-out endpoint
USB: usbtest: avoid to free coherent buffer in atomic context
USB: xhci: Set DMA mask for host.
USB: xhci: Don't flush doorbell writes.
USB: xhci: Reduce reads and writes of interrupter registers.
USB: xhci: Make xhci_set_hc_event_deq() static.
USB: xhci: Minimize HW event ring dequeue pointer writes.
USB: xhci: Make xhci_handle_event() static.
USB: xhci: Remove unnecessary reads of IRQ_PENDING register.
USB: xhci: Performance - move xhci_work() into xhci_irq()
USB: xhci: Performance - move interrupt handlers into xhci-ring.c
USB: xhci: Performance - move functions that find ep ring.
USB:: fix linux/usb.h kernel-doc warnings
USB: add USB serial ssu100 driver
USB: usb-storage: implement autosuspend
USB: ehci: fix remove of ehci debugfs dir
USB: Add USB 2.0 to ssb ohci driver
...
Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r-- | drivers/usb/host/xhci.c | 344 |
1 file changed, 202 insertions, 142 deletions
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3998f72cd0c..d5c550ea3e6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -20,6 +20,7 @@
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/pci.h>
 #include <linux/irq.h>
 #include <linux/log2.h>
 #include <linux/module.h>
@@ -171,22 +172,84 @@ int xhci_reset(struct xhci_hcd *xhci)
 	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
 }
 
+/*
+ * Free IRQs
+ * free all IRQs request
+ */
+static void xhci_free_irq(struct xhci_hcd *xhci)
+{
+	int i;
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
-#if 0
-/* Set up MSI-X table for entry 0 (may claim other entries later) */
-static int xhci_setup_msix(struct xhci_hcd *xhci)
+	/* return if using legacy interrupt */
+	if (xhci_to_hcd(xhci)->irq >= 0)
+		return;
+
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			if (xhci->msix_entries[i].vector)
+				free_irq(xhci->msix_entries[i].vector,
+						xhci_to_hcd(xhci));
+	} else if (pdev->irq >= 0)
+		free_irq(pdev->irq, xhci_to_hcd(xhci));
+
+	return;
+}
+
+/*
+ * Set up MSI
+ */
+static int xhci_setup_msi(struct xhci_hcd *xhci)
 {
 	int ret;
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+	ret = pci_enable_msi(pdev);
+	if (ret) {
+		xhci_err(xhci, "failed to allocate MSI entry\n");
+		return ret;
+	}
+
+	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+				0, "xhci_hcd", xhci_to_hcd(xhci));
+	if (ret) {
+		xhci_err(xhci, "disable MSI interrupt\n");
+		pci_disable_msi(pdev);
+	}
+
+	return ret;
+}
+
+/*
+ * Set up MSI-X
+ */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+	int i, ret = 0;
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
-	xhci->msix_count = 0;
-	/* XXX: did I do this right? ixgbe does kcalloc for more than one */
-	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+	/*
+	 * calculate number of msi-x vectors supported.
+	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
+	 *   with max number of interrupters based on the xhci HCSPARAMS1.
+	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
+	 *   Add additional 1 vector to ensure always available interrupt.
+	 */
+	xhci->msix_count = min(num_online_cpus() + 1,
+				HCS_MAX_INTRS(xhci->hcs_params1));
+
+	xhci->msix_entries =
+		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
+				GFP_KERNEL);
 	if (!xhci->msix_entries) {
 		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
 		return -ENOMEM;
 	}
-	xhci->msix_entries[0].entry = 0;
+
+	for (i = 0; i < xhci->msix_count; i++) {
+		xhci->msix_entries[i].entry = i;
+		xhci->msix_entries[i].vector = 0;
+	}
 
 	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
 	if (ret) {
@@ -194,20 +257,19 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 		goto free_entries;
 	}
 
-	/*
-	 * Pass the xhci pointer value as the request_irq "cookie".
-	 * If more irqs are added, this will need to be unique for each one.
-	 */
-	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
-			"xHCI", xhci_to_hcd(xhci));
-	if (ret) {
-		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
-		goto disable_msix;
+	for (i = 0; i < xhci->msix_count; i++) {
+		ret = request_irq(xhci->msix_entries[i].vector,
+				(irq_handler_t)xhci_msi_irq,
+				0, "xhci_hcd", xhci_to_hcd(xhci));
+		if (ret)
+			goto disable_msix;
 	}
-	xhci_dbg(xhci, "Finished setting up MSI-X\n");
-	return 0;
+
+	return ret;
 
 disable_msix:
+	xhci_err(xhci, "disable MSI-X interrupt\n");
+	xhci_free_irq(xhci);
 	pci_disable_msix(pdev);
 free_entries:
 	kfree(xhci->msix_entries);
@@ -215,21 +277,23 @@ free_entries:
 	return ret;
 }
 
-/* XXX: code duplication; can xhci_setup_msix call this? */
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
-	if (!xhci->msix_entries)
-		return;
-	free_irq(xhci->msix_entries[0].vector, xhci);
-	pci_disable_msix(pdev);
-	kfree(xhci->msix_entries);
-	xhci->msix_entries = NULL;
-	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+	xhci_free_irq(xhci);
+
+	if (xhci->msix_entries) {
+		pci_disable_msix(pdev);
+		kfree(xhci->msix_entries);
+		xhci->msix_entries = NULL;
+	} else {
+		pci_disable_msi(pdev);
+	}
+
+	return;
 }
-#endif
 
 /*
  * Initialize memory for HCD and xHC (one-time init).
@@ -257,100 +321,8 @@ int xhci_init(struct usb_hcd *hcd)
 	return retval;
 }
 
-/*
- * Called in interrupt context when there might be work
- * queued on the event ring
- *
- * xhci->lock must be held by caller.
- */
-static void xhci_work(struct xhci_hcd *xhci)
-{
-	u32 temp;
-	u64 temp_64;
-
-	/*
-	 * Clear the op reg interrupt status first,
-	 * so we can receive interrupts from other MSI-X interrupters.
-	 * Write 1 to clear the interrupt status.
-	 */
-	temp = xhci_readl(xhci, &xhci->op_regs->status);
-	temp |= STS_EINT;
-	xhci_writel(xhci, temp, &xhci->op_regs->status);
-	/* FIXME when MSI-X is supported and there are multiple vectors */
-	/* Clear the MSI-X event interrupt status */
-
-	/* Acknowledge the interrupt */
-	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	temp |= 0x3;
-	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
-	/* Flush posted writes */
-	xhci_readl(xhci, &xhci->ir_set->irq_pending);
-
-	if (xhci->xhc_state & XHCI_STATE_DYING)
-		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
-				"Shouldn't IRQs be disabled?\n");
-	else
-		/* FIXME this should be a delayed service routine
-		 * that clears the EHB.
-		 */
-		xhci_handle_event(xhci);
-
-	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
-	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
-	/* Flush posted writes -- FIXME is this necessary? */
-	xhci_readl(xhci, &xhci->ir_set->irq_pending);
-}
-
 /*-------------------------------------------------------------------------*/
 
-/*
- * xHCI spec says we can get an interrupt, and if the HC has an error condition,
- * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
- * indicators of an event TRB error, but we check the status *first* to be safe.
- */
-irqreturn_t xhci_irq(struct usb_hcd *hcd)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	u32 temp, temp2;
-	union xhci_trb *trb;
-
-	spin_lock(&xhci->lock);
-	trb = xhci->event_ring->dequeue;
-	/* Check if the xHC generated the interrupt, or the irq is shared */
-	temp = xhci_readl(xhci, &xhci->op_regs->status);
-	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	if (temp == 0xffffffff && temp2 == 0xffffffff)
-		goto hw_died;
-
-	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
-		spin_unlock(&xhci->lock);
-		return IRQ_NONE;
-	}
-	xhci_dbg(xhci, "op reg status = %08x\n", temp);
-	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
-	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
-	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
-			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
-			lower_32_bits(trb->link.segment_ptr),
-			upper_32_bits(trb->link.segment_ptr),
-			(unsigned int) trb->link.intr_target,
-			(unsigned int) trb->link.control);
-
-	if (temp & STS_FATAL) {
-		xhci_warn(xhci, "WARNING: Host System Error\n");
-		xhci_halt(xhci);
-hw_died:
-		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
-		spin_unlock(&xhci->lock);
-		return -ESHUTDOWN;
-	}
-
-	xhci_work(xhci);
-	spin_unlock(&xhci->lock);
-
-	return IRQ_HANDLED;
-}
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 void xhci_event_ring_work(unsigned long arg)
@@ -423,21 +395,36 @@ int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
 	u64 temp_64;
+	u32 ret;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 	void (*doorbell)(struct xhci_hcd *) = NULL;
 
 	hcd->uses_new_polling = 1;
-	hcd->poll_rh = 0;
 
 	xhci_dbg(xhci, "xhci_run\n");
-#if 0	/* FIXME: MSI not setup yet */
-	/* Do this at the very last minute */
+	/* unregister the legacy interrupt */
+	if (hcd->irq)
+		free_irq(hcd->irq, hcd);
+	hcd->irq = -1;
+
 	ret = xhci_setup_msix(xhci);
-	if (!ret)
-		return ret;
+	if (ret)
+		/* fall back to msi*/
+		ret = xhci_setup_msi(xhci);
+
+	if (ret) {
+		/* fall back to legacy interrupt*/
+		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+				hcd->irq_descr, hcd);
+		if (ret) {
+			xhci_err(xhci, "request interrupt %d failed\n",
+					pdev->irq);
+			return ret;
+		}
+		hcd->irq = pdev->irq;
+	}
 
-	return -ENOSYS;
-#endif
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	init_timer(&xhci->event_ring_timer);
 	xhci->event_ring_timer.data = (unsigned long) xhci;
@@ -495,7 +482,6 @@ int xhci_run(struct usb_hcd *hcd)
 		return -ENODEV;
 	}
 
-	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
 	if (doorbell)
 		(*doorbell)(xhci);
 	if (xhci->quirks & XHCI_NEC_HOST)
@@ -522,11 +508,9 @@ void xhci_stop(struct usb_hcd *hcd)
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
 	xhci_reset(xhci);
+	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
-#if 0	/* No MSI yet */
-	xhci_cleanup_msix(xhci);
-#endif
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	/* Tell the event ring poll function not to reschedule */
 	xhci->zombie = 1;
@@ -560,11 +544,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
-	spin_unlock_irq(&xhci->lock);
-
-#if 0
 	xhci_cleanup_msix(xhci);
-#endif
+	spin_unlock_irq(&xhci->lock);
 
 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
 		    xhci_readl(xhci, &xhci->op_regs->status));
@@ -720,7 +701,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	unsigned long flags;
 	int ret = 0;
 	unsigned int slot_id, ep_index;
-
+	struct urb_priv *urb_priv;
+	int size, i;
 
 	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
 		return -EINVAL;
@@ -734,12 +716,36 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = -EINVAL;
 		goto exit;
 	}
-	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
 		if (!in_interrupt())
 			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
 		ret = -ESHUTDOWN;
 		goto exit;
 	}
+
+	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+		size = urb->number_of_packets;
+	else
+		size = 1;
+
+	urb_priv = kzalloc(sizeof(struct urb_priv) +
+				size * sizeof(struct xhci_td *), mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++) {
+		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
+		if (!urb_priv->td[i]) {
+			urb_priv->length = i;
+			xhci_urb_free_priv(xhci, urb_priv);
+			return -ENOMEM;
+		}
+	}
+
+	urb_priv->length = size;
+	urb_priv->td_cnt = 0;
+	urb->hcpriv = urb_priv;
+
 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
 		/* Check to see if the max packet size for the default control
 		 * endpoint changed during FS device enumeration
@@ -788,11 +794,18 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else {
-		ret = -EINVAL;
+		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
+		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
+				slot_id, ep_index);
+		spin_unlock_irqrestore(&xhci->lock, flags);
 	}
 exit:
 	return ret;
 dying:
+	xhci_urb_free_priv(xhci, urb_priv);
+	urb->hcpriv = NULL;
 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
 			"non-responsive xHCI host.\n",
 			urb->ep->desc.bEndpointAddress, urb);
@@ -800,6 +813,47 @@ dying:
 	return -ESHUTDOWN;
 }
 
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+		struct urb *urb)
+{
+	unsigned int slot_id;
+	unsigned int ep_index;
+	unsigned int stream_id;
+	struct xhci_virt_ep *ep;
+
+	slot_id = urb->dev->slot_id;
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+	stream_id = urb->stream_id;
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	/* Common case: no streams */
+	if (!(ep->ep_state & EP_HAS_STREAMS))
+		return ep->ring;
+
+	if (stream_id == 0) {
+		xhci_warn(xhci,
+				"WARN: Slot ID %u, ep index %u has streams, "
+				"but URB has no stream ID.\n",
+				slot_id, ep_index);
+		return NULL;
+	}
+
+	if (stream_id < ep->stream_info->num_streams)
+		return ep->stream_info->stream_rings[stream_id];
+
+	xhci_warn(xhci,
+			"WARN: Slot ID %u, ep index %u has "
+			"stream IDs 1 to %u allocated, "
+			"but stream ID %u is requested.\n",
+			slot_id, ep_index,
+			ep->stream_info->num_streams - 1,
+			stream_id);
+	return NULL;
+}
+
 /*
  * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
  * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
@@ -834,9 +888,10 @@ dying:
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	unsigned long flags;
-	int ret;
+	int ret, i;
 	u32 temp;
 	struct xhci_hcd *xhci;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
@@ -851,12 +906,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	if (temp == 0xffffffff) {
 		xhci_dbg(xhci, "HW died, freeing TD.\n");
-		td = (struct xhci_td *) urb->hcpriv;
+		urb_priv = urb->hcpriv;
 
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
-		kfree(td);
+		xhci_urb_free_priv(xhci, urb_priv);
 		return ret;
 	}
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -884,9 +939,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	xhci_dbg(xhci, "Endpoint ring:\n");
 	xhci_debug_ring(xhci, ep_ring);
-	td = (struct xhci_td *) urb->hcpriv;
-	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	urb_priv = urb->hcpriv;
+
+	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+		td = urb_priv->td[i];
+		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	}
+
 	/* Queue a stop endpoint command, but only if this is
 	 * the first cancellation to be handled.
 	 */
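
The interrupt changes merged above replace the old `#if 0` MSI-X stub with a working setup path: xhci_run() now drops the legacy interrupt that usb_add_hcd() registered, tries MSI-X, falls back to MSI, and only then re-registers a shared legacy IRQ. The sketch below condenses that fallback order outside the driver, assuming the 2.6.36-era PCI MSI API; `setup_interrupts`, `demo_msix_handler` and `demo_intx_handler` are hypothetical names, not part of xhci.c.

```c
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical handlers standing in for the driver's real ISRs. */
static irqreturn_t demo_msix_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static irqreturn_t demo_intx_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/*
 * Condensed sketch of the MSI-X -> MSI -> legacy INTx fallback order used
 * by the reworked xhci_run() above (2.6.36-era PCI MSI API).
 */
static int setup_interrupts(struct pci_dev *pdev, struct msix_entry *entries,
		int count, void *ctx)
{
	int i, ret;

	for (i = 0; i < count; i++)
		entries[i].entry = i;

	/* First choice: one MSI-X vector per entry. */
	ret = pci_enable_msix(pdev, entries, count);
	if (!ret) {
		for (i = 0; i < count; i++) {
			ret = request_irq(entries[i].vector, demo_msix_handler,
					0, "demo_msix", ctx);
			if (ret)
				goto free_msix;
		}
		return 0;
	}

	/* Second choice: a single MSI vector. */
	if (!pci_enable_msi(pdev)) {
		ret = request_irq(pdev->irq, demo_msix_handler, 0,
				"demo_msi", ctx);
		if (!ret)
			return 0;
		pci_disable_msi(pdev);
	}

	/* Last resort: a shared legacy (INTx) interrupt. */
	return request_irq(pdev->irq, demo_intx_handler, IRQF_SHARED,
			"demo_intx", ctx);

free_msix:
	/* Undo any vectors already requested before disabling MSI-X. */
	while (--i >= 0)
		free_irq(entries[i].vector, ctx);
	pci_disable_msix(pdev);
	return ret;
}
```

Teardown runs in the reverse order: free the requested vectors first, then call pci_disable_msix() or pci_disable_msi(), which is exactly the split the new xhci_free_irq()/xhci_cleanup_msix() pair implements.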
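The enqueue/dequeue changes stop storing a single `struct xhci_td *` in `urb->hcpriv` and instead attach a per-URB private structure with one TD slot per isochronous packet, so cancellation can walk every outstanding TD. A minimal sketch of that flexible-array container pattern follows; `struct demo_priv`, `struct demo_td` and the helper names are hypothetical stand-ins, not the driver's `struct urb_priv` or `xhci_urb_free_priv()`.

```c
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical per-packet bookkeeping, standing in for struct xhci_td. */
struct demo_td {
	struct list_head cancelled_list;
};

/* Per-URB container: header plus one TD pointer per isochronous packet. */
struct demo_priv {
	int length;			/* TDs allocated */
	int td_cnt;			/* TDs already completed */
	struct demo_td *td[0];		/* 2.6.36-era flexible array */
};

static struct demo_priv *demo_priv_alloc(int packets, gfp_t mem_flags)
{
	struct demo_priv *priv;
	int i;

	/* One allocation sized by the packet count, as in xhci_urb_enqueue(). */
	priv = kzalloc(sizeof(*priv) + packets * sizeof(struct demo_td *),
			mem_flags);
	if (!priv)
		return NULL;

	for (i = 0; i < packets; i++) {
		priv->td[i] = kzalloc(sizeof(struct demo_td), mem_flags);
		if (!priv->td[i])
			goto unwind;
		INIT_LIST_HEAD(&priv->td[i]->cancelled_list);
	}
	priv->length = packets;
	return priv;

unwind:
	/* Free the TDs allocated so far, then the container itself. */
	while (--i >= 0)
		kfree(priv->td[i]);
	kfree(priv);
	return NULL;
}

static void demo_priv_free(struct demo_priv *priv)
{
	int i;

	for (i = 0; i < priv->length; i++)
		kfree(priv->td[i]);
	kfree(priv);
}
```

Keeping the pointer array inline with the header means the whole bookkeeping block comes from one kzalloc() sized by the packet count, mirroring the `sizeof(struct urb_priv) + size * sizeof(struct xhci_td *)` allocation in the diff, with `length`/`td_cnt` marking how far allocation and completion have progressed.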