Diffstat (limited to 'drivers/usb/host'): 69 files changed, 8294 insertions, 1252 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 4263d011392..5be0326aae3 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -29,15 +29,6 @@ if USB_XHCI_HCD config USB_XHCI_PLATFORM tristate -config USB_XHCI_HCD_DEBUGGING - bool "Debugging for the xHCI host controller" - ---help--- - Say 'Y' to turn on debugging for the xHCI host controller driver. - This will spew debugging output, even in interrupt context. - This should only be used for debugging xHCI driver bugs. - - If unsure, say N. - endif # USB_XHCI_HCD config USB_EHCI_HCD @@ -113,12 +104,6 @@ config USB_EHCI_HCD_PMC_MSP Enables support for the onchip USB controller on the PMC_MSP7100 Family SoC's. If unsure, say N. -config USB_EHCI_BIG_ENDIAN_MMIO - bool - -config USB_EHCI_BIG_ENDIAN_DESC - bool - config XPS_USB_HCD_XILINX bool "Use Xilinx usb host EHCI controller core" depends on (PPC32 || MICROBLAZE) @@ -148,13 +133,11 @@ config USB_EHCI_MXC config USB_EHCI_HCD_OMAP tristate "EHCI support for OMAP3 and later chips" depends on ARCH_OMAP + select NOP_USB_XCEIV default y ---help--- Enables support for the on-chip EHCI controller on OMAP3 and later chips. - If your system uses a PHY on the USB port, you will need to - enable USB_PHY and the appropriate PHY driver as well. Most - boards need the NOP_USB_XCEIV PHY driver. config USB_EHCI_HCD_ORION tristate "Support for Marvell EBU on-chip EHCI USB controller" @@ -186,7 +169,6 @@ config USB_EHCI_HCD_AT91 config USB_EHCI_MSM tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller" depends on ARCH_MSM - depends on USB_PHY select USB_EHCI_ROOT_HUB_TT select USB_MSM_OTG ---help--- @@ -354,6 +336,18 @@ config USB_FUSBH200_HCD To compile this driver as a module, choose M here: the module will be called fusbh200-hcd. +config USB_FOTG210_HCD + tristate "FOTG210 HCD support" + depends on USB + default N + ---help--- + Faraday FOTG210 is an OTG controller which can be configured as + an USB2.0 host. It is designed to meet USB2.0 EHCI specification + with minor modification. + + To compile this driver as a module, choose M here: the + module will be called fotg210-hcd. + config USB_OHCI_HCD tristate "OHCI HCD (USB 1.1) support" select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 @@ -497,20 +491,6 @@ config USB_OCTEON_OHCI controller. It is needed for low-speed USB 1.0 device support. All CN6XXX based chips with USB are supported. - -config USB_OHCI_BIG_ENDIAN_DESC - bool - default n - -config USB_OHCI_BIG_ENDIAN_MMIO - bool - default n - -config USB_OHCI_LITTLE_ENDIAN - bool - default n if STB03xxx || PPC_MPC52xx - default y - endif # USB_OHCI_HCD config USB_UHCI_HCD @@ -710,3 +690,20 @@ config USB_HCD_SSB for ehci and ohci. If unsure, say N. + +config USB_HCD_TEST_MODE + bool "HCD test mode support" + ---help--- + Say 'Y' to enable additional software test modes that may be + supported by the host controller drivers. + + One such test mode is the Embedded High-speed Host Electrical Test + (EHSET) for EHCI host controller hardware, specifically the "Single + Step Set Feature" test. Typically this will be enabled for On-the-Go + or embedded hosts that need to undergo USB-IF compliance testing with + the aid of special testing hardware. In the future, this may expand + to include other tests that require support from a HCD driver. + + This option is of interest only to developers who need to validate + their USB hardware designs. It is not needed for normal use. If + unsure, say N. 
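Note: alongside the removed symbols, the Kconfig hunks above introduce two new options: USB_FOTG210_HCD (a tristate driver for the Faraday FOTG210 host) and USB_HCD_TEST_MODE (a bool that enables the EHSET "Single Step Set Feature" support wired into ehci-hub.c and ehci-q.c later in this diff). As an illustrative sketch only, not part of the patch, a .config enabling both might contain:

    # hypothetical .config fragment; option names taken from the hunks above
    CONFIG_USB_FOTG210_HCD=m
    CONFIG_USB_HCD_TEST_MODE=y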
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index bea71127b15..50b0041c09a 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -4,6 +4,9 @@ ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG +# tell define_trace.h where to find the xhci trace header +CFLAGS_xhci-trace.o := -I$(src) + isp1760-y := isp1760-hcd.o isp1760-if.o fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o @@ -13,6 +16,7 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o xhci-hcd-y := xhci.o xhci-mem.o xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o +xhci-hcd-y += xhci-trace.o xhci-hcd-$(CONFIG_PCI) += xhci-pci.o ifneq ($(CONFIG_USB_XHCI_PLATFORM), ) @@ -58,3 +62,4 @@ obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o +obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index 5429d2645bb..aa5b603f393 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -18,7 +18,7 @@ /* this file is part of ehci-hcd.c */ -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* check the values in the HCSPARAMS register * (host controller _Structural_ parameters) @@ -62,7 +62,7 @@ static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {} #endif -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* check the values in the HCCPARAMS register * (host controller _Capability_ parameters) @@ -101,7 +101,7 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {} #endif -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) static void __maybe_unused dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd) @@ -301,7 +301,7 @@ static inline int __maybe_unused dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status) { return 0; } -#endif /* DEBUG */ +#endif /* DEBUG || CONFIG_DYNAMIC_DEBUG */ /* functions have the "wrong" filename when they're output... 
*/ #define dbg_status(ehci, label, status) { \ @@ -336,7 +336,6 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { } static int debug_async_open(struct inode *, struct file *); static int debug_periodic_open(struct inode *, struct file *); static int debug_registers_open(struct inode *, struct file *); -static int debug_async_open(struct inode *, struct file *); static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*); static int debug_close(struct inode *, struct file *); diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index bd831ec06dc..947b009009f 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -57,7 +57,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver, pr_debug("initializing FSL-SOC USB Controller\n"); /* Need platform data for setup */ - pdata = (struct fsl_usb2_platform_data *)pdev->dev.platform_data; + pdata = (struct fsl_usb2_platform_data *)dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "No platform data for %s.\n", dev_name(&pdev->dev)); @@ -190,7 +190,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver, static void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev) { - struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; + struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); if (!IS_ERR_OR_NULL(hcd->phy)) { otg_set_host(hcd->phy->otg, NULL); @@ -218,7 +218,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd, struct ehci_hcd *ehci = hcd_to_ehci(hcd); void __iomem *non_ehci = hcd->regs; struct device *dev = hcd->self.controller; - struct fsl_usb2_platform_data *pdata = dev->platform_data; + struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev); if (pdata->controller_ver < 0) { dev_warn(hcd->self.controller, "Could not get controller version\n"); @@ -291,7 +291,7 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci) struct fsl_usb2_platform_data *pdata; void __iomem *non_ehci = hcd->regs; - pdata = hcd->self.controller->platform_data; + pdata = dev_get_platdata(hcd->self.controller); if (pdata->have_sysif_regs) { /* @@ -363,7 +363,7 @@ static int ehci_fsl_setup(struct usb_hcd *hcd) struct device *dev; dev = hcd->self.controller; - pdata = hcd->self.controller->platform_data; + pdata = dev_get_platdata(hcd->self.controller); ehci->big_endian_desc = pdata->big_endian_desc; ehci->big_endian_mmio = pdata->big_endian_mmio; @@ -415,10 +415,10 @@ static int ehci_fsl_mpc512x_drv_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); - struct fsl_usb2_platform_data *pdata = dev->platform_data; + struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev); u32 tmp; -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE); mode &= USBMODE_CM_MASK; tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */ @@ -484,7 +484,7 @@ static int ehci_fsl_mpc512x_drv_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); - struct fsl_usb2_platform_data *pdata = dev->platform_data; + struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev); u32 tmp; dev_dbg(dev, "suspend=%d already_suspended=%d\n", @@ -669,7 +669,7 @@ static const struct hc_driver ehci_fsl_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_USB2 | HCD_MEMORY, + .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, /* * basic lifecycle operations diff 
--git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c index a77bd8dc33f..b52a66ce92e 100644 --- a/drivers/usb/host/ehci-grlib.c +++ b/drivers/usb/host/ehci-grlib.c @@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations @@ -167,15 +167,6 @@ static int ehci_hcd_grlib_remove(struct platform_device *op) } -static void ehci_hcd_grlib_shutdown(struct platform_device *op) -{ - struct usb_hcd *hcd = platform_get_drvdata(op); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - - static const struct of_device_id ehci_hcd_grlib_of_match[] = { { .name = "GAISLER_EHCI", @@ -191,7 +182,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match); static struct platform_driver ehci_grlib_driver = { .probe = ehci_hcd_grlib_probe, .remove = ehci_hcd_grlib_remove, - .shutdown = ehci_hcd_grlib_shutdown, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "grlib-ehci", .owner = THIS_MODULE, diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 7abf1ce3a67..5d6022f30eb 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -440,14 +440,6 @@ static void ehci_stop (struct usb_hcd *hcd) if (ehci->amd_pll_fix == 1) usb_amd_dev_put(); -#ifdef EHCI_STATS - ehci_dbg(ehci, "irq normal %ld err %ld iaa %ld (lost %ld)\n", - ehci->stats.normal, ehci->stats.error, ehci->stats.iaa, - ehci->stats.lost_iaa); - ehci_dbg (ehci, "complete %ld unlink %ld\n", - ehci->stats.complete, ehci->stats.unlink); -#endif - dbg_status (ehci, "ehci_stop completed", ehci_readl(ehci, &ehci->regs->status)); } @@ -487,6 +479,7 @@ static int ehci_init(struct usb_hcd *hcd) ehci->periodic_size = DEFAULT_I_TDPS; INIT_LIST_HEAD(&ehci->async_unlink); INIT_LIST_HEAD(&ehci->async_idle); + INIT_LIST_HEAD(&ehci->intr_unlink_wait); INIT_LIST_HEAD(&ehci->intr_unlink); INIT_LIST_HEAD(&ehci->intr_qh_list); INIT_LIST_HEAD(&ehci->cached_itd_list); @@ -942,7 +935,7 @@ ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); unsigned long flags; - struct ehci_qh *qh, *tmp; + struct ehci_qh *qh; /* ASSERT: any requests/urbs are being unlinked */ /* ASSERT: nobody can be submitting urbs for this any more */ @@ -972,17 +965,13 @@ rescan: qh->qh_state = QH_STATE_IDLE; switch (qh->qh_state) { case QH_STATE_LINKED: - case QH_STATE_COMPLETING: - for (tmp = ehci->async->qh_next.qh; - tmp && tmp != qh; - tmp = tmp->qh_next.qh) - continue; - /* periodic qh self-unlinks on empty, and a COMPLETING qh - * may already be unlinked. - */ - if (tmp) + WARN_ON(!list_empty(&qh->qtd_list)); + if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT) start_unlink_async(ehci, qh); + else + start_unlink_intr(ehci, qh); /* FALL THROUGH */ + case QH_STATE_COMPLETING: /* already in unlinking */ case QH_STATE_UNLINK: /* wait for hw to finish? 
*/ case QH_STATE_UNLINK_WAIT: idle_timeout: @@ -1169,7 +1158,7 @@ static const struct hc_driver ehci_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations @@ -1303,7 +1292,7 @@ static int __init ehci_hcd_init(void) sizeof(struct ehci_qh), sizeof(struct ehci_qtd), sizeof(struct ehci_itd), sizeof(struct ehci_sitd)); -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root); if (!ehci_debug_root) { retval = -ENOENT; @@ -1352,7 +1341,7 @@ clean2: platform_driver_unregister(&PLATFORM_DRIVER); clean0: #endif -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) debugfs_remove(ehci_debug_root); ehci_debug_root = NULL; err_debug: @@ -1376,7 +1365,7 @@ static void __exit ehci_hcd_cleanup(void) #ifdef PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); #endif -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) debugfs_remove(ehci_debug_root); #endif clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 2b702772d04..835fc0844a6 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -183,7 +183,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, spin_lock_irq(&ehci->lock); /* clear phy low-power mode before changing wakeup flags */ - if (ehci->has_hostpc) { + if (ehci->has_tdi_phy_lpm) { port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; @@ -211,13 +211,11 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, else t2 |= PORT_WKOC_E | PORT_WKCONN_E; } - ehci_vdbg(ehci, "port %d, %08x -> %08x\n", - port + 1, t1, t2); ehci_writel(ehci, t2, reg); } /* enter phy low-power mode again */ - if (ehci->has_hostpc) { + if (ehci->has_tdi_phy_lpm) { port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; @@ -302,14 +300,12 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) } if (t1 != t2) { - ehci_vdbg (ehci, "port %d, %08x -> %08x\n", - port + 1, t1, t2); ehci_writel(ehci, t2, reg); changed = 1; } } - if (changed && ehci->has_hostpc) { + if (changed && ehci->has_tdi_phy_lpm) { spin_unlock_irq(&ehci->lock); msleep(5); /* 5 ms for HCD to enter low-power mode */ spin_lock_irq(&ehci->lock); @@ -345,6 +341,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) end_unlink_async(ehci); unlink_empty_async_suspended(ehci); + ehci_handle_start_intr_unlinks(ehci); ehci_handle_intr_unlinks(ehci); end_free_itds(ehci); @@ -435,7 +432,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd) goto shutdown; /* clear phy low-power mode before resume */ - if (ehci->bus_suspended && ehci->has_hostpc) { + if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) { i = HCS_N_PORTS(ehci->hcs_params); while (i--) { if (test_bit(i, &ehci->bus_suspended)) { @@ -482,7 +479,6 @@ static int ehci_bus_resume (struct usb_hcd *hcd) if (test_bit(i, &resume_needed)) { temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME); ehci_writel(ehci, temp, &ehci->regs->port_status [i]); - ehci_vdbg (ehci, "resumed port %d\n", i + 1); } } @@ -711,6 +707,145 @@ ehci_hub_descriptor ( } /*-------------------------------------------------------------------------*/ +#ifdef CONFIG_USB_HCD_TEST_MODE + +#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06 + +static void usb_ehset_completion(struct urb *urb) +{ + struct 
completion *done = urb->context; + + complete(done); +} +static int submit_single_step_set_feature( + struct usb_hcd *hcd, + struct urb *urb, + int is_setup +); + +/* + * Allocate and initialize a control URB. This request will be used by the + * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages + * of the GetDescriptor request are sent 15 seconds after the SETUP stage. + * Return NULL if failed. + */ +static struct urb *request_single_step_set_feature_urb( + struct usb_device *udev, + void *dr, + void *buf, + struct completion *done +) { + struct urb *urb; + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + struct usb_host_endpoint *ep; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return NULL; + + urb->pipe = usb_rcvctrlpipe(udev, 0); + ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out) + [usb_pipeendpoint(urb->pipe)]; + if (!ep) { + usb_free_urb(urb); + return NULL; + } + + urb->ep = ep; + urb->dev = udev; + urb->setup_packet = (void *)dr; + urb->transfer_buffer = buf; + urb->transfer_buffer_length = USB_DT_DEVICE_SIZE; + urb->complete = usb_ehset_completion; + urb->status = -EINPROGRESS; + urb->actual_length = 0; + urb->transfer_flags = URB_DIR_IN; + usb_get_urb(urb); + atomic_inc(&urb->use_count); + atomic_inc(&urb->dev->urbnum); + urb->setup_dma = dma_map_single( + hcd->self.controller, + urb->setup_packet, + sizeof(struct usb_ctrlrequest), + DMA_TO_DEVICE); + urb->transfer_dma = dma_map_single( + hcd->self.controller, + urb->transfer_buffer, + urb->transfer_buffer_length, + DMA_FROM_DEVICE); + urb->context = done; + return urb; +} + +static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) +{ + int retval = -ENOMEM; + struct usb_ctrlrequest *dr; + struct urb *urb; + struct usb_device *udev; + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct usb_device_descriptor *buf; + DECLARE_COMPLETION_ONSTACK(done); + + /* Obtain udev of the rhub's child port */ + udev = usb_hub_find_child(hcd->self.root_hub, port); + if (!udev) { + ehci_err(ehci, "No device attached to the RootHub\n"); + return -ENODEV; + } + buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!dr) { + kfree(buf); + return -ENOMEM; + } + + /* Fill Setup packet for GetDescriptor */ + dr->bRequestType = USB_DIR_IN; + dr->bRequest = USB_REQ_GET_DESCRIPTOR; + dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8); + dr->wIndex = 0; + dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE); + urb = request_single_step_set_feature_urb(udev, dr, buf, &done); + if (!urb) + goto cleanup; + + /* Submit just the SETUP stage */ + retval = submit_single_step_set_feature(hcd, urb, 1); + if (retval) + goto out1; + if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) { + usb_kill_urb(urb); + retval = -ETIMEDOUT; + ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__); + goto out1; + } + msleep(15 * 1000); + + /* Complete remaining DATA and STATUS stages using the same URB */ + urb->status = -EINPROGRESS; + usb_get_urb(urb); + atomic_inc(&urb->use_count); + atomic_inc(&urb->dev->urbnum); + retval = submit_single_step_set_feature(hcd, urb, 0); + if (!retval && !wait_for_completion_timeout(&done, + msecs_to_jiffies(2000))) { + usb_kill_urb(urb); + retval = -ETIMEDOUT; + ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__); + } +out1: + usb_free_urb(urb); +cleanup: + kfree(dr); + kfree(buf); + return retval; +} +#endif /* CONFIG_USB_HCD_TEST_MODE */ 
+/*-------------------------------------------------------------------------*/ static int ehci_hub_control ( struct usb_hcd *hcd, @@ -788,7 +923,7 @@ static int ehci_hub_control ( goto error; /* clear phy low-power mode before resume */ - if (ehci->has_hostpc) { + if (ehci->has_tdi_phy_lpm) { temp1 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, hostpc_reg); @@ -801,6 +936,8 @@ static int ehci_hub_control ( ehci_writel(ehci, temp | PORT_RESUME, status_reg); ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(20); + set_bit(wIndex, &ehci->resuming_ports); + usb_hcd_start_port_resume(&hcd->self, wIndex); break; case USB_PORT_FEAT_C_SUSPEND: clear_bit(wIndex, &ehci->port_c_suspend); @@ -865,52 +1002,49 @@ static int ehci_hub_control ( } } - /* whoever resumes must GetPortStatus to complete it!! */ - if (temp & PORT_RESUME) { + /* no reset or resume pending */ + if (!ehci->reset_done[wIndex]) { /* Remote Wakeup received? */ - if (!ehci->reset_done[wIndex]) { + if (temp & PORT_RESUME) { /* resume signaling for 20 msec */ ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(20); usb_hcd_start_port_resume(&hcd->self, wIndex); + set_bit(wIndex, &ehci->resuming_ports); /* check the port again */ mod_timer(&ehci_to_hcd(ehci)->rh_timer, ehci->reset_done[wIndex]); } - /* resume completed? */ - else if (time_after_eq(jiffies, - ehci->reset_done[wIndex])) { - clear_bit(wIndex, &ehci->suspended_ports); - set_bit(wIndex, &ehci->port_c_suspend); - ehci->reset_done[wIndex] = 0; - usb_hcd_end_port_resume(&hcd->self, wIndex); - - /* stop resume signaling */ - temp &= ~(PORT_RWC_BITS | - PORT_SUSPEND | PORT_RESUME); - ehci_writel(ehci, temp, status_reg); - clear_bit(wIndex, &ehci->resuming_ports); - retval = ehci_handshake(ehci, status_reg, - PORT_RESUME, 0, 2000 /* 2msec */); - if (retval != 0) { - ehci_err(ehci, - "port %d resume error %d\n", + /* reset or resume not yet complete */ + } else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) { + ; /* wait until it is complete */ + + /* resume completed */ + } else if (test_bit(wIndex, &ehci->resuming_ports)) { + clear_bit(wIndex, &ehci->suspended_ports); + set_bit(wIndex, &ehci->port_c_suspend); + ehci->reset_done[wIndex] = 0; + usb_hcd_end_port_resume(&hcd->self, wIndex); + + /* stop resume signaling */ + temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME); + ehci_writel(ehci, temp, status_reg); + clear_bit(wIndex, &ehci->resuming_ports); + retval = ehci_handshake(ehci, status_reg, + PORT_RESUME, 0, 2000 /* 2msec */); + if (retval != 0) { + ehci_err(ehci, "port %d resume error %d\n", wIndex + 1, retval); - goto error; - } - temp = ehci_readl(ehci, status_reg); + goto error; } - } + temp = ehci_readl(ehci, status_reg); /* whoever resets must GetPortStatus to complete it!! */ - if ((temp & PORT_RESET) - && time_after_eq(jiffies, - ehci->reset_done[wIndex])) { + } else { status |= USB_PORT_STAT_C_RESET << 16; ehci->reset_done [wIndex] = 0; - clear_bit(wIndex, &ehci->resuming_ports); /* force reset to complete */ ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET), @@ -931,11 +1065,6 @@ static int ehci_hub_control ( ehci_readl(ehci, status_reg)); } - if (!(temp & (PORT_RESUME|PORT_RESET))) { - ehci->reset_done[wIndex] = 0; - clear_bit(wIndex, &ehci->resuming_ports); - } - /* transfer dedicated ports to the companion hc */ if ((temp & PORT_CONNECT) && test_bit(wIndex, &ehci->companion_ports)) { @@ -1031,12 +1160,12 @@ static int ehci_hub_control ( /* After above check the port must be connected. 
* Set appropriate bit thus could put phy into low power - * mode if we have hostpc feature + * mode if we have tdi_phy_lpm feature */ temp &= ~PORT_WKCONN_E; temp |= PORT_WKDISC_E | PORT_WKOC_E; ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); - if (ehci->has_hostpc) { + if (ehci->has_tdi_phy_lpm) { spin_unlock_irqrestore(&ehci->lock, flags); msleep(5);/* 5ms for HCD enter low pwr mode */ spin_lock_irqsave(&ehci->lock, flags); @@ -1056,7 +1185,7 @@ static int ehci_hub_control ( status_reg); break; case USB_PORT_FEAT_RESET: - if (temp & PORT_RESUME) + if (temp & (PORT_SUSPEND|PORT_RESUME)) goto error; /* line status bits may report this as low speed, * which can be fine if this root hub has a @@ -1070,7 +1199,6 @@ static int ehci_hub_control ( wIndex + 1); temp |= PORT_OWNER; } else { - ehci_vdbg (ehci, "port %d reset\n", wIndex + 1); temp |= PORT_RESET; temp &= ~PORT_PE; @@ -1091,6 +1219,15 @@ static int ehci_hub_control ( * about the EHCI-specific stuff. */ case USB_PORT_FEAT_TEST: +#ifdef CONFIG_USB_HCD_TEST_MODE + if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) { + spin_unlock_irqrestore(&ehci->lock, flags); + retval = ehset_single_step_set_feature(hcd, + wIndex); + spin_lock_irqsave(&ehci->lock, flags); + break; + } +#endif if (!selector || selector > 5) goto error; spin_unlock_irqrestore(&ehci->lock, flags); diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index ef2c3a1eca4..52a77734a22 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -93,6 +93,7 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) qh->qh_dma = dma; // INIT_LIST_HEAD (&qh->qh_list); INIT_LIST_HEAD (&qh->qtd_list); + INIT_LIST_HEAD(&qh->unlink_node); /* dummy td enables safe urb queuing */ qh->dummy = ehci_qtd_alloc (ehci, flags); diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index 915c2db96dc..417c10da945 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations @@ -131,7 +131,7 @@ static const struct hc_driver mv_ehci_hc_driver = { static int mv_ehci_probe(struct platform_device *pdev) { - struct mv_usb_platform_data *pdata = pdev->dev.platform_data; + struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); struct usb_hcd *hcd; struct ehci_hcd *ehci; struct ehci_hcd_mv *ehci_mv; diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index e4c34ac386c..0528dc4526c 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -49,7 +49,7 @@ static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = { static int ehci_mxc_drv_probe(struct platform_device *pdev) { - struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; + struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev); struct usb_hcd *hcd; struct resource *res; int irq, ret; @@ -174,7 +174,7 @@ err_alloc: static int ehci_mxc_drv_remove(struct platform_device *pdev) { - struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; + struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev); struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv; @@ -184,7 +184,7 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev) if (pdata 
&& pdata->exit) pdata->exit(pdev); - if (pdata->otg) + if (pdata && pdata->otg) usb_phy_shutdown(pdata->otg); clk_disable_unprepare(priv->usbclk); @@ -197,20 +197,12 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev) return 0; } -static void ehci_mxc_drv_shutdown(struct platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - MODULE_ALIAS("platform:mxc-ehci"); static struct platform_driver ehci_mxc_driver = { .probe = ehci_mxc_drv_probe, .remove = ehci_mxc_drv_remove, - .shutdown = ehci_mxc_drv_shutdown, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "mxc-ehci", }, diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c index 45cc0015841..ab0397e4d8f 100644 --- a/drivers/usb/host/ehci-octeon.c +++ b/drivers/usb/host/ehci-octeon.c @@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 9bd7dfe3315..78b01fa475b 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -100,7 +100,7 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = { static int ehci_hcd_omap_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct usbhs_omap_platform_data *pdata = dev->platform_data; + struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev); struct resource *res; struct usb_hcd *hcd; void __iomem *regs; @@ -119,7 +119,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) /* For DT boot, get platform data from parent. i.e. usbhshost */ if (dev->of_node) { - pdata = dev->parent->platform_data; + pdata = dev_get_platdata(dev->parent); dev->platform_data = pdata; } @@ -278,14 +278,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) return 0; } -static void ehci_hcd_omap_shutdown(struct platform_device *pdev) -{ - struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - static const struct of_device_id omap_ehci_dt_ids[] = { { .compatible = "ti,ehci-omap" }, { } @@ -296,7 +288,7 @@ MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids); static struct platform_driver ehci_hcd_omap_driver = { .probe = ehci_hcd_omap_probe, .remove = ehci_hcd_omap_remove, - .shutdown = ehci_hcd_omap_shutdown, + .shutdown = usb_hcd_platform_shutdown, /*.suspend = ehci_hcd_omap_suspend, */ /*.resume = ehci_hcd_omap_resume, */ .driver = { diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 1a450aa13eb..d1dfb9db5b4 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -139,7 +139,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd, static int ehci_orion_drv_probe(struct platform_device *pdev) { - struct orion_ehci_data *pd = pdev->dev.platform_data; + struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev); const struct mbus_dram_target_info *dram; struct resource *res; struct usb_hcd *hcd; diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 595d210655b..6bd299e61f5 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -315,53 +315,11 @@ done: * Also they depend on separate root hub suspend/resume. 
*/ -static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev) -{ - return pdev->class == PCI_CLASS_SERIAL_USB_EHCI && - pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == 0x1E26 || - pdev->device == 0x8C2D || - pdev->device == 0x8C26 || - pdev->device == 0x9C26); -} - -static void ehci_enable_xhci_companion(void) -{ - struct pci_dev *companion = NULL; - - /* The xHCI and EHCI controllers are not on the same PCI slot */ - for_each_pci_dev(companion) { - if (!usb_is_intel_switchable_xhci(companion)) - continue; - usb_enable_xhci_ports(companion); - return; - } -} - static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - /* The BIOS on systems with the Intel Panther Point chipset may or may - * not support xHCI natively. That means that during system resume, it - * may switch the ports back to EHCI so that users can use their - * keyboard to select a kernel from GRUB after resume from hibernate. - * - * The BIOS is supposed to remember whether the OS had xHCI ports - * enabled before resume, and switch the ports back to xHCI when the - * BIOS/OS semaphore is written, but we all know we can't trust BIOS - * writers. - * - * Unconditionally switch the ports back to xHCI after a system resume. - * We can't tell whether the EHCI or xHCI controller will be resumed - * first, so we have to do the port switchover in both drivers. Writing - * a '1' to the port switchover registers should have no effect if the - * port was already switched over. - */ - if (usb_is_intel_switchable_ehci(pdev)) - ehci_enable_xhci_companion(); - if (ehci_resume(hcd, hibernated) != 0) (void) ehci_pci_reinit(ehci, pdev); return 0; diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 5196d728517..f6b790ca8cf 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -39,7 +39,7 @@ static const char hcd_name[] = "ehci-platform"; static int ehci_platform_reset(struct usb_hcd *hcd) { struct platform_device *pdev = to_platform_device(hcd->self.controller); - struct usb_ehci_pdata *pdata = pdev->dev.platform_data; + struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); int retval; @@ -87,14 +87,14 @@ static int ehci_platform_probe(struct platform_device *dev) * use reasonable defaults so platforms don't have to provide these. * with DT probing on ARM, none of these are set. 
*/ - if (!dev->dev.platform_data) + if (!dev_get_platdata(&dev->dev)) dev->dev.platform_data = &ehci_platform_defaults; if (!dev->dev.dma_mask) dev->dev.dma_mask = &dev->dev.coherent_dma_mask; if (!dev->dev.coherent_dma_mask) dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - pdata = dev->dev.platform_data; + pdata = dev_get_platdata(&dev->dev); irq = platform_get_irq(dev, 0); if (irq < 0) { @@ -148,7 +148,7 @@ err_power: static int ehci_platform_remove(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); - struct usb_ehci_pdata *pdata = dev->dev.platform_data; + struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev); usb_remove_hcd(hcd); usb_put_hcd(hcd); @@ -167,7 +167,7 @@ static int ehci_platform_remove(struct platform_device *dev) static int ehci_platform_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct usb_ehci_pdata *pdata = dev->platform_data; + struct usb_ehci_pdata *pdata = dev_get_platdata(dev); struct platform_device *pdev = container_of(dev, struct platform_device, dev); bool do_wakeup = device_may_wakeup(dev); @@ -184,7 +184,7 @@ static int ehci_platform_suspend(struct device *dev) static int ehci_platform_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct usb_ehci_pdata *pdata = dev->platform_data; + struct usb_ehci_pdata *pdata = dev_get_platdata(dev); struct platform_device *pdev = container_of(dev, struct platform_device, dev); diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c index 601e208bd78..893b707f000 100644 --- a/drivers/usb/host/ehci-pmcmsp.c +++ b/drivers/usb/host/ehci-pmcmsp.c @@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = { #else .irq = ehci_irq, #endif - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 86da09c0f8d..6cc5567bf9c 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -28,7 +28,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations @@ -215,15 +215,6 @@ static int ehci_hcd_ppc_of_remove(struct platform_device *op) } -static void ehci_hcd_ppc_of_shutdown(struct platform_device *op) -{ - struct usb_hcd *hcd = platform_get_drvdata(op); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - - static const struct of_device_id ehci_hcd_ppc_of_match[] = { { .compatible = "usb-ehci", @@ -236,7 +227,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match); static struct platform_driver ehci_hcd_ppc_of_driver = { .probe = ehci_hcd_ppc_of_probe, .remove = ehci_hcd_ppc_of_remove, - .shutdown = ehci_hcd_ppc_of_shutdown, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ppc-of-ehci", .owner = THIS_MODULE, diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c index fd983771b02..8188542ba17 100644 --- a/drivers/usb/host/ehci-ps3.c +++ b/drivers/usb/host/ehci-ps3.c @@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = { .product_desc = "PS3 EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, .reset = ps3_ehci_hc_reset, .start = ehci_run, .stop = ehci_stop, diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 
d34b399b78e..e321804c347 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -168,13 +168,13 @@ static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh, * Note: this routine is never called for Isochronous transfers. */ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) struct usb_device *tt = urb->dev->tt->hub; dev_dbg(&tt->dev, "clear tt buffer port %d, a%d ep%d t%08x\n", urb->dev->ttport, urb->dev->devnum, usb_pipeendpoint(urb->pipe), token); -#endif /* DEBUG */ +#endif /* DEBUG || CONFIG_DYNAMIC_DEBUG */ if (!ehci_is_TDI(ehci) || urb->dev->tt->hub != ehci_to_hcd(ehci)->self.root_hub) { @@ -240,13 +240,6 @@ static int qtd_copy_status ( } else { /* unknown */ status = -EPROTO; } - - ehci_vdbg (ehci, - "dev%d ep%d%s qtd token %08x --> status %d\n", - usb_pipedevice (urb->pipe), - usb_pipeendpoint (urb->pipe), - usb_pipein (urb->pipe) ? "in" : "out", - token, status); } return status; @@ -254,8 +247,6 @@ static int qtd_copy_status ( static void ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) -__releases(ehci->lock) -__acquires(ehci->lock) { if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { /* ... update hc-wide periodic stats */ @@ -281,11 +272,8 @@ __acquires(ehci->lock) urb->actual_length, urb->transfer_buffer_length); #endif - /* complete() can reenter this HCD */ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); - spin_unlock (&ehci->lock); usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); - spin_lock (&ehci->lock); } static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); @@ -1144,6 +1132,109 @@ submit_async ( } /*-------------------------------------------------------------------------*/ +#ifdef CONFIG_USB_HCD_TEST_MODE +/* + * This function creates the qtds and submits them for the + * SINGLE_STEP_SET_FEATURE Test. 
+ * This is done in two parts: first SETUP req for GetDesc is sent then + * 15 seconds later, the IN stage for GetDesc starts to req data from dev + * + * is_setup : i/p arguement decides which of the two stage needs to be + * performed; TRUE - SETUP and FALSE - IN+STATUS + * Returns 0 if success + */ +static int submit_single_step_set_feature( + struct usb_hcd *hcd, + struct urb *urb, + int is_setup +) { + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct list_head qtd_list; + struct list_head *head; + + struct ehci_qtd *qtd, *qtd_prev; + dma_addr_t buf; + int len, maxpacket; + u32 token; + + INIT_LIST_HEAD(&qtd_list); + head = &qtd_list; + + /* URBs map to sequences of QTDs: one logical transaction */ + qtd = ehci_qtd_alloc(ehci, GFP_KERNEL); + if (unlikely(!qtd)) + return -1; + list_add_tail(&qtd->qtd_list, head); + qtd->urb = urb; + + token = QTD_STS_ACTIVE; + token |= (EHCI_TUNE_CERR << 10); + + len = urb->transfer_buffer_length; + /* + * Check if the request is to perform just the SETUP stage (getDesc) + * as in SINGLE_STEP_SET_FEATURE test, DATA stage (IN) happens + * 15 secs after the setup + */ + if (is_setup) { + /* SETUP pid */ + qtd_fill(ehci, qtd, urb->setup_dma, + sizeof(struct usb_ctrlrequest), + token | (2 /* "setup" */ << 8), 8); + + submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); + return 0; /*Return now; we shall come back after 15 seconds*/ + } + + /* + * IN: data transfer stage: buffer setup : start the IN txn phase for + * the get_Desc SETUP which was sent 15seconds back + */ + token ^= QTD_TOGGLE; /*We need to start IN with DATA-1 Pid-sequence*/ + buf = urb->transfer_dma; + + token |= (1 /* "in" */ << 8); /*This is IN stage*/ + + maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0)); + + qtd_fill(ehci, qtd, buf, len, token, maxpacket); + + /* + * Our IN phase shall always be a short read; so keep the queue running + * and let it advance to the next qtd which zero length OUT status + */ + qtd->hw_alt_next = EHCI_LIST_END(ehci); + + /* STATUS stage for GetDesc control request */ + token ^= 0x0100; /* "in" <--> "out" */ + token |= QTD_TOGGLE; /* force DATA1 */ + + qtd_prev = qtd; + qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC); + if (unlikely(!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail(&qtd->qtd_list, head); + + /* dont fill any data in such packets */ + qtd_fill(ehci, qtd, 0, 0, token, 0); + + /* by default, enable interrupt on urb completion */ + if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) + qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); + + submit_async(ehci, urb, &qtd_list, GFP_KERNEL); + + return 0; + +cleanup: + qtd_list_free(ehci, urb, head); + return -1; +} +#endif /* CONFIG_USB_HCD_TEST_MODE */ + +/*-------------------------------------------------------------------------*/ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) { diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c index 7cc26e621aa..7c3de95c705 100644 --- a/drivers/usb/host/ehci-s5p.c +++ b/drivers/usb/host/ehci-s5p.c @@ -75,7 +75,7 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev) static int s5p_ehci_probe(struct platform_device *pdev) { - struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; + struct s5p_ehci_platdata *pdata = dev_get_platdata(&pdev->dev); struct s5p_ehci_hcd *s5p_ehci; struct usb_hcd *hcd; struct ehci_hcd *ehci; @@ -220,14 +220,6 @@ static int s5p_ehci_remove(struct platform_device *pdev) return 0; } -static void s5p_ehci_shutdown(struct 
platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - #ifdef CONFIG_PM static int s5p_ehci_suspend(struct device *dev) { @@ -297,7 +289,7 @@ MODULE_DEVICE_TABLE(of, exynos_ehci_match); static struct platform_driver s5p_ehci_driver = { .probe = s5p_ehci_probe, .remove = s5p_ehci_remove, - .shutdown = s5p_ehci_shutdown, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "s5p-ehci", .owner = THIS_MODULE, diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index f80d0330d54..85dd24ed97a 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -169,7 +169,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) break; } } -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) if (usecs > ehci->uframe_periodic_max) ehci_err (ehci, "uframe %d sched overrun: %d usecs\n", frame * 8 + uframe, usecs); @@ -327,17 +327,8 @@ static int tt_available ( periodic_tt_usecs (ehci, dev, frame, tt_usecs); - ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in" - " schedule %d/%d/%d/%d/%d/%d/%d/%d\n", - frame, usecs, uframe, - tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3], - tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]); - - if (max_tt_usecs[uframe] <= tt_usecs[uframe]) { - ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n", - frame, uframe); + if (max_tt_usecs[uframe] <= tt_usecs[uframe]) return 0; - } /* special case for isoc transfers larger than 125us: * the first and each subsequent fully used uframe @@ -348,13 +339,8 @@ static int tt_available ( int ufs = (usecs / 125); int i; for (i = uframe; i < (uframe + ufs) && i < 8; i++) - if (0 < tt_usecs[i]) { - ehci_vdbg(ehci, - "multi-uframe xfer can't fit " - "in frame %d uframe %d\n", - frame, i); + if (0 < tt_usecs[i]) return 0; - } } tt_usecs[uframe] += usecs; @@ -362,12 +348,8 @@ static int tt_available ( carryover_tt_bandwidth(tt_usecs); /* fail if the carryover pushed bw past the last uframe's limit */ - if (max_tt_usecs[7] < tt_usecs[7]) { - ehci_vdbg(ehci, - "tt unavailable usecs %d frame %d uframe %d\n", - usecs, frame, uframe); + if (max_tt_usecs[7] < tt_usecs[7]) return 0; - } } return 1; @@ -601,12 +583,29 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) list_del(&qh->intr_node); } +static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + if (qh->qh_state != QH_STATE_LINKED || + list_empty(&qh->unlink_node)) + return; + + list_del_init(&qh->unlink_node); + + /* + * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for + * avoiding unnecessary CPU wakeup + */ +} + static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) { /* If the QH isn't linked then there's nothing we can do. */ if (qh->qh_state != QH_STATE_LINKED) return; + /* if the qh is waiting for unlink, cancel it now */ + cancel_unlink_wait_intr(ehci, qh); + qh_unlink_periodic (ehci, qh); /* Make sure the unlinks are visible before starting the timer */ @@ -632,6 +631,27 @@ static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) } } +/* + * It is common only one intr URB is scheduled on one qh, and + * given complete() is run in tasklet context, introduce a bit + * delay to avoid unlink qh too early. 
+ */ +static void start_unlink_intr_wait(struct ehci_hcd *ehci, + struct ehci_qh *qh) +{ + qh->unlink_cycle = ehci->intr_unlink_wait_cycle; + + /* New entries go at the end of the intr_unlink_wait list */ + list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait); + + if (ehci->rh_state < EHCI_RH_RUNNING) + ehci_handle_start_intr_unlinks(ehci); + else if (ehci->intr_unlink_wait.next == &qh->unlink_node) { + ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true); + ++ehci->intr_unlink_wait_cycle; + } +} + static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) { struct ehci_qh_hw *hw = qh->hw; @@ -889,6 +909,9 @@ static int intr_submit ( if (qh->qh_state == QH_STATE_IDLE) { qh_refresh(ehci, qh); qh_link_periodic(ehci, qh); + } else { + /* cancel unlink wait for the qh */ + cancel_unlink_wait_intr(ehci, qh); } /* ... update usbfs periodic stats */ @@ -924,9 +947,11 @@ static void scan_intr(struct ehci_hcd *ehci) * in qh_unlink_periodic(). */ temp = qh_completions(ehci, qh); - if (unlikely(temp || (list_empty(&qh->qtd_list) && - qh->qh_state == QH_STATE_LINKED))) + if (unlikely(temp)) start_unlink_intr(ehci, qh); + else if (unlikely(list_empty(&qh->qtd_list) && + qh->qh_state == QH_STATE_LINKED)) + start_unlink_intr_wait(ehci, qh); } } } @@ -1391,21 +1416,20 @@ iso_stream_schedule ( /* Behind the scheduling threshold? */ if (unlikely(start < next)) { + unsigned now2 = (now - base) & (mod - 1); /* USB_ISO_ASAP: Round up to the first available slot */ if (urb->transfer_flags & URB_ISO_ASAP) start += (next - start + period - 1) & -period; /* - * Not ASAP: Use the next slot in the stream. If - * the entire URB falls before the threshold, fail. + * Not ASAP: Use the next slot in the stream, + * no matter what. */ - else if (start + span - period < next) { - ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", + else if (start + span - period < now2) { + ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", urb, start + base, - span - period, next + base); - status = -EXDEV; - goto fail; + span - period, now2 + base); } } @@ -1574,16 +1598,9 @@ static void itd_link_urb( next_uframe = stream->next_uframe & (mod - 1); - if (unlikely (list_empty(&stream->td_list))) { + if (unlikely (list_empty(&stream->td_list))) ehci_to_hcd(ehci)->self.bandwidth_allocated += stream->bandwidth; - ehci_vdbg (ehci, - "schedule devp %s ep%d%s-iso period %d start %d.%d\n", - urb->dev->devpath, stream->bEndpointAddress & 0x0f, - (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", - urb->interval, - next_uframe >> 3, next_uframe & 0x7); - } if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { if (ehci->amd_pll_fix == 1) @@ -1718,14 +1735,9 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd) usb_amd_quirk_pll_enable(); } - if (unlikely(list_is_singular(&stream->td_list))) { + if (unlikely(list_is_singular(&stream->td_list))) ehci_to_hcd(ehci)->self.bandwidth_allocated -= stream->bandwidth; - ehci_vdbg (ehci, - "deschedule devp %s ep%d%s-iso\n", - dev->devpath, stream->bEndpointAddress & 0x0f, - (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out"); - } done: itd->urb = NULL; @@ -1983,17 +1995,10 @@ static void sitd_link_urb( next_uframe = stream->next_uframe; - if (list_empty(&stream->td_list)) { + if (list_empty(&stream->td_list)) /* usbfs ignores TT bandwidth */ ehci_to_hcd(ehci)->self.bandwidth_allocated += stream->bandwidth; - ehci_vdbg (ehci, - "sched devp %s ep%d%s-iso [%d] %dms/%04x\n", - urb->dev->devpath, stream->bEndpointAddress & 0x0f, - (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", - (next_uframe >> 3) & (ehci->periodic_size - 1), - stream->interval, hc32_to_cpu(ehci, stream->splits)); - } if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { if (ehci->amd_pll_fix == 1) @@ -2107,14 +2112,9 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd) usb_amd_quirk_pll_enable(); } - if (list_is_singular(&stream->td_list)) { + if (list_is_singular(&stream->td_list)) ehci_to_hcd(ehci)->self.bandwidth_allocated -= stream->bandwidth; - ehci_vdbg (ehci, - "deschedule devp %s ep%d%s-iso\n", - dev->devpath, stream->bEndpointAddress & 0x0f, - (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); - } done: sitd->urb = NULL; diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c index b2de52d3961..8a734498079 100644 --- a/drivers/usb/host/ehci-sead3.c +++ b/drivers/usb/host/ehci-sead3.c @@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index c4c0ee92a39..dc899eb2b86 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c @@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_USB2 | HCD_MEMORY, + .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, /* * basic lifecycle operations @@ -104,7 +104,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) goto fail_create_hcd; } - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); /* initialize hcd */ hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 6ee7ef79b4f..78fa76da332 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -25,9 +25,9 @@ #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> -#include <linux/platform_data/tegra_usb.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/usb/ehci_def.h> @@ -51,6 +51,10 @@ static struct hc_driver __read_mostly tegra_ehci_hc_driver; +struct tegra_ehci_soc_config { + bool has_hostpc; +}; + static int (*orig_hub_control)(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength); @@ -58,7 +62,6 @@ static int (*orig_hub_control)(struct usb_hcd *hcd, struct tegra_ehci_hcd { struct tegra_usb_phy *phy; struct clk *clk; - struct usb_phy *transceiver; int port_resuming; bool needs_double_reset; enum tegra_usb_phy_port_speed port_speed; @@ -322,50 +325,38 @@ static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) free_dma_aligned_buffer(urb); } -static int setup_vbus_gpio(struct platform_device *pdev, - struct tegra_ehci_platform_data *pdata) -{ - int err = 0; - int gpio; - - gpio = pdata->vbus_gpio; - if (!gpio_is_valid(gpio)) - gpio = 
of_get_named_gpio(pdev->dev.of_node, - "nvidia,vbus-gpio", 0); - if (!gpio_is_valid(gpio)) - return 0; +static const struct tegra_ehci_soc_config tegra30_soc_config = { + .has_hostpc = true, +}; - err = gpio_request(gpio, "vbus_gpio"); - if (err) { - dev_err(&pdev->dev, "can't request vbus gpio %d", gpio); - return err; - } - err = gpio_direction_output(gpio, 1); - if (err) { - dev_err(&pdev->dev, "can't enable vbus\n"); - return err; - } +static const struct tegra_ehci_soc_config tegra20_soc_config = { + .has_hostpc = false, +}; - return err; -} +static struct of_device_id tegra_ehci_of_match[] = { + { .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config }, + { .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config }, + { }, +}; static int tegra_ehci_probe(struct platform_device *pdev) { + const struct of_device_id *match; + const struct tegra_ehci_soc_config *soc_config; struct resource *res; struct usb_hcd *hcd; struct ehci_hcd *ehci; struct tegra_ehci_hcd *tegra; - struct tegra_ehci_platform_data *pdata; int err = 0; int irq; - struct device_node *np_phy; struct usb_phy *u_phy; - pdata = pdev->dev.platform_data; - if (!pdata) { - dev_err(&pdev->dev, "Platform data missing\n"); - return -EINVAL; + match = of_match_device(tegra_ehci_of_match, &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "Error: No device match found\n"); + return -ENODEV; } + soc_config = match->data; /* Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. @@ -376,14 +367,11 @@ static int tegra_ehci_probe(struct platform_device *pdev) if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - setup_vbus_gpio(pdev, pdata); - hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Unable to create HCD\n"); - err = -ENOMEM; - goto cleanup_vbus_gpio; + return -ENOMEM; } platform_set_drvdata(pdev, hcd); ehci = hcd_to_ehci(hcd); @@ -406,13 +394,7 @@ static int tegra_ehci_probe(struct platform_device *pdev) udelay(1); tegra_periph_reset_deassert(tegra->clk); - np_phy = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); - if (!np_phy) { - err = -ENODEV; - goto cleanup_clk_en; - } - - u_phy = tegra_usb_get_phy(np_phy); + u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0); if (IS_ERR(u_phy)) { err = PTR_ERR(u_phy); goto cleanup_clk_en; @@ -437,6 +419,7 @@ static int tegra_ehci_probe(struct platform_device *pdev) goto cleanup_clk_en; } ehci->caps = hcd->regs + 0x100; + ehci->has_hostpc = soc_config->has_hostpc; err = usb_phy_init(hcd->phy); if (err) { @@ -466,26 +449,18 @@ static int tegra_ehci_probe(struct platform_device *pdev) goto cleanup_phy; } - if (pdata->operating_mode == TEGRA_USB_OTG) { - tegra->transceiver = - devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); - if (!IS_ERR(tegra->transceiver)) - otg_set_host(tegra->transceiver->otg, &hcd->self); - } else { - tegra->transceiver = ERR_PTR(-ENODEV); - } + otg_set_host(u_phy->otg, &hcd->self); err = usb_add_hcd(hcd, irq, IRQF_SHARED); if (err) { dev_err(&pdev->dev, "Failed to add USB HCD\n"); - goto cleanup_transceiver; + goto cleanup_otg_set_host; } return err; -cleanup_transceiver: - if (!IS_ERR(tegra->transceiver)) - otg_set_host(tegra->transceiver->otg, NULL); +cleanup_otg_set_host: + otg_set_host(u_phy->otg, NULL); cleanup_phy: usb_phy_shutdown(hcd->phy); cleanup_clk_en: @@ -494,8 +469,6 @@ cleanup_clk_get: clk_put(tegra->clk); cleanup_hcd_create: usb_put_hcd(hcd); 
-cleanup_vbus_gpio: - /* FIXME: Undo setup_vbus_gpio() here */ return err; } @@ -505,8 +478,7 @@ static int tegra_ehci_remove(struct platform_device *pdev) struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; - if (!IS_ERR(tegra->transceiver)) - otg_set_host(tegra->transceiver->otg, NULL); + otg_set_host(hcd->phy->otg, NULL); usb_phy_shutdown(hcd->phy); usb_remove_hcd(hcd); @@ -525,11 +497,6 @@ static void tegra_ehci_hcd_shutdown(struct platform_device *pdev) hcd->driver->shutdown(hcd); } -static struct of_device_id tegra_ehci_of_match[] = { - { .compatible = "nvidia,tegra20-ehci", }, - { }, -}; - static struct platform_driver tegra_ehci_driver = { .probe = tegra_ehci_probe, .remove = tegra_ehci_remove, diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c index d72b2929c03..67026ffbf9a 100644 --- a/drivers/usb/host/ehci-tilegx.c +++ b/drivers/usb/host/ehci-tilegx.c @@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = { * Generic hardware linkage. */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * Basic lifecycle operations. @@ -101,7 +101,7 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct ehci_hcd *ehci; - struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data; + struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); pte_t pte = { 0 }; int my_cpu = smp_processor_id(); int ret; @@ -186,7 +186,7 @@ err_hcd: static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); - struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data; + struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); usb_remove_hcd(hcd); usb_put_hcd(hcd); diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 11e5b32f73e..424ac5d8371 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c @@ -72,6 +72,7 @@ static unsigned event_delays_ns[] = { 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */ 1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */ 2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */ + 5 * NSEC_PER_MSEC, /* EHCI_HRTIMER_START_UNLINK_INTR */ 6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */ @@ -215,6 +216,36 @@ static void ehci_handle_controller_death(struct ehci_hcd *ehci) /* Not in process context, so don't try to reset the controller */ } +/* start to unlink interrupt QHs */ +static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci) +{ + bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); + + /* + * Process all the QHs on the intr_unlink list that were added + * before the current unlink cycle began. The list is in + * temporal order, so stop when we reach the first entry in the + * current cycle. But if the root hub isn't running then + * process all the QHs on the list. 
+ */ + while (!list_empty(&ehci->intr_unlink_wait)) { + struct ehci_qh *qh; + + qh = list_first_entry(&ehci->intr_unlink_wait, + struct ehci_qh, unlink_node); + if (!stopped && (qh->unlink_cycle == + ehci->intr_unlink_wait_cycle)) + break; + list_del_init(&qh->unlink_node); + start_unlink_intr(ehci, qh); + } + + /* Handle remaining entries later */ + if (!list_empty(&ehci->intr_unlink_wait)) { + ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true); + ++ehci->intr_unlink_wait_cycle; + } +} /* Handle unlinked interrupt QHs once they are gone from the hardware */ static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci) @@ -236,7 +267,7 @@ static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci) unlink_node); if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle) break; - list_del(&qh->unlink_node); + list_del_init(&qh->unlink_node); end_unlink_intr(ehci, qh); } @@ -363,6 +394,7 @@ static void (*event_handlers[])(struct ehci_hcd *) = { ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */ ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */ end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */ + ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */ unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */ ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */ ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */ diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c index 59e0e24c753..1c370dfbee0 100644 --- a/drivers/usb/host/ehci-w90x900.c +++ b/drivers/usb/host/ehci-w90x900.c @@ -108,7 +108,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_USB2|HCD_MEMORY, + .flags = HCD_USB2|HCD_MEMORY|HCD_BH, /* * basic lifecycle operations diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index 35c7f90384a..95979f9f438 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = { * generic hardware linkage */ .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations @@ -220,21 +220,6 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op) return 0; } -/** - * ehci_hcd_xilinx_of_shutdown - shutdown the hcd - * @op: pointer to platform_device structure that is to be removed - * - * Properly shutdown the hcd, call driver's shutdown routine. 
- */ -static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op) -{ - struct usb_hcd *hcd = platform_get_drvdata(op); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - - static const struct of_device_id ehci_hcd_xilinx_of_match[] = { {.compatible = "xlnx,xps-usb-host-1.00.a",}, {}, @@ -244,7 +229,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); static struct platform_driver ehci_hcd_xilinx_of_driver = { .probe = ehci_hcd_xilinx_of_probe, .remove = ehci_hcd_xilinx_of_remove, - .shutdown = ehci_hcd_xilinx_of_shutdown, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xilinx-of-ehci", .owner = THIS_MODULE, diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 64f9a08e959..291db7d09f2 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -38,7 +38,7 @@ typedef __u16 __bitwise __hc16; #endif /* statistics can be kept for tuning/monitoring */ -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) #define EHCI_STATS #endif @@ -88,6 +88,7 @@ enum ehci_hrtimer_event { EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */ EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */ EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */ + EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */ EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */ EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */ EHCI_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */ @@ -143,7 +144,9 @@ struct ehci_hcd { /* one per controller */ unsigned i_thresh; /* uframes HC might cache */ union ehci_shadow *pshadow; /* mirror hw periodic table */ + struct list_head intr_unlink_wait; struct list_head intr_unlink; + unsigned intr_unlink_wait_cycle; unsigned intr_unlink_cycle; unsigned now_frame; /* frame from HC hardware */ unsigned last_iso_frame; /* last frame scanned for iso */ @@ -210,6 +213,7 @@ struct ehci_hcd { /* one per controller */ #define OHCI_HCCTRL_LEN 0x4 __hc32 *ohci_hcctrl_reg; unsigned has_hostpc:1; + unsigned has_tdi_phy_lpm:1; unsigned has_ppcd:1; /* support per-port change bits */ u8 sbrn; /* packed release number */ @@ -222,7 +226,7 @@ struct ehci_hcd { /* one per controller */ #endif /* debug files */ -#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) struct dentry *debug_dir; #endif @@ -778,15 +782,10 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x) #define ehci_warn(ehci, fmt, args...) \ dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args) -#ifdef VERBOSE_DEBUG -# define ehci_vdbg ehci_dbg -#else - static inline void ehci_vdbg(struct ehci_hcd *ehci, ...) 
{} -#endif -#ifndef DEBUG +#if !defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) #define STUB_DEBUG_FILES -#endif /* DEBUG */ +#endif /* !DEBUG && !CONFIG_DYNAMIC_DEBUG */ /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c new file mode 100644 index 00000000000..fce13bcc4a3 --- /dev/null +++ b/drivers/usb/host/fotg210-hcd.c @@ -0,0 +1,6049 @@ +/* + * Faraday FOTG210 EHCI-like driver + * + * Copyright (c) 2013 Faraday Technology Corporation + * + * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com> + * Feng-Hsin Chiang <john453@faraday-tech.com> + * Po-Yu Chuang <ratbert.chuang@gmail.com> + * + * Most of code borrowed from the Linux-3.7 EHCI driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include <linux/module.h> +#include <linux/device.h> +#include <linux/dmapool.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/ioport.h> +#include <linux/sched.h> +#include <linux/vmalloc.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/hrtimer.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/usb.h> +#include <linux/usb/hcd.h> +#include <linux/moduleparam.h> +#include <linux/dma-mapping.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/platform_device.h> +#include <linux/io.h> + +#include <asm/byteorder.h> +#include <asm/irq.h> +#include <asm/unaligned.h> + +/*-------------------------------------------------------------------------*/ +#define DRIVER_AUTHOR "Yuan-Hsin Chen" +#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver" + +static const char hcd_name[] = "fotg210_hcd"; + +#undef VERBOSE_DEBUG +#undef FOTG210_URB_TRACE + +#ifdef DEBUG +#define FOTG210_STATS +#endif + +/* magic numbers that can affect system performance */ +#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ +#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ +#define FOTG210_TUNE_RL_TT 0 +#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ +#define FOTG210_TUNE_MULT_TT 1 +/* + * Some drivers think it's safe to schedule isochronous transfers more than + * 256 ms into the future (partly as a result of an old bug in the scheduling + * code). In an attempt to avoid trouble, we will use a minimum scheduling + * length of 512 frames instead of 256. 
+ */ +#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */ + +/* Initial IRQ latency: faster than hw default */ +static int log2_irq_thresh; /* 0 to 6 */ +module_param(log2_irq_thresh, int, S_IRUGO); +MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes"); + +/* initial park setting: slower than hw default */ +static unsigned park; +module_param(park, uint, S_IRUGO); +MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets"); + +/* for link power management(LPM) feature */ +static unsigned int hird; +module_param(hird, int, S_IRUGO); +MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us"); + +#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) + +#include "fotg210.h" + +/*-------------------------------------------------------------------------*/ + +#define fotg210_dbg(fotg210, fmt, args...) \ + dev_dbg(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) +#define fotg210_err(fotg210, fmt, args...) \ + dev_err(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) +#define fotg210_info(fotg210, fmt, args...) \ + dev_info(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) +#define fotg210_warn(fotg210, fmt, args...) \ + dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) + +#ifdef VERBOSE_DEBUG +# define fotg210_vdbg fotg210_dbg +#else + static inline void fotg210_vdbg(struct fotg210_hcd *fotg210, ...) {} +#endif + +#ifdef DEBUG + +/* check the values in the HCSPARAMS register + * (host controller _Structural_ parameters) + * see EHCI spec, Table 2-4 for each value + */ +static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label) +{ + u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params); + + fotg210_dbg(fotg210, + "%s hcs_params 0x%x ports=%d\n", + label, params, + HCS_N_PORTS(params) + ); +} +#else + +static inline void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label) {} + +#endif + +#ifdef DEBUG + +/* check the values in the HCCPARAMS register + * (host controller _Capability_ parameters) + * see EHCI Spec, Table 2-5 for each value + * */ +static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label) +{ + u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); + + fotg210_dbg(fotg210, + "%s hcc_params %04x uframes %s%s\n", + label, + params, + HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", + HCC_CANPARK(params) ? 
" park" : ""); +} +#else + +static inline void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label) {} + +#endif + +#ifdef DEBUG + +static void __maybe_unused +dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd) +{ + fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd, + hc32_to_cpup(fotg210, &qtd->hw_next), + hc32_to_cpup(fotg210, &qtd->hw_alt_next), + hc32_to_cpup(fotg210, &qtd->hw_token), + hc32_to_cpup(fotg210, &qtd->hw_buf[0])); + if (qtd->hw_buf[1]) + fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n", + hc32_to_cpup(fotg210, &qtd->hw_buf[1]), + hc32_to_cpup(fotg210, &qtd->hw_buf[2]), + hc32_to_cpup(fotg210, &qtd->hw_buf[3]), + hc32_to_cpup(fotg210, &qtd->hw_buf[4])); +} + +static void __maybe_unused +dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + struct fotg210_qh_hw *hw = qh->hw; + + fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, + qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); + dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next); +} + +static void __maybe_unused +dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd) +{ + fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", + label, itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next), + itd->urb); + fotg210_dbg(fotg210, + " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(fotg210, itd->hw_transaction[0]), + hc32_to_cpu(fotg210, itd->hw_transaction[1]), + hc32_to_cpu(fotg210, itd->hw_transaction[2]), + hc32_to_cpu(fotg210, itd->hw_transaction[3]), + hc32_to_cpu(fotg210, itd->hw_transaction[4]), + hc32_to_cpu(fotg210, itd->hw_transaction[5]), + hc32_to_cpu(fotg210, itd->hw_transaction[6]), + hc32_to_cpu(fotg210, itd->hw_transaction[7])); + fotg210_dbg(fotg210, + " buf: %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(fotg210, itd->hw_bufp[0]), + hc32_to_cpu(fotg210, itd->hw_bufp[1]), + hc32_to_cpu(fotg210, itd->hw_bufp[2]), + hc32_to_cpu(fotg210, itd->hw_bufp[3]), + hc32_to_cpu(fotg210, itd->hw_bufp[4]), + hc32_to_cpu(fotg210, itd->hw_bufp[5]), + hc32_to_cpu(fotg210, itd->hw_bufp[6])); + fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n", + itd->index[0], itd->index[1], itd->index[2], + itd->index[3], itd->index[4], itd->index[5], + itd->index[6], itd->index[7]); +} + +static int __maybe_unused +dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) +{ + return scnprintf(buf, len, + "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s", + label, label[0] ? " " : "", status, + (status & STS_ASS) ? " Async" : "", + (status & STS_PSS) ? " Periodic" : "", + (status & STS_RECL) ? " Recl" : "", + (status & STS_HALT) ? " Halt" : "", + (status & STS_IAA) ? " IAA" : "", + (status & STS_FATAL) ? " FATAL" : "", + (status & STS_FLR) ? " FLR" : "", + (status & STS_PCD) ? " PCD" : "", + (status & STS_ERR) ? " ERR" : "", + (status & STS_INT) ? " INT" : "" + ); +} + +static int __maybe_unused +dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) +{ + return scnprintf(buf, len, + "%s%sintrenable %02x%s%s%s%s%s%s", + label, label[0] ? " " : "", enable, + (enable & STS_IAA) ? " IAA" : "", + (enable & STS_FATAL) ? " FATAL" : "", + (enable & STS_FLR) ? " FLR" : "", + (enable & STS_PCD) ? " PCD" : "", + (enable & STS_ERR) ? " ERR" : "", + (enable & STS_INT) ? " INT" : "" + ); +} + +static const char *const fls_strings[] = { "1024", "512", "256", "??" 
}; + +static int +dbg_command_buf(char *buf, unsigned len, const char *label, u32 command) +{ + return scnprintf(buf, len, + "%s%scommand %07x %s=%d ithresh=%d%s%s%s " + "period=%s%s %s", + label, label[0] ? " " : "", command, + (command & CMD_PARK) ? " park" : "(park)", + CMD_PARK_CNT(command), + (command >> 16) & 0x3f, + (command & CMD_IAAD) ? " IAAD" : "", + (command & CMD_ASE) ? " Async" : "", + (command & CMD_PSE) ? " Periodic" : "", + fls_strings[(command >> 2) & 0x3], + (command & CMD_RESET) ? " Reset" : "", + (command & CMD_RUN) ? "RUN" : "HALT" + ); +} + +static int +dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status) +{ + char *sig; + + /* signaling state */ + switch (status & (3 << 10)) { + case 0 << 10: + sig = "se0"; + break; + case 1 << 10: + sig = "k"; + break; /* low speed */ + case 2 << 10: + sig = "j"; + break; + default: + sig = "?"; + break; + } + + return scnprintf(buf, len, + "%s%sport:%d status %06x %d " + "sig=%s%s%s%s%s%s%s%s", + label, label[0] ? " " : "", port, status, + status>>25,/*device address */ + sig, + (status & PORT_RESET) ? " RESET" : "", + (status & PORT_SUSPEND) ? " SUSPEND" : "", + (status & PORT_RESUME) ? " RESUME" : "", + (status & PORT_PEC) ? " PEC" : "", + (status & PORT_PE) ? " PE" : "", + (status & PORT_CSC) ? " CSC" : "", + (status & PORT_CONNECT) ? " CONNECT" : ""); +} + +#else +static inline void __maybe_unused +dbg_qh(char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{} + +static inline int __maybe_unused +dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) +{ return 0; } + +static inline int __maybe_unused +dbg_command_buf(char *buf, unsigned len, const char *label, u32 command) +{ return 0; } + +static inline int __maybe_unused +dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) +{ return 0; } + +static inline int __maybe_unused +dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status) +{ return 0; } + +#endif /* DEBUG */ + +/* functions have the "wrong" filename when they're output... 
*/ +#define dbg_status(fotg210, label, status) { \ + char _buf[80]; \ + dbg_status_buf(_buf, sizeof(_buf), label, status); \ + fotg210_dbg(fotg210, "%s\n", _buf); \ +} + +#define dbg_cmd(fotg210, label, command) { \ + char _buf[80]; \ + dbg_command_buf(_buf, sizeof(_buf), label, command); \ + fotg210_dbg(fotg210, "%s\n", _buf); \ +} + +#define dbg_port(fotg210, label, port, status) { \ + char _buf[80]; \ + dbg_port_buf(_buf, sizeof(_buf), label, port, status); \ + fotg210_dbg(fotg210, "%s\n", _buf); \ +} + +/*-------------------------------------------------------------------------*/ + +#ifdef STUB_DEBUG_FILES + +static inline void create_debug_files(struct fotg210_hcd *bus) { } +static inline void remove_debug_files(struct fotg210_hcd *bus) { } + +#else + +/* troubleshooting help: expose state in debugfs */ + +static int debug_async_open(struct inode *, struct file *); +static int debug_periodic_open(struct inode *, struct file *); +static int debug_registers_open(struct inode *, struct file *); +static int debug_async_open(struct inode *, struct file *); + +static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*); +static int debug_close(struct inode *, struct file *); + +static const struct file_operations debug_async_fops = { + .owner = THIS_MODULE, + .open = debug_async_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; +static const struct file_operations debug_periodic_fops = { + .owner = THIS_MODULE, + .open = debug_periodic_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; +static const struct file_operations debug_registers_fops = { + .owner = THIS_MODULE, + .open = debug_registers_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; + +static struct dentry *fotg210_debug_root; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); /* fill method */ + struct usb_bus *bus; + struct mutex mutex; /* protect filling of buffer */ + size_t count; /* number of characters filled into buffer */ + char *output_buf; + size_t alloc_size; +}; + +#define speed_char(info1)({ char tmp; \ + switch (info1 & (3 << 12)) { \ + case QH_FULL_SPEED: \ + tmp = 'f'; break; \ + case QH_LOW_SPEED: \ + tmp = 'l'; break; \ + case QH_HIGH_SPEED: \ + tmp = 'h'; break; \ + default: \ + tmp = '?'; break; \ + }; tmp; }) + +static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token) +{ + __u32 v = hc32_to_cpu(fotg210, token); + + if (v & QTD_STS_ACTIVE) + return '*'; + if (v & QTD_STS_HALT) + return '-'; + if (!IS_SHORT_READ(v)) + return ' '; + /* tries to advance through hw_alt_next */ + return '/'; +} + +static void qh_lines( + struct fotg210_hcd *fotg210, + struct fotg210_qh *qh, + char **nextp, + unsigned *sizep +) +{ + u32 scratch; + u32 hw_curr; + struct fotg210_qtd *td; + unsigned temp; + unsigned size = *sizep; + char *next = *nextp; + char mark; + __le32 list_end = FOTG210_LIST_END(fotg210); + struct fotg210_qh_hw *hw = qh->hw; + + if (hw->hw_qtd_next == list_end) /* NEC does this */ + mark = '@'; + else + mark = token_mark(fotg210, hw->hw_token); + if (mark == '/') { /* qh_alt_next controls qh advance? */ + if ((hw->hw_alt_next & QTD_MASK(fotg210)) + == fotg210->async->hw->hw_alt_next) + mark = '#'; /* blocked */ + else if (hw->hw_alt_next == list_end) + mark = '.'; /* use hw_qtd_next */ + /* else alt_next points to some other qtd */ + } + scratch = hc32_to_cpup(fotg210, &hw->hw_info1); + hw_curr = (mark == '*') ? 
hc32_to_cpup(fotg210, &hw->hw_current) : 0; + temp = scnprintf(next, size, + "qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)", + qh, scratch & 0x007f, + speed_char(scratch), + (scratch >> 8) & 0x000f, + scratch, hc32_to_cpup(fotg210, &hw->hw_info2), + hc32_to_cpup(fotg210, &hw->hw_token), mark, + (cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token) + ? "data1" : "data0", + (hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f); + size -= temp; + next += temp; + + /* hc may be modifying the list as we read it ... */ + list_for_each_entry(td, &qh->qtd_list, qtd_list) { + scratch = hc32_to_cpup(fotg210, &td->hw_token); + mark = ' '; + if (hw_curr == td->qtd_dma) + mark = '*'; + else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma)) + mark = '+'; + else if (QTD_LENGTH(scratch)) { + if (td->hw_alt_next == fotg210->async->hw->hw_alt_next) + mark = '#'; + else if (td->hw_alt_next != list_end) + mark = '/'; + } + temp = snprintf(next, size, + "\n\t%p%c%s len=%d %08x urb %p", + td, mark, ({ char *tmp; + switch ((scratch>>8)&0x03) { + case 0: + tmp = "out"; + break; + case 1: + tmp = "in"; + break; + case 2: + tmp = "setup"; + break; + default: + tmp = "?"; + break; + } tmp; }), + (scratch >> 16) & 0x7fff, + scratch, + td->urb); + if (size < temp) + temp = size; + size -= temp; + next += temp; + if (temp == size) + goto done; + } + + temp = snprintf(next, size, "\n"); + if (size < temp) + temp = size; + size -= temp; + next += temp; + +done: + *sizep = size; + *nextp = next; +} + +static ssize_t fill_async_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct fotg210_hcd *fotg210; + unsigned long flags; + unsigned temp, size; + char *next; + struct fotg210_qh *qh; + + hcd = bus_to_hcd(buf->bus); + fotg210 = hcd_to_fotg210(hcd); + next = buf->output_buf; + size = buf->alloc_size; + + *next = 0; + + /* dumps a snapshot of the async schedule. + * usually empty except for long-term bulk reads, or head. + * one QH per line, and TDs we know about + */ + spin_lock_irqsave(&fotg210->lock, flags); + for (qh = fotg210->async->qh_next.qh; size > 0 && qh; + qh = qh->qh_next.qh) + qh_lines(fotg210, qh, &next, &size); + if (fotg210->async_unlink && size > 0) { + temp = scnprintf(next, size, "\nunlink =\n"); + size -= temp; + next += temp; + + for (qh = fotg210->async_unlink; size > 0 && qh; + qh = qh->unlink_next) + qh_lines(fotg210, qh, &next, &size); + } + spin_unlock_irqrestore(&fotg210->lock, flags); + + return strlen(buf->output_buf); +} + +#define DBG_SCHED_LIMIT 64 +static ssize_t fill_periodic_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct fotg210_hcd *fotg210; + unsigned long flags; + union fotg210_shadow p, *seen; + unsigned temp, size, seen_count; + char *next; + unsigned i; + __hc32 tag; + + seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC); + if (!seen) + return 0; + seen_count = 0; + + hcd = bus_to_hcd(buf->bus); + fotg210 = hcd_to_fotg210(hcd); + next = buf->output_buf; + size = buf->alloc_size; + + temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size); + size -= temp; + next += temp; + + /* dump a snapshot of the periodic schedule. + * iso changes, interrupt usually doesn't. 
+ */ + spin_lock_irqsave(&fotg210->lock, flags); + for (i = 0; i < fotg210->periodic_size; i++) { + p = fotg210->pshadow[i]; + if (likely(!p.ptr)) + continue; + tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]); + + temp = scnprintf(next, size, "%4d: ", i); + size -= temp; + next += temp; + + do { + struct fotg210_qh_hw *hw; + + switch (hc32_to_cpu(fotg210, tag)) { + case Q_TYPE_QH: + hw = p.qh->hw; + temp = scnprintf(next, size, " qh%d-%04x/%p", + p.qh->period, + hc32_to_cpup(fotg210, + &hw->hw_info2) + /* uframe masks */ + & (QH_CMASK | QH_SMASK), + p.qh); + size -= temp; + next += temp; + /* don't repeat what follows this qh */ + for (temp = 0; temp < seen_count; temp++) { + if (seen[temp].ptr != p.ptr) + continue; + if (p.qh->qh_next.ptr) { + temp = scnprintf(next, size, + " ..."); + size -= temp; + next += temp; + } + break; + } + /* show more info the first time around */ + if (temp == seen_count) { + u32 scratch = hc32_to_cpup(fotg210, + &hw->hw_info1); + struct fotg210_qtd *qtd; + char *type = ""; + + /* count tds, get ep direction */ + temp = 0; + list_for_each_entry(qtd, + &p.qh->qtd_list, + qtd_list) { + temp++; + switch (0x03 & (hc32_to_cpu( + fotg210, + qtd->hw_token) >> 8)) { + case 0: + type = "out"; + continue; + case 1: + type = "in"; + continue; + } + } + + temp = scnprintf(next, size, + "(%c%d ep%d%s " + "[%d/%d] q%d p%d)", + speed_char(scratch), + scratch & 0x007f, + (scratch >> 8) & 0x000f, type, + p.qh->usecs, p.qh->c_usecs, + temp, + 0x7ff & (scratch >> 16)); + + if (seen_count < DBG_SCHED_LIMIT) + seen[seen_count++].qh = p.qh; + } else + temp = 0; + tag = Q_NEXT_TYPE(fotg210, hw->hw_next); + p = p.qh->qh_next; + break; + case Q_TYPE_FSTN: + temp = scnprintf(next, size, + " fstn-%8x/%p", p.fstn->hw_prev, + p.fstn); + tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next); + p = p.fstn->fstn_next; + break; + case Q_TYPE_ITD: + temp = scnprintf(next, size, + " itd/%p", p.itd); + tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next); + p = p.itd->itd_next; + break; + } + size -= temp; + next += temp; + } while (p.ptr); + + temp = scnprintf(next, size, "\n"); + size -= temp; + next += temp; + } + spin_unlock_irqrestore(&fotg210->lock, flags); + kfree(seen); + + return buf->alloc_size - size; +} +#undef DBG_SCHED_LIMIT + +static const char *rh_state_string(struct fotg210_hcd *fotg210) +{ + switch (fotg210->rh_state) { + case FOTG210_RH_HALTED: + return "halted"; + case FOTG210_RH_SUSPENDED: + return "suspended"; + case FOTG210_RH_RUNNING: + return "running"; + case FOTG210_RH_STOPPING: + return "stopping"; + } + return "?"; +} + +static ssize_t fill_registers_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct fotg210_hcd *fotg210; + unsigned long flags; + unsigned temp, size, i; + char *next, scratch[80]; + static const char fmt[] = "%*s\n"; + static const char label[] = ""; + + hcd = bus_to_hcd(buf->bus); + fotg210 = hcd_to_fotg210(hcd); + next = buf->output_buf; + size = buf->alloc_size; + + spin_lock_irqsave(&fotg210->lock, flags); + + if (!HCD_HW_ACCESSIBLE(hcd)) { + size = scnprintf(next, size, + "bus %s, device %s\n" + "%s\n" + "SUSPENDED(no register access)\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc); + goto done; + } + + /* Capability Registers */ + i = HC_VERSION(fotg210, fotg210_readl(fotg210, + &fotg210->caps->hc_capbase)); + temp = scnprintf(next, size, + "bus %s, device %s\n" + "%s\n" + "EHCI %x.%02x, rh state %s\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc, + i >> 8, 
i & 0x0ff, rh_state_string(fotg210)); + size -= temp; + next += temp; + + /* FIXME interpret both types of params */ + i = fotg210_readl(fotg210, &fotg210->caps->hcs_params); + temp = scnprintf(next, size, "structural params 0x%08x\n", i); + size -= temp; + next += temp; + + i = fotg210_readl(fotg210, &fotg210->caps->hcc_params); + temp = scnprintf(next, size, "capability params 0x%08x\n", i); + size -= temp; + next += temp; + + /* Operational Registers */ + temp = dbg_status_buf(scratch, sizeof(scratch), label, + fotg210_readl(fotg210, &fotg210->regs->status)); + temp = scnprintf(next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = dbg_command_buf(scratch, sizeof(scratch), label, + fotg210_readl(fotg210, &fotg210->regs->command)); + temp = scnprintf(next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = dbg_intr_buf(scratch, sizeof(scratch), label, + fotg210_readl(fotg210, &fotg210->regs->intr_enable)); + temp = scnprintf(next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = scnprintf(next, size, "uframe %04x\n", + fotg210_read_frame_index(fotg210)); + size -= temp; + next += temp; + + if (fotg210->async_unlink) { + temp = scnprintf(next, size, "async unlink qh %p\n", + fotg210->async_unlink); + size -= temp; + next += temp; + } + +#ifdef FOTG210_STATS + temp = scnprintf(next, size, + "irq normal %ld err %ld iaa %ld(lost %ld)\n", + fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa, + fotg210->stats.lost_iaa); + size -= temp; + next += temp; + + temp = scnprintf(next, size, "complete %ld unlink %ld\n", + fotg210->stats.complete, fotg210->stats.unlink); + size -= temp; + next += temp; +#endif + +done: + spin_unlock_irqrestore(&fotg210->lock, flags); + + return buf->alloc_size - size; +} + +static struct debug_buffer *alloc_buffer(struct usb_bus *bus, + ssize_t (*fill_func)(struct debug_buffer *)) +{ + struct debug_buffer *buf; + + buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL); + + if (buf) { + buf->bus = bus; + buf->fill_func = fill_func; + mutex_init(&buf->mutex); + buf->alloc_size = PAGE_SIZE; + } + + return buf; +} + +static int fill_buffer(struct debug_buffer *buf) +{ + int ret = 0; + + if (!buf->output_buf) + buf->output_buf = vmalloc(buf->alloc_size); + + if (!buf->output_buf) { + ret = -ENOMEM; + goto out; + } + + ret = buf->fill_func(buf); + + if (ret >= 0) { + buf->count = ret; + ret = 0; + } + +out: + return ret; +} + +static ssize_t debug_output(struct file *file, char __user *user_buf, + size_t len, loff_t *offset) +{ + struct debug_buffer *buf = file->private_data; + int ret = 0; + + mutex_lock(&buf->mutex); + if (buf->count == 0) { + ret = fill_buffer(buf); + if (ret != 0) { + mutex_unlock(&buf->mutex); + goto out; + } + } + mutex_unlock(&buf->mutex); + + ret = simple_read_from_buffer(user_buf, len, offset, + buf->output_buf, buf->count); + +out: + return ret; + +} + +static int debug_close(struct inode *inode, struct file *file) +{ + struct debug_buffer *buf = file->private_data; + + if (buf) { + vfree(buf->output_buf); + kfree(buf); + } + + return 0; +} +static int debug_async_open(struct inode *inode, struct file *file) +{ + file->private_data = alloc_buffer(inode->i_private, fill_async_buffer); + + return file->private_data ? 0 : -ENOMEM; +} + +static int debug_periodic_open(struct inode *inode, struct file *file) +{ + struct debug_buffer *buf; + buf = alloc_buffer(inode->i_private, fill_periodic_buffer); + if (!buf) + return -ENOMEM; + + buf->alloc_size = (sizeof(void *) == 4 ? 
6 : 8)*PAGE_SIZE; + file->private_data = buf; + return 0; +} + +static int debug_registers_open(struct inode *inode, struct file *file) +{ + file->private_data = alloc_buffer(inode->i_private, + fill_registers_buffer); + + return file->private_data ? 0 : -ENOMEM; +} + +static inline void create_debug_files(struct fotg210_hcd *fotg210) +{ + struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self; + + fotg210->debug_dir = debugfs_create_dir(bus->bus_name, + fotg210_debug_root); + if (!fotg210->debug_dir) + return; + + if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus, + &debug_async_fops)) + goto file_error; + + if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus, + &debug_periodic_fops)) + goto file_error; + + if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus, + &debug_registers_fops)) + goto file_error; + + return; + +file_error: + debugfs_remove_recursive(fotg210->debug_dir); +} + +static inline void remove_debug_files(struct fotg210_hcd *fotg210) +{ + debugfs_remove_recursive(fotg210->debug_dir); +} + +#endif /* STUB_DEBUG_FILES */ +/*-------------------------------------------------------------------------*/ + +/* + * handshake - spin reading hc until handshake completes or fails + * @ptr: address of hc register to be read + * @mask: bits to look at in result of read + * @done: value of those bits when handshake succeeds + * @usec: timeout in microseconds + * + * Returns negative errno, or zero on success + * + * Success happens when the "mask" bits have the specified value (hardware + * handshake done). There are two failure modes: "usec" have passed (major + * hardware flakeout), or the register reads as all-ones (hardware removed). + * + * That last failure should_only happen in cases like physical cardbus eject + * before driver shutdown. But it also seems to be caused by bugs in cardbus + * bridge shutdown: shutting down the bridge before the devices using it. + */ +static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr, + u32 mask, u32 done, int usec) +{ + u32 result; + + do { + result = fotg210_readl(fotg210, ptr); + if (result == ~(u32)0) /* card removed */ + return -ENODEV; + result &= mask; + if (result == done) + return 0; + udelay(1); + usec--; + } while (usec > 0); + return -ETIMEDOUT; +} + +/* + * Force HC to halt state from unknown (EHCI spec section 2.3). + * Must be called with interrupts enabled and the lock not held. + */ +static int fotg210_halt(struct fotg210_hcd *fotg210) +{ + u32 temp; + + spin_lock_irq(&fotg210->lock); + + /* disable any irqs left enabled by previous code */ + fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable); + + /* + * This routine gets called during probe before fotg210->command + * has been initialized, so we can't rely on its value. + */ + fotg210->command &= ~CMD_RUN; + temp = fotg210_readl(fotg210, &fotg210->regs->command); + temp &= ~(CMD_RUN | CMD_IAAD); + fotg210_writel(fotg210, temp, &fotg210->regs->command); + + spin_unlock_irq(&fotg210->lock); + synchronize_irq(fotg210_to_hcd(fotg210)->irq); + + return handshake(fotg210, &fotg210->regs->status, + STS_HALT, STS_HALT, 16 * 125); +} + +/* + * Reset a non-running (STS_HALT == 1) controller. + * Must be called with interrupts enabled and the lock not held. 
+ */ +static int fotg210_reset(struct fotg210_hcd *fotg210) +{ + int retval; + u32 command = fotg210_readl(fotg210, &fotg210->regs->command); + + /* If the EHCI debug controller is active, special care must be + * taken before and after a host controller reset */ + if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210))) + fotg210->debug = NULL; + + command |= CMD_RESET; + dbg_cmd(fotg210, "reset", command); + fotg210_writel(fotg210, command, &fotg210->regs->command); + fotg210->rh_state = FOTG210_RH_HALTED; + fotg210->next_statechange = jiffies; + retval = handshake(fotg210, &fotg210->regs->command, + CMD_RESET, 0, 250 * 1000); + + if (retval) + return retval; + + if (fotg210->debug) + dbgp_external_startup(fotg210_to_hcd(fotg210)); + + fotg210->port_c_suspend = fotg210->suspended_ports = + fotg210->resuming_ports = 0; + return retval; +} + +/* + * Idle the controller (turn off the schedules). + * Must be called with interrupts enabled and the lock not held. + */ +static void fotg210_quiesce(struct fotg210_hcd *fotg210) +{ + u32 temp; + + if (fotg210->rh_state != FOTG210_RH_RUNNING) + return; + + /* wait for any schedule enables/disables to take effect */ + temp = (fotg210->command << 10) & (STS_ASS | STS_PSS); + handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp, + 16 * 125); + + /* then disable anything that's still active */ + spin_lock_irq(&fotg210->lock); + fotg210->command &= ~(CMD_ASE | CMD_PSE); + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); + spin_unlock_irq(&fotg210->lock); + + /* hardware can take 16 microframes to turn off ... */ + handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0, + 16 * 125); +} + +/*-------------------------------------------------------------------------*/ + +static void end_unlink_async(struct fotg210_hcd *fotg210); +static void unlink_empty_async(struct fotg210_hcd *fotg210); +static void fotg210_work(struct fotg210_hcd *fotg210); +static void start_unlink_intr(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh); +static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); + +/*-------------------------------------------------------------------------*/ + +/* Set a bit in the USBCMD register */ +static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit) +{ + fotg210->command |= bit; + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); + + /* unblock posted write */ + fotg210_readl(fotg210, &fotg210->regs->command); +} + +/* Clear a bit in the USBCMD register */ +static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit) +{ + fotg210->command &= ~bit; + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); + + /* unblock posted write */ + fotg210_readl(fotg210, &fotg210->regs->command); +} + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI timer support... Now using hrtimers. + * + * Lots of different events are triggered from fotg210->hrtimer. Whenever + * the timer routine runs, it checks each possible event; events that are + * currently enabled and whose expiration time has passed get handled. + * The set of enabled events is stored as a collection of bitflags in + * fotg210->enabled_hrtimer_events, and they are numbered in order of + * increasing delay values (ranging between 1 ms and 100 ms). 
+ * + * Rather than implementing a sorted list or tree of all pending events, + * we keep track only of the lowest-numbered pending event, in + * fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its + * expiration time is set to the timeout value for this event. + * + * As a result, events might not get handled right away; the actual delay + * could be anywhere up to twice the requested delay. This doesn't + * matter, because none of the events are especially time-critical. The + * ones that matter most all have a delay of 1 ms, so they will be + * handled after 2 ms at most, which is okay. In addition to this, we + * allow for an expiration range of 1 ms. + */ + +/* + * Delay lengths for the hrtimer event types. + * Keep this list sorted by delay length, in the same order as + * the event types indexed by enum fotg210_hrtimer_event in fotg210.h. + */ +static unsigned event_delays_ns[] = { + 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_ASS */ + 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_PSS */ + 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_DEAD */ + 1125 * NSEC_PER_USEC, /* FOTG210_HRTIMER_UNLINK_INTR */ + 2 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_FREE_ITDS */ + 6 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_ASYNC_UNLINKS */ + 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IAA_WATCHDOG */ + 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_PERIODIC */ + 15 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_ASYNC */ + 100 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IO_WATCHDOG */ +}; + +/* Enable a pending hrtimer event */ +static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event, + bool resched) +{ + ktime_t *timeout = &fotg210->hr_timeouts[event]; + + if (resched) + *timeout = ktime_add(ktime_get(), + ktime_set(0, event_delays_ns[event])); + fotg210->enabled_hrtimer_events |= (1 << event); + + /* Track only the lowest-numbered pending event */ + if (event < fotg210->next_hrtimer_event) { + fotg210->next_hrtimer_event = event; + hrtimer_start_range_ns(&fotg210->hrtimer, *timeout, + NSEC_PER_MSEC, HRTIMER_MODE_ABS); + } +} + + +/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */ +static void fotg210_poll_ASS(struct fotg210_hcd *fotg210) +{ + unsigned actual, want; + + /* Don't enable anything if the controller isn't running (e.g., died) */ + if (fotg210->rh_state != FOTG210_RH_RUNNING) + return; + + want = (fotg210->command & CMD_ASE) ? 
STS_ASS : 0; + actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS; + + if (want != actual) { + + /* Poll again later, but give up after about 20 ms */ + if (fotg210->ASS_poll_count++ < 20) { + fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS, + true); + return; + } + fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n", + want, actual); + } + fotg210->ASS_poll_count = 0; + + /* The status is up-to-date; restart or stop the schedule as needed */ + if (want == 0) { /* Stopped */ + if (fotg210->async_count > 0) + fotg210_set_command_bit(fotg210, CMD_ASE); + + } else { /* Running */ + if (fotg210->async_count == 0) { + + /* Turn off the schedule after a while */ + fotg210_enable_event(fotg210, + FOTG210_HRTIMER_DISABLE_ASYNC, + true); + } + } +} + +/* Turn off the async schedule after a brief delay */ +static void fotg210_disable_ASE(struct fotg210_hcd *fotg210) +{ + fotg210_clear_command_bit(fotg210, CMD_ASE); +} + + +/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */ +static void fotg210_poll_PSS(struct fotg210_hcd *fotg210) +{ + unsigned actual, want; + + /* Don't do anything if the controller isn't running (e.g., died) */ + if (fotg210->rh_state != FOTG210_RH_RUNNING) + return; + + want = (fotg210->command & CMD_PSE) ? STS_PSS : 0; + actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS; + + if (want != actual) { + + /* Poll again later, but give up after about 20 ms */ + if (fotg210->PSS_poll_count++ < 20) { + fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS, + true); + return; + } + fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n", + want, actual); + } + fotg210->PSS_poll_count = 0; + + /* The status is up-to-date; restart or stop the schedule as needed */ + if (want == 0) { /* Stopped */ + if (fotg210->periodic_count > 0) + fotg210_set_command_bit(fotg210, CMD_PSE); + + } else { /* Running */ + if (fotg210->periodic_count == 0) { + + /* Turn off the schedule after a while */ + fotg210_enable_event(fotg210, + FOTG210_HRTIMER_DISABLE_PERIODIC, + true); + } + } +} + +/* Turn off the periodic schedule after a brief delay */ +static void fotg210_disable_PSE(struct fotg210_hcd *fotg210) +{ + fotg210_clear_command_bit(fotg210, CMD_PSE); +} + + +/* Poll the STS_HALT status bit; see when a dead controller stops */ +static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210) +{ + if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) { + + /* Give up after a few milliseconds */ + if (fotg210->died_poll_count++ < 5) { + /* Try again later */ + fotg210_enable_event(fotg210, + FOTG210_HRTIMER_POLL_DEAD, true); + return; + } + fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n"); + } + + /* Clean up the mess */ + fotg210->rh_state = FOTG210_RH_HALTED; + fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable); + fotg210_work(fotg210); + end_unlink_async(fotg210); + + /* Not in process context, so don't try to reset the controller */ +} + + +/* Handle unlinked interrupt QHs once they are gone from the hardware */ +static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210) +{ + bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING); + + /* + * Process all the QHs on the intr_unlink list that were added + * before the current unlink cycle began. The list is in + * temporal order, so stop when we reach the first entry in the + * current cycle. 
But if the root hub isn't running then + * process all the QHs on the list. + */ + fotg210->intr_unlinking = true; + while (fotg210->intr_unlink) { + struct fotg210_qh *qh = fotg210->intr_unlink; + + if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle) + break; + fotg210->intr_unlink = qh->unlink_next; + qh->unlink_next = NULL; + end_unlink_intr(fotg210, qh); + } + + /* Handle remaining entries later */ + if (fotg210->intr_unlink) { + fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR, + true); + ++fotg210->intr_unlink_cycle; + } + fotg210->intr_unlinking = false; +} + + +/* Start another free-iTDs/siTDs cycle */ +static void start_free_itds(struct fotg210_hcd *fotg210) +{ + if (!(fotg210->enabled_hrtimer_events & + BIT(FOTG210_HRTIMER_FREE_ITDS))) { + fotg210->last_itd_to_free = list_entry( + fotg210->cached_itd_list.prev, + struct fotg210_itd, itd_list); + fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true); + } +} + +/* Wait for controller to stop using old iTDs and siTDs */ +static void end_free_itds(struct fotg210_hcd *fotg210) +{ + struct fotg210_itd *itd, *n; + + if (fotg210->rh_state < FOTG210_RH_RUNNING) + fotg210->last_itd_to_free = NULL; + + list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) { + list_del(&itd->itd_list); + dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma); + if (itd == fotg210->last_itd_to_free) + break; + } + + if (!list_empty(&fotg210->cached_itd_list)) + start_free_itds(fotg210); +} + + +/* Handle lost (or very late) IAA interrupts */ +static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210) +{ + if (fotg210->rh_state != FOTG210_RH_RUNNING) + return; + + /* + * Lost IAA irqs wedge things badly; seen first with a vt8235. + * So we need this watchdog, but must protect it against both + * (a) SMP races against real IAA firing and retriggering, and + * (b) clean HC shutdown, when IAA watchdog was pending. + */ + if (fotg210->async_iaa) { + u32 cmd, status; + + /* If we get here, IAA is *REALLY* late. It's barely + * conceivable that the system is so busy that CMD_IAAD + * is still legitimately set, so let's be sure it's + * clear before we read STS_IAA. (The HC should clear + * CMD_IAAD when it sets STS_IAA.) + */ + cmd = fotg210_readl(fotg210, &fotg210->regs->command); + + /* + * If IAA is set here it either legitimately triggered + * after the watchdog timer expired (_way_ late, so we'll + * still count it as lost) ... or a silicon erratum: + * - VIA seems to set IAA without triggering the IRQ; + * - IAAD potentially cleared without setting IAA. + */ + status = fotg210_readl(fotg210, &fotg210->regs->status); + if ((status & STS_IAA) || !(cmd & CMD_IAAD)) { + COUNT(fotg210->stats.lost_iaa); + fotg210_writel(fotg210, STS_IAA, + &fotg210->regs->status); + } + + fotg210_vdbg(fotg210, "IAA watchdog: status %x cmd %x\n", + status, cmd); + end_unlink_async(fotg210); + } +} + + +/* Enable the I/O watchdog, if appropriate */ +static void turn_on_io_watchdog(struct fotg210_hcd *fotg210) +{ + /* Not needed if the controller isn't running or it's already enabled */ + if (fotg210->rh_state != FOTG210_RH_RUNNING || + (fotg210->enabled_hrtimer_events & + BIT(FOTG210_HRTIMER_IO_WATCHDOG))) + return; + + /* + * Isochronous transfers always need the watchdog. + * For other sorts we use it only if the flag is set. 
+ */ + if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog && + fotg210->async_count + fotg210->intr_count > 0)) + fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG, + true); +} + + +/* + * Handler functions for the hrtimer event types. + * Keep this array in the same order as the event types indexed by + * enum fotg210_hrtimer_event in fotg210.h. + */ +static void (*event_handlers[])(struct fotg210_hcd *) = { + fotg210_poll_ASS, /* FOTG210_HRTIMER_POLL_ASS */ + fotg210_poll_PSS, /* FOTG210_HRTIMER_POLL_PSS */ + fotg210_handle_controller_death, /* FOTG210_HRTIMER_POLL_DEAD */ + fotg210_handle_intr_unlinks, /* FOTG210_HRTIMER_UNLINK_INTR */ + end_free_itds, /* FOTG210_HRTIMER_FREE_ITDS */ + unlink_empty_async, /* FOTG210_HRTIMER_ASYNC_UNLINKS */ + fotg210_iaa_watchdog, /* FOTG210_HRTIMER_IAA_WATCHDOG */ + fotg210_disable_PSE, /* FOTG210_HRTIMER_DISABLE_PERIODIC */ + fotg210_disable_ASE, /* FOTG210_HRTIMER_DISABLE_ASYNC */ + fotg210_work, /* FOTG210_HRTIMER_IO_WATCHDOG */ +}; + +static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t) +{ + struct fotg210_hcd *fotg210 = + container_of(t, struct fotg210_hcd, hrtimer); + ktime_t now; + unsigned long events; + unsigned long flags; + unsigned e; + + spin_lock_irqsave(&fotg210->lock, flags); + + events = fotg210->enabled_hrtimer_events; + fotg210->enabled_hrtimer_events = 0; + fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT; + + /* + * Check each pending event. If its time has expired, handle + * the event; otherwise re-enable it. + */ + now = ktime_get(); + for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) { + if (now.tv64 >= fotg210->hr_timeouts[e].tv64) + event_handlers[e](fotg210); + else + fotg210_enable_event(fotg210, e, false); + } + + spin_unlock_irqrestore(&fotg210->lock, flags); + return HRTIMER_NORESTART; +} + +/*-------------------------------------------------------------------------*/ + +#define fotg210_bus_suspend NULL +#define fotg210_bus_resume NULL + +/*-------------------------------------------------------------------------*/ + +static int check_reset_complete( + struct fotg210_hcd *fotg210, + int index, + u32 __iomem *status_reg, + int port_status +) { + if (!(port_status & PORT_CONNECT)) + return port_status; + + /* if reset finished and it's still not enabled -- handoff */ + if (!(port_status & PORT_PE)) { + /* with integrated TT, there's nobody to hand it to! */ + fotg210_dbg(fotg210, + "Failed to enable port %d on root hub TT\n", + index+1); + return port_status; + } else { + fotg210_dbg(fotg210, "port %d reset complete, port enabled\n", + index + 1); + } + + return port_status; +} + +/*-------------------------------------------------------------------------*/ + + +/* build "status change" packet (one or two bytes) from HC registers */ + +static int +fotg210_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 temp, status; + u32 mask; + int retval = 1; + unsigned long flags; + + /* init status to no-changes */ + buf[0] = 0; + + /* Inform the core about resumes-in-progress by returning + * a non-zero value even if there are no status changes. + */ + status = fotg210->resuming_ports; + + mask = PORT_CSC | PORT_PEC; + /* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */ + + /* no hub change reports (bit 0) for now (power, ...) */ + + /* port N changes (bit N)? 
*/ + spin_lock_irqsave(&fotg210->lock, flags); + + temp = fotg210_readl(fotg210, &fotg210->regs->port_status); + + /* + * Return status information even for ports with OWNER set. + * Otherwise khubd wouldn't see the disconnect event when a + * high-speed device is switched over to the companion + * controller by the user. + */ + + if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) + || (fotg210->reset_done[0] && time_after_eq( + jiffies, fotg210->reset_done[0]))) { + buf[0] |= 1 << 1; + status = STS_PCD; + } + /* FIXME autosuspend idle root hubs */ + spin_unlock_irqrestore(&fotg210->lock, flags); + return status ? retval : 0; +} + +/*-------------------------------------------------------------------------*/ + +static void +fotg210_hub_descriptor( + struct fotg210_hcd *fotg210, + struct usb_hub_descriptor *desc +) { + int ports = HCS_N_PORTS(fotg210->hcs_params); + u16 temp; + + desc->bDescriptorType = 0x29; + desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */ + desc->bHubContrCurrent = 0; + + desc->bNbrPorts = ports; + temp = 1 + (ports / 8); + desc->bDescLength = 7 + 2 * temp; + + /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ + memset(&desc->u.hs.DeviceRemovable[0], 0, temp); + memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); + + temp = 0x0008; /* per-port overcurrent reporting */ + temp |= 0x0002; /* no power switching */ + desc->wHubCharacteristics = cpu_to_le16(temp); +} + +/*-------------------------------------------------------------------------*/ + +static int fotg210_hub_control( + struct usb_hcd *hcd, + u16 typeReq, + u16 wValue, + u16 wIndex, + char *buf, + u16 wLength +) { + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + int ports = HCS_N_PORTS(fotg210->hcs_params); + u32 __iomem *status_reg = &fotg210->regs->port_status; + u32 temp, temp1, status; + unsigned long flags; + int retval = 0; + unsigned selector; + + /* + * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. + * HCS_INDICATOR may say we can change LEDs to off/amber/green. + * (track current state ourselves) ... blink for diagnostics, + * power, "this is the one", etc. EHCI spec supports this. + */ + + spin_lock_irqsave(&fotg210->lock, flags); + switch (typeReq) { + case ClearHubFeature: + switch (wValue) { + case C_HUB_LOCAL_POWER: + case C_HUB_OVER_CURRENT: + /* no hub-wide feature/status flags */ + break; + default: + goto error; + } + break; + case ClearPortFeature: + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + temp = fotg210_readl(fotg210, status_reg); + temp &= ~PORT_RWC_BITS; + + /* + * Even if OWNER is set, so the port is owned by the + * companion controller, khubd needs to be able to clear + * the port-change status bits (especially + * USB_PORT_STAT_C_CONNECTION). 
+ */ + + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + fotg210_writel(fotg210, temp & ~PORT_PE, status_reg); + break; + case USB_PORT_FEAT_C_ENABLE: + fotg210_writel(fotg210, temp | PORT_PEC, status_reg); + break; + case USB_PORT_FEAT_SUSPEND: + if (temp & PORT_RESET) + goto error; + if (!(temp & PORT_SUSPEND)) + break; + if ((temp & PORT_PE) == 0) + goto error; + + /* resume signaling for 20 msec */ + fotg210_writel(fotg210, temp | PORT_RESUME, status_reg); + fotg210->reset_done[wIndex] = jiffies + + msecs_to_jiffies(20); + break; + case USB_PORT_FEAT_C_SUSPEND: + clear_bit(wIndex, &fotg210->port_c_suspend); + break; + case USB_PORT_FEAT_C_CONNECTION: + fotg210_writel(fotg210, temp | PORT_CSC, status_reg); + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + fotg210_writel(fotg210, temp | OTGISR_OVC, + &fotg210->regs->otgisr); + break; + case USB_PORT_FEAT_C_RESET: + /* GetPortStatus clears reset */ + break; + default: + goto error; + } + fotg210_readl(fotg210, &fotg210->regs->command); + break; + case GetHubDescriptor: + fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *) + buf); + break; + case GetHubStatus: + /* no hub-wide feature/status flags */ + memset(buf, 0, 4); + /*cpu_to_le32s ((u32 *) buf); */ + break; + case GetPortStatus: + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + status = 0; + temp = fotg210_readl(fotg210, status_reg); + + /* wPortChange bits */ + if (temp & PORT_CSC) + status |= USB_PORT_STAT_C_CONNECTION << 16; + if (temp & PORT_PEC) + status |= USB_PORT_STAT_C_ENABLE << 16; + + temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr); + if (temp1 & OTGISR_OVC) + status |= USB_PORT_STAT_C_OVERCURRENT << 16; + + /* whoever resumes must GetPortStatus to complete it!! */ + if (temp & PORT_RESUME) { + + /* Remote Wakeup received? */ + if (!fotg210->reset_done[wIndex]) { + /* resume signaling for 20 msec */ + fotg210->reset_done[wIndex] = jiffies + + msecs_to_jiffies(20); + /* check the port again */ + mod_timer(&fotg210_to_hcd(fotg210)->rh_timer, + fotg210->reset_done[wIndex]); + } + + /* resume completed? */ + else if (time_after_eq(jiffies, + fotg210->reset_done[wIndex])) { + clear_bit(wIndex, &fotg210->suspended_ports); + set_bit(wIndex, &fotg210->port_c_suspend); + fotg210->reset_done[wIndex] = 0; + + /* stop resume signaling */ + temp = fotg210_readl(fotg210, status_reg); + fotg210_writel(fotg210, + temp & ~(PORT_RWC_BITS | PORT_RESUME), + status_reg); + clear_bit(wIndex, &fotg210->resuming_ports); + retval = handshake(fotg210, status_reg, + PORT_RESUME, 0, 2000 /* 2msec */); + if (retval != 0) { + fotg210_err(fotg210, + "port %d resume error %d\n", + wIndex + 1, retval); + goto error; + } + temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); + } + } + + /* whoever resets must GetPortStatus to complete it!! */ + if ((temp & PORT_RESET) + && time_after_eq(jiffies, + fotg210->reset_done[wIndex])) { + status |= USB_PORT_STAT_C_RESET << 16; + fotg210->reset_done[wIndex] = 0; + clear_bit(wIndex, &fotg210->resuming_ports); + + /* force reset to complete */ + fotg210_writel(fotg210, + temp & ~(PORT_RWC_BITS | PORT_RESET), + status_reg); + /* REVISIT: some hardware needs 550+ usec to clear + * this bit; seems too long to spin routinely... 
+ */ + retval = handshake(fotg210, status_reg, + PORT_RESET, 0, 1000); + if (retval != 0) { + fotg210_err(fotg210, "port %d reset error %d\n", + wIndex + 1, retval); + goto error; + } + + /* see what we found out */ + temp = check_reset_complete(fotg210, wIndex, status_reg, + fotg210_readl(fotg210, status_reg)); + } + + if (!(temp & (PORT_RESUME|PORT_RESET))) { + fotg210->reset_done[wIndex] = 0; + clear_bit(wIndex, &fotg210->resuming_ports); + } + + /* transfer dedicated ports to the companion hc */ + if ((temp & PORT_CONNECT) && + test_bit(wIndex, &fotg210->companion_ports)) { + temp &= ~PORT_RWC_BITS; + fotg210_writel(fotg210, temp, status_reg); + fotg210_dbg(fotg210, "port %d --> companion\n", + wIndex + 1); + temp = fotg210_readl(fotg210, status_reg); + } + + /* + * Even if OWNER is set, there's no harm letting khubd + * see the wPortStatus values (they should all be 0 except + * for PORT_POWER anyway). + */ + + if (temp & PORT_CONNECT) { + status |= USB_PORT_STAT_CONNECTION; + status |= fotg210_port_speed(fotg210, temp); + } + if (temp & PORT_PE) + status |= USB_PORT_STAT_ENABLE; + + /* maybe the port was unsuspended without our knowledge */ + if (temp & (PORT_SUSPEND|PORT_RESUME)) { + status |= USB_PORT_STAT_SUSPEND; + } else if (test_bit(wIndex, &fotg210->suspended_ports)) { + clear_bit(wIndex, &fotg210->suspended_ports); + clear_bit(wIndex, &fotg210->resuming_ports); + fotg210->reset_done[wIndex] = 0; + if (temp & PORT_PE) + set_bit(wIndex, &fotg210->port_c_suspend); + } + + temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr); + if (temp1 & OTGISR_OVC) + status |= USB_PORT_STAT_OVERCURRENT; + if (temp & PORT_RESET) + status |= USB_PORT_STAT_RESET; + if (test_bit(wIndex, &fotg210->port_c_suspend)) + status |= USB_PORT_STAT_C_SUSPEND << 16; + +#ifndef VERBOSE_DEBUG + if (status & ~0xffff) /* only if wPortChange is interesting */ +#endif + dbg_port(fotg210, "GetStatus", wIndex + 1, temp); + put_unaligned_le32(status, buf); + break; + case SetHubFeature: + switch (wValue) { + case C_HUB_LOCAL_POWER: + case C_HUB_OVER_CURRENT: + /* no hub-wide feature/status flags */ + break; + default: + goto error; + } + break; + case SetPortFeature: + selector = wIndex >> 8; + wIndex &= 0xff; + + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + temp = fotg210_readl(fotg210, status_reg); + temp &= ~PORT_RWC_BITS; + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + if ((temp & PORT_PE) == 0 + || (temp & PORT_RESET) != 0) + goto error; + + /* After above check the port must be connected. + * Set appropriate bit thus could put phy into low power + * mode if we have hostpc feature + */ + fotg210_writel(fotg210, temp | PORT_SUSPEND, + status_reg); + set_bit(wIndex, &fotg210->suspended_ports); + break; + case USB_PORT_FEAT_RESET: + if (temp & PORT_RESUME) + goto error; + /* line status bits may report this as low speed, + * which can be fine if this root hub has a + * transaction translator built in. + */ + fotg210_vdbg(fotg210, "port %d reset\n", wIndex + 1); + temp |= PORT_RESET; + temp &= ~PORT_PE; + + /* + * caller must wait, then call GetPortStatus + * usb 2.0 spec says 50 ms resets on root + */ + fotg210->reset_done[wIndex] = jiffies + + msecs_to_jiffies(50); + fotg210_writel(fotg210, temp, status_reg); + break; + + /* For downstream facing ports (these): one hub port is put + * into test mode according to USB2 11.24.2.13, then the hub + * must be reset (which for root hub now means rmmod+modprobe, + * or else system reboot). 
See EHCI 2.3.9 and 4.14 for info + * about the EHCI-specific stuff. + */ + case USB_PORT_FEAT_TEST: + if (!selector || selector > 5) + goto error; + spin_unlock_irqrestore(&fotg210->lock, flags); + fotg210_quiesce(fotg210); + spin_lock_irqsave(&fotg210->lock, flags); + + /* Put all enabled ports into suspend */ + temp = fotg210_readl(fotg210, status_reg) & + ~PORT_RWC_BITS; + if (temp & PORT_PE) + fotg210_writel(fotg210, temp | PORT_SUSPEND, + status_reg); + + spin_unlock_irqrestore(&fotg210->lock, flags); + fotg210_halt(fotg210); + spin_lock_irqsave(&fotg210->lock, flags); + + temp = fotg210_readl(fotg210, status_reg); + temp |= selector << 16; + fotg210_writel(fotg210, temp, status_reg); + break; + + default: + goto error; + } + fotg210_readl(fotg210, &fotg210->regs->command); + break; + + default: +error: + /* "stall" on error */ + retval = -EPIPE; + } + spin_unlock_irqrestore(&fotg210->lock, flags); + return retval; +} + +static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd, + int portnum) +{ + return; +} + +static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd, + int portnum) +{ + return 0; +} +/*-------------------------------------------------------------------------*/ +/* + * There's basically three types of memory: + * - data used only by the HCD ... kmalloc is fine + * - async and periodic schedules, shared by HC and HCD ... these + * need to use dma_pool or dma_alloc_coherent + * - driver buffers, read/written by HC ... single shot DMA mapped + * + * There's also "register" data (e.g. PCI or SOC), which is memory mapped. + * No memory seen by this driver is pageable. + */ + +/*-------------------------------------------------------------------------*/ + +/* Allocate the key transfer structures from the previously allocated pool */ + +static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210, + struct fotg210_qtd *qtd, dma_addr_t dma) +{ + memset(qtd, 0, sizeof(*qtd)); + qtd->qtd_dma = dma; + qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT); + qtd->hw_next = FOTG210_LIST_END(fotg210); + qtd->hw_alt_next = FOTG210_LIST_END(fotg210); + INIT_LIST_HEAD(&qtd->qtd_list); +} + +static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210, + gfp_t flags) +{ + struct fotg210_qtd *qtd; + dma_addr_t dma; + + qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma); + if (qtd != NULL) + fotg210_qtd_init(fotg210, qtd, dma); + + return qtd; +} + +static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210, + struct fotg210_qtd *qtd) +{ + dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma); +} + + +static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + /* clean qtds first, and know this is not linked */ + if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) { + fotg210_dbg(fotg210, "unused qh not empty!\n"); + BUG(); + } + if (qh->dummy) + fotg210_qtd_free(fotg210, qh->dummy); + dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma); + kfree(qh); +} + +static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210, + gfp_t flags) +{ + struct fotg210_qh *qh; + dma_addr_t dma; + + qh = kzalloc(sizeof(*qh), GFP_ATOMIC); + if (!qh) + goto done; + qh->hw = (struct fotg210_qh_hw *) + dma_pool_alloc(fotg210->qh_pool, flags, &dma); + if (!qh->hw) + goto fail; + memset(qh->hw, 0, sizeof(*qh->hw)); + qh->qh_dma = dma; + INIT_LIST_HEAD(&qh->qtd_list); + + /* dummy td enables safe urb queuing */ + qh->dummy = fotg210_qtd_alloc(fotg210, flags); + if (qh->dummy == NULL) { + fotg210_dbg(fotg210, "no dummy td\n"); + goto fail1; 
+ } +done: + return qh; +fail1: + dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma); +fail: + kfree(qh); + return NULL; +} + +/*-------------------------------------------------------------------------*/ + +/* The queue heads and transfer descriptors are managed from pools tied + * to each of the "per device" structures. + * This is the initialisation and cleanup code. + */ + +static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210) +{ + if (fotg210->async) + qh_destroy(fotg210, fotg210->async); + fotg210->async = NULL; + + if (fotg210->dummy) + qh_destroy(fotg210, fotg210->dummy); + fotg210->dummy = NULL; + + /* DMA consistent memory and pools */ + if (fotg210->qtd_pool) + dma_pool_destroy(fotg210->qtd_pool); + fotg210->qtd_pool = NULL; + + if (fotg210->qh_pool) { + dma_pool_destroy(fotg210->qh_pool); + fotg210->qh_pool = NULL; + } + + if (fotg210->itd_pool) + dma_pool_destroy(fotg210->itd_pool); + fotg210->itd_pool = NULL; + + if (fotg210->periodic) + dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller, + fotg210->periodic_size * sizeof(u32), + fotg210->periodic, fotg210->periodic_dma); + fotg210->periodic = NULL; + + /* shadow periodic table */ + kfree(fotg210->pshadow); + fotg210->pshadow = NULL; +} + +/* remember to add cleanup code (above) if you add anything here */ +static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags) +{ + int i; + + /* QTDs for control/bulk/intr transfers */ + fotg210->qtd_pool = dma_pool_create("fotg210_qtd", + fotg210_to_hcd(fotg210)->self.controller, + sizeof(struct fotg210_qtd), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!fotg210->qtd_pool) + goto fail; + + /* QHs for control/bulk/intr transfers */ + fotg210->qh_pool = dma_pool_create("fotg210_qh", + fotg210_to_hcd(fotg210)->self.controller, + sizeof(struct fotg210_qh_hw), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!fotg210->qh_pool) + goto fail; + + fotg210->async = fotg210_qh_alloc(fotg210, flags); + if (!fotg210->async) + goto fail; + + /* ITD for high speed ISO transfers */ + fotg210->itd_pool = dma_pool_create("fotg210_itd", + fotg210_to_hcd(fotg210)->self.controller, + sizeof(struct fotg210_itd), + 64 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!fotg210->itd_pool) + goto fail; + + /* Hardware periodic table */ + fotg210->periodic = (__le32 *) + dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller, + fotg210->periodic_size * sizeof(__le32), + &fotg210->periodic_dma, 0); + if (fotg210->periodic == NULL) + goto fail; + + for (i = 0; i < fotg210->periodic_size; i++) + fotg210->periodic[i] = FOTG210_LIST_END(fotg210); + + /* software shadow of hardware table */ + fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *), + flags); + if (fotg210->pshadow != NULL) + return 0; + +fail: + fotg210_dbg(fotg210, "couldn't init memory\n"); + fotg210_mem_cleanup(fotg210); + return -ENOMEM; +} +/*-------------------------------------------------------------------------*/ +/* + * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. + * + * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" + * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned + * buffers needed for the larger number). We use one QH per endpoint, queue + * multiple urbs (all three types) per endpoint. URBs may need several qtds. + * + * ISO traffic uses "ISO TD" (itd) records, and (along with + * interrupts) needs careful scheduling. 
Performance improvements can be + * an ongoing challenge. That's in "ehci-sched.c". + * + * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, + * or otherwise through transaction translators (TTs) in USB 2.0 hubs using + * (b) special fields in qh entries or (c) split iso entries. TTs will + * buffer low/full speed data so the host collects it at high speed. + */ + +/*-------------------------------------------------------------------------*/ + +/* fill a qtd, returning how much of the buffer we were able to queue up */ + +static int +qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf, + size_t len, int token, int maxpacket) +{ + int i, count; + u64 addr = buf; + + /* one buffer entry per 4K ... first might be short or unaligned */ + qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr); + qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32)); + count = 0x1000 - (buf & 0x0fff); /* rest of that page */ + if (likely(len < count)) /* ... iff needed */ + count = len; + else { + buf += 0x1000; + buf &= ~0x0fff; + + /* per-qtd limit: from 16K to 20K (best alignment) */ + for (i = 1; count < len && i < 5; i++) { + addr = buf; + qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr); + qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210, + (u32)(addr >> 32)); + buf += 0x1000; + if ((count + 0x1000) < len) + count += 0x1000; + else + count = len; + } + + /* short packets may only terminate transfers */ + if (count != len) + count -= (count % maxpacket); + } + qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token); + qtd->length = count; + + return count; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, + struct fotg210_qtd *qtd) +{ + struct fotg210_qh_hw *hw = qh->hw; + + /* writes to an active overlay are unsafe */ + BUG_ON(qh->qh_state != QH_STATE_IDLE); + + hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma); + hw->hw_alt_next = FOTG210_LIST_END(fotg210); + + /* Except for control endpoints, we make hardware maintain data + * toggle (like OHCI) ... here (re)initialize the toggle in the QH, + * and set the pseudo-toggle in udev. Only usb_clear_halt() will + * ever clear it. + */ + if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) { + unsigned is_out, epnum; + + is_out = qh->is_out; + epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f; + if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) { + hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE); + usb_settoggle(qh->dev, epnum, is_out, 1); + } + } + + hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING); +} + +/* if it weren't for a common silicon quirk (writing the dummy into the qh + * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault + * recovery (including urb dequeue) would need software changes to a QH... + */ +static void +qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + struct fotg210_qtd *qtd; + + if (list_empty(&qh->qtd_list)) + qtd = qh->dummy; + else { + qtd = list_entry(qh->qtd_list.next, + struct fotg210_qtd, qtd_list); + /* + * first qtd may already be partially processed. + * If we come here during unlink, the QH overlay region + * might have reference to the just unlinked qtd. The + * qtd is updated in qh_completions(). Update the QH + * overlay here. 
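+		 * (hw_current is the qtd the controller last loaded into the
+		 * overlay; when it still points at this qtd we only patch
+		 * hw_qtd_next and skip the full qh_update() below.)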
+ */ + if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) { + qh->hw->hw_qtd_next = qtd->hw_next; + qtd = NULL; + } + } + + if (qtd) + qh_update(fotg210, qh, qtd); +} + +/*-------------------------------------------------------------------------*/ + +static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); + +static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_qh *qh = ep->hcpriv; + unsigned long flags; + + spin_lock_irqsave(&fotg210->lock, flags); + qh->clearing_tt = 0; + if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) + && fotg210->rh_state == FOTG210_RH_RUNNING) + qh_link_async(fotg210, qh); + spin_unlock_irqrestore(&fotg210->lock, flags); +} + +static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh, + struct urb *urb, u32 token) +{ + + /* If an async split transaction gets an error or is unlinked, + * the TT buffer may be left in an indeterminate state. We + * have to clear the TT buffer. + * + * Note: this routine is never called for Isochronous transfers. + */ + if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { +#ifdef DEBUG + struct usb_device *tt = urb->dev->tt->hub; + dev_dbg(&tt->dev, + "clear tt buffer port %d, a%d ep%d t%08x\n", + urb->dev->ttport, urb->dev->devnum, + usb_pipeendpoint(urb->pipe), token); +#endif /* DEBUG */ + if (urb->dev->tt->hub != + fotg210_to_hcd(fotg210)->self.root_hub) { + if (usb_hub_clear_tt_buffer(urb) == 0) + qh->clearing_tt = 1; + } + } +} + +static int qtd_copy_status( + struct fotg210_hcd *fotg210, + struct urb *urb, + size_t length, + u32 token +) +{ + int status = -EINPROGRESS; + + /* count IN/OUT bytes, not SETUP (even short packets) */ + if (likely(QTD_PID(token) != 2)) + urb->actual_length += length - QTD_LENGTH(token); + + /* don't modify error codes */ + if (unlikely(urb->unlinked)) + return status; + + /* force cleanup after short read; not always an error */ + if (unlikely(IS_SHORT_READ(token))) + status = -EREMOTEIO; + + /* serious "can't proceed" faults reported by the hardware */ + if (token & QTD_STS_HALT) { + if (token & QTD_STS_BABBLE) { + /* FIXME "must" disable babbling device's port too */ + status = -EOVERFLOW; + /* CERR nonzero + halt --> stall */ + } else if (QTD_CERR(token)) { + status = -EPIPE; + + /* In theory, more than one of the following bits can be set + * since they are sticky and the transaction is retried. + * Which to test first is rather arbitrary. + */ + } else if (token & QTD_STS_MMF) { + /* fs/ls interrupt xfer missed the complete-split */ + status = -EPROTO; + } else if (token & QTD_STS_DBE) { + status = (QTD_PID(token) == 1) /* IN ? */ + ? -ENOSR /* hc couldn't read data */ + : -ECOMM; /* hc couldn't write data */ + } else if (token & QTD_STS_XACT) { + /* timeout, bad CRC, wrong PID, etc */ + fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n", + urb->dev->devpath, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out"); + status = -EPROTO; + } else { /* unknown */ + status = -EPROTO; + } + + fotg210_vdbg(fotg210, + "dev%d ep%d%s qtd token %08x --> status %d\n", + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? 
"in" : "out", + token, status); + } + + return status; +} + +static void +fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, int status) +__releases(fotg210->lock) +__acquires(fotg210->lock) +{ + if (likely(urb->hcpriv != NULL)) { + struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv; + + /* S-mask in a QH means it's an interrupt urb */ + if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) { + + /* ... update hc-wide periodic stats (for usbfs) */ + fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--; + } + } + + if (unlikely(urb->unlinked)) { + COUNT(fotg210->stats.unlink); + } else { + /* report non-error and short read status as zero */ + if (status == -EINPROGRESS || status == -EREMOTEIO) + status = 0; + COUNT(fotg210->stats.complete); + } + +#ifdef FOTG210_URB_TRACE + fotg210_dbg(fotg210, + "%s %s urb %p ep%d%s status %d len %d/%d\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out", + status, + urb->actual_length, urb->transfer_buffer_length); +#endif + + /* complete() can reenter this HCD */ + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); + spin_unlock(&fotg210->lock); + usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status); + spin_lock(&fotg210->lock); +} + +static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); + +/* + * Process and free completed qtds for a qh, returning URBs to drivers. + * Chases up to qh->hw_current. Returns number of completions called, + * indicating how much "real" work we did. + */ +static unsigned +qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + struct fotg210_qtd *last, *end = qh->dummy; + struct list_head *entry, *tmp; + int last_status; + int stopped; + unsigned count = 0; + u8 state; + struct fotg210_qh_hw *hw = qh->hw; + + if (unlikely(list_empty(&qh->qtd_list))) + return count; + + /* completions (or tasks on other cpus) must never clobber HALT + * till we've gone through and cleaned everything up, even when + * they add urbs to this qh's queue or mark them for unlinking. + * + * NOTE: unlinking expects to be done in queue order. + * + * It's a bug for qh->qh_state to be anything other than + * QH_STATE_IDLE, unless our caller is scan_async() or + * scan_intr(). + */ + state = qh->qh_state; + qh->qh_state = QH_STATE_COMPLETING; + stopped = (state == QH_STATE_IDLE); + + rescan: + last = NULL; + last_status = -EINPROGRESS; + qh->needs_rescan = 0; + + /* remove de-activated QTDs from front of queue. + * after faults (including short reads), cleanup this urb + * then let the queue advance. + * if queue is stopped, handles unlinks. 
+ */ + list_for_each_safe(entry, tmp, &qh->qtd_list) { + struct fotg210_qtd *qtd; + struct urb *urb; + u32 token = 0; + + qtd = list_entry(entry, struct fotg210_qtd, qtd_list); + urb = qtd->urb; + + /* clean up any state from previous QTD ...*/ + if (last) { + if (likely(last->urb != urb)) { + fotg210_urb_done(fotg210, last->urb, + last_status); + count++; + last_status = -EINPROGRESS; + } + fotg210_qtd_free(fotg210, last); + last = NULL; + } + + /* ignore urbs submitted during completions we reported */ + if (qtd == end) + break; + + /* hardware copies qtd out of qh overlay */ + rmb(); + token = hc32_to_cpu(fotg210, qtd->hw_token); + + /* always clean up qtds the hc de-activated */ + retry_xacterr: + if ((token & QTD_STS_ACTIVE) == 0) { + + /* Report Data Buffer Error: non-fatal but useful */ + if (token & QTD_STS_DBE) + fotg210_dbg(fotg210, + "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", + urb, + usb_endpoint_num(&urb->ep->desc), + usb_endpoint_dir_in(&urb->ep->desc) + ? "in" : "out", + urb->transfer_buffer_length, + qtd, + qh); + + /* on STALL, error, and short reads this urb must + * complete and all its qtds must be recycled. + */ + if ((token & QTD_STS_HALT) != 0) { + + /* retry transaction errors until we + * reach the software xacterr limit + */ + if ((token & QTD_STS_XACT) && + QTD_CERR(token) == 0 && + ++qh->xacterrs < QH_XACTERR_MAX && + !urb->unlinked) { + fotg210_dbg(fotg210, + "detected XactErr len %zu/%zu retry %d\n", + qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); + + /* reset the token in the qtd and the + * qh overlay (which still contains + * the qtd) so that we pick up from + * where we left off + */ + token &= ~QTD_STS_HALT; + token |= QTD_STS_ACTIVE | + (FOTG210_TUNE_CERR << 10); + qtd->hw_token = cpu_to_hc32(fotg210, + token); + wmb(); + hw->hw_token = cpu_to_hc32(fotg210, + token); + goto retry_xacterr; + } + stopped = 1; + + /* magic dummy for some short reads; qh won't advance. + * that silicon quirk can kick in with this dummy too. + * + * other short reads won't stop the queue, including + * control transfers (status stage handles that) or + * most other single-qtd reads ... the queue stops if + * URB_SHORT_NOT_OK was set so the driver submitting + * the urbs could clean it up. + */ + } else if (IS_SHORT_READ(token) + && !(qtd->hw_alt_next + & FOTG210_LIST_END(fotg210))) { + stopped = 1; + } + + /* stop scanning when we reach qtds the hc is using */ + } else if (likely(!stopped + && fotg210->rh_state >= FOTG210_RH_RUNNING)) { + break; + + /* scan the whole queue for unlinks whenever it stops */ + } else { + stopped = 1; + + /* cancel everything if we halt, suspend, etc */ + if (fotg210->rh_state < FOTG210_RH_RUNNING) + last_status = -ESHUTDOWN; + + /* this qtd is active; skip it unless a previous qtd + * for its urb faulted, or its urb was canceled. + */ + else if (last_status == -EINPROGRESS && !urb->unlinked) + continue; + + /* qh unlinked; token in overlay may be most current */ + if (state == QH_STATE_IDLE + && cpu_to_hc32(fotg210, qtd->qtd_dma) + == hw->hw_current) { + token = hc32_to_cpu(fotg210, hw->hw_token); + + /* An unlink may leave an incomplete + * async transaction in the TT buffer. + * We have to clear it. + */ + fotg210_clear_tt_buffer(fotg210, qh, urb, + token); + } + } + + /* unless we already know the urb's status, collect qtd status + * and update count of bytes transferred. in common short read + * cases with only one data qtd (including control transfers), + * queue processing won't halt. 
but with two or more qtds (for + * example, with a 32 KB transfer), when the first qtd gets a + * short read the second must be removed by hand. + */ + if (last_status == -EINPROGRESS) { + last_status = qtd_copy_status(fotg210, urb, + qtd->length, token); + if (last_status == -EREMOTEIO + && (qtd->hw_alt_next + & FOTG210_LIST_END(fotg210))) + last_status = -EINPROGRESS; + + /* As part of low/full-speed endpoint-halt processing + * we must clear the TT buffer (11.17.5). + */ + if (unlikely(last_status != -EINPROGRESS && + last_status != -EREMOTEIO)) { + /* The TT's in some hubs malfunction when they + * receive this request following a STALL (they + * stop sending isochronous packets). Since a + * STALL can't leave the TT buffer in a busy + * state (if you believe Figures 11-48 - 11-51 + * in the USB 2.0 spec), we won't clear the TT + * buffer in this case. Strictly speaking this + * is a violation of the spec. + */ + if (last_status != -EPIPE) + fotg210_clear_tt_buffer(fotg210, qh, + urb, token); + } + } + + /* if we're removing something not at the queue head, + * patch the hardware queue pointer. + */ + if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { + last = list_entry(qtd->qtd_list.prev, + struct fotg210_qtd, qtd_list); + last->hw_next = qtd->hw_next; + } + + /* remove qtd; it's recycled after possible urb completion */ + list_del(&qtd->qtd_list); + last = qtd; + + /* reinit the xacterr counter for the next qtd */ + qh->xacterrs = 0; + } + + /* last urb's completion might still need calling */ + if (likely(last != NULL)) { + fotg210_urb_done(fotg210, last->urb, last_status); + count++; + fotg210_qtd_free(fotg210, last); + } + + /* Do we need to rescan for URBs dequeued during a giveback? */ + if (unlikely(qh->needs_rescan)) { + /* If the QH is already unlinked, do the rescan now. */ + if (state == QH_STATE_IDLE) + goto rescan; + + /* Otherwise we have to wait until the QH is fully unlinked. + * Our caller will start an unlink if qh->needs_rescan is + * set. But if an unlink has already started, nothing needs + * to be done. + */ + if (state != QH_STATE_LINKED) + qh->needs_rescan = 0; + } + + /* restore original state; caller must unlink or relink */ + qh->qh_state = state; + + /* be sure the hardware's done with the qh before refreshing + * it after fault cleanup, or recovering from silicon wrongly + * overlaying the dummy qtd (which reduces DMA chatter). + */ + if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) { + switch (state) { + case QH_STATE_IDLE: + qh_refresh(fotg210, qh); + break; + case QH_STATE_LINKED: + /* We won't refresh a QH that's linked (after the HC + * stopped the queue). That avoids a race: + * - HC reads first part of QH; + * - CPU updates that first part and the token; + * - HC reads rest of that QH, including token + * Result: HC gets an inconsistent image, and then + * DMAs to/from the wrong memory (corrupting it). + * + * That should be rare for interrupt transfers, + * except maybe high bandwidth ... + */ + + /* Tell the caller to start an unlink */ + qh->needs_rescan = 1; + break; + /* otherwise, unlink already started */ + } + } + + return count; +} + +/*-------------------------------------------------------------------------*/ + +/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ +#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) +/* ... 
and packet size, for any kind of endpoint descriptor */ +#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) + +/* + * reverse of qh_urb_transaction: free a list of TDs. + * used for cleanup after errors, before HC sees an URB's TDs. + */ +static void qtd_list_free( + struct fotg210_hcd *fotg210, + struct urb *urb, + struct list_head *qtd_list +) { + struct list_head *entry, *temp; + + list_for_each_safe(entry, temp, qtd_list) { + struct fotg210_qtd *qtd; + + qtd = list_entry(entry, struct fotg210_qtd, qtd_list); + list_del(&qtd->qtd_list); + fotg210_qtd_free(fotg210, qtd); + } +} + +/* + * create a list of filled qtds for this URB; won't link into qh. + */ +static struct list_head * +qh_urb_transaction( + struct fotg210_hcd *fotg210, + struct urb *urb, + struct list_head *head, + gfp_t flags +) { + struct fotg210_qtd *qtd, *qtd_prev; + dma_addr_t buf; + int len, this_sg_len, maxpacket; + int is_input; + u32 token; + int i; + struct scatterlist *sg; + + /* + * URBs map to sequences of QTDs: one logical transaction + */ + qtd = fotg210_qtd_alloc(fotg210, flags); + if (unlikely(!qtd)) + return NULL; + list_add_tail(&qtd->qtd_list, head); + qtd->urb = urb; + + token = QTD_STS_ACTIVE; + token |= (FOTG210_TUNE_CERR << 10); + /* for split transactions, SplitXState initialized to zero */ + + len = urb->transfer_buffer_length; + is_input = usb_pipein(urb->pipe); + if (usb_pipecontrol(urb->pipe)) { + /* SETUP pid */ + qtd_fill(fotg210, qtd, urb->setup_dma, + sizeof(struct usb_ctrlrequest), + token | (2 /* "setup" */ << 8), 8); + + /* ... and always at least one more pid */ + token ^= QTD_TOGGLE; + qtd_prev = qtd; + qtd = fotg210_qtd_alloc(fotg210, flags); + if (unlikely(!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma); + list_add_tail(&qtd->qtd_list, head); + + /* for zero length DATA stages, STATUS is always IN */ + if (len == 0) + token |= (1 /* "in" */ << 8); + } + + /* + * data transfer stage: buffer setup + */ + i = urb->num_mapped_sgs; + if (len > 0 && i > 0) { + sg = urb->sg; + buf = sg_dma_address(sg); + + /* urb->transfer_buffer_length may be smaller than the + * size of the scatterlist (or vice versa) + */ + this_sg_len = min_t(int, sg_dma_len(sg), len); + } else { + sg = NULL; + buf = urb->transfer_dma; + this_sg_len = len; + } + + if (is_input) + token |= (1 /* "in" */ << 8); + /* else it's already initted to "out" pid (0 << 8) */ + + maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); + + /* + * buffer gets wrapped in one or more qtds; + * last one may be "short" (including zero len) + * and may serve as a control status ack + */ + for (;;) { + int this_qtd_len; + + this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token, + maxpacket); + this_sg_len -= this_qtd_len; + len -= this_qtd_len; + buf += this_qtd_len; + + /* + * short reads advance to a "magic" dummy instead of the next + * qtd ... that forces the queue to stop, for manual cleanup. + * (this will usually be overridden later.) 
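+		 * ("overridden later" refers to the URB_SHORT_NOT_OK check
+		 * after this loop, which points the last qtd's hw_alt_next at
+		 * the list end so ordinary short reads keep the queue running.)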
+ */ + if (is_input) + qtd->hw_alt_next = fotg210->async->hw->hw_alt_next; + + /* qh makes control packets use qtd toggle; maybe switch it */ + if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) + token ^= QTD_TOGGLE; + + if (likely(this_sg_len <= 0)) { + if (--i <= 0 || len <= 0) + break; + sg = sg_next(sg); + buf = sg_dma_address(sg); + this_sg_len = min_t(int, sg_dma_len(sg), len); + } + + qtd_prev = qtd; + qtd = fotg210_qtd_alloc(fotg210, flags); + if (unlikely(!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma); + list_add_tail(&qtd->qtd_list, head); + } + + /* + * unless the caller requires manual cleanup after short reads, + * have the alt_next mechanism keep the queue running after the + * last data qtd (the only one, for control and most other cases). + */ + if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 + || usb_pipecontrol(urb->pipe))) + qtd->hw_alt_next = FOTG210_LIST_END(fotg210); + + /* + * control requests may need a terminating data "status" ack; + * other OUT ones may need a terminating short packet + * (zero length). + */ + if (likely(urb->transfer_buffer_length != 0)) { + int one_more = 0; + + if (usb_pipecontrol(urb->pipe)) { + one_more = 1; + token ^= 0x0100; /* "in" <--> "out" */ + token |= QTD_TOGGLE; /* force DATA1 */ + } else if (usb_pipeout(urb->pipe) + && (urb->transfer_flags & URB_ZERO_PACKET) + && !(urb->transfer_buffer_length % maxpacket)) { + one_more = 1; + } + if (one_more) { + qtd_prev = qtd; + qtd = fotg210_qtd_alloc(fotg210, flags); + if (unlikely(!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma); + list_add_tail(&qtd->qtd_list, head); + + /* never any data in such packets */ + qtd_fill(fotg210, qtd, 0, 0, token, 0); + } + } + + /* by default, enable interrupt on urb completion */ + if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) + qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC); + return head; + +cleanup: + qtd_list_free(fotg210, urb, head); + return NULL; +} + +/*-------------------------------------------------------------------------*/ +/* + * Would be best to create all qh's from config descriptors, + * when each interface/altsetting is established. Unlink + * any previous qh and cancel its urbs first; endpoints are + * implicitly reset then (data toggle too). + * That'd mean updating how usbcore talks to HCDs. (2.7?) +*/ + + +/* + * Each QH holds a qtd list; a QH is used for everything except iso. + * + * For interrupt urbs, the scheduler must set the microframe scheduling + * mask(s) each time the QH gets scheduled. For highspeed, that's + * just one microframe in the s-mask. For split interrupt transactions + * there are additional complications: c-mask, maybe FSTNs. + */ +static struct fotg210_qh * +qh_make( + struct fotg210_hcd *fotg210, + struct urb *urb, + gfp_t flags +) { + struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags); + u32 info1 = 0, info2 = 0; + int is_input, type; + int maxp = 0; + struct usb_tt *tt = urb->dev->tt; + struct fotg210_qh_hw *hw; + + if (!qh) + return qh; + + /* + * init endpoint/device data for this QH + */ + info1 |= usb_pipeendpoint(urb->pipe) << 8; + info1 |= usb_pipedevice(urb->pipe) << 0; + + is_input = usb_pipein(urb->pipe); + type = usb_pipetype(urb->pipe); + maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input); + + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth + * acts like up to 3KB, but is built from smaller packets. 
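+	 * For example, a high bandwidth wMaxPacketSize of 0x1400 decodes
+	 * as max_packet() == 1024 bytes with hb_mult() == 3 transactions
+	 * per microframe, i.e. up to 3072 bytes every 125 usec.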
+ */ + if (max_packet(maxp) > 1024) { + fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", + max_packet(maxp)); + goto done; + } + + /* Compute interrupt scheduling parameters just once, and save. + * - allowing for high bandwidth, how many nsec/uframe are used? + * - split transactions need a second CSPLIT uframe; same question + * - splits also need a schedule gap (for full/low speed I/O) + * - qh has a polling interval + * + * For control/bulk requests, the HC or TT handles these. + */ + if (type == PIPE_INTERRUPT) { + qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, + is_input, 0, + hb_mult(maxp) * max_packet(maxp))); + qh->start = NO_FRAME; + + if (urb->dev->speed == USB_SPEED_HIGH) { + qh->c_usecs = 0; + qh->gap_uf = 0; + + qh->period = urb->interval >> 3; + if (qh->period == 0 && urb->interval != 1) { + /* NOTE interval 2 or 4 uframes could work. + * But interval 1 scheduling is simpler, and + * includes high bandwidth. + */ + urb->interval = 1; + } else if (qh->period > fotg210->periodic_size) { + qh->period = fotg210->periodic_size; + urb->interval = qh->period << 3; + } + } else { + int think_time; + + /* gap is f(FS/LS transfer times) */ + qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed, + is_input, 0, maxp) / (125 * 1000); + + /* FIXME this just approximates SPLIT/CSPLIT times */ + if (is_input) { /* SPLIT, gap, CSPLIT+DATA */ + qh->c_usecs = qh->usecs + HS_USECS(0); + qh->usecs = HS_USECS(1); + } else { /* SPLIT+DATA, gap, CSPLIT */ + qh->usecs += HS_USECS(1); + qh->c_usecs = HS_USECS(0); + } + + think_time = tt ? tt->think_time : 0; + qh->tt_usecs = NS_TO_US(think_time + + usb_calc_bus_time(urb->dev->speed, + is_input, 0, max_packet(maxp))); + qh->period = urb->interval; + if (qh->period > fotg210->periodic_size) { + qh->period = fotg210->periodic_size; + urb->interval = qh->period; + } + } + } + + /* support for tt scheduling, and access to toggles */ + qh->dev = urb->dev; + + /* using TT? */ + switch (urb->dev->speed) { + case USB_SPEED_LOW: + info1 |= QH_LOW_SPEED; + /* FALL THROUGH */ + + case USB_SPEED_FULL: + /* EPS 0 means "full" */ + if (type != PIPE_INTERRUPT) + info1 |= (FOTG210_TUNE_RL_TT << 28); + if (type == PIPE_CONTROL) { + info1 |= QH_CONTROL_EP; /* for TT */ + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ + } + info1 |= maxp << 16; + + info2 |= (FOTG210_TUNE_MULT_TT << 30); + + /* Some Freescale processors have an erratum in which the + * port number in the queue head was 0..N-1 instead of 1..N. + */ + if (fotg210_has_fsl_portno_bug(fotg210)) + info2 |= (urb->dev->ttport-1) << 23; + else + info2 |= urb->dev->ttport << 23; + + /* set the address of the TT; for TDI's integrated + * root hub tt, leave it zeroed. + */ + if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub) + info2 |= tt->hub->devnum << 16; + + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ + + break; + + case USB_SPEED_HIGH: /* no TT involved */ + info1 |= QH_HIGH_SPEED; + if (type == PIPE_CONTROL) { + info1 |= (FOTG210_TUNE_RL_HS << 28); + info1 |= 64 << 16; /* usb2 fixed maxpacket */ + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ + info2 |= (FOTG210_TUNE_MULT_HS << 30); + } else if (type == PIPE_BULK) { + info1 |= (FOTG210_TUNE_RL_HS << 28); + /* The USB spec says that high speed bulk endpoints + * always use 512 byte maxpacket. But some device + * vendors decided to ignore that, and MSFT is happy + * to help them do so. So now people expect to use + * such nonconformant devices with Linux too; sigh. 
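+		 * The qh therefore just records whatever max_packet(maxp)
+		 * the endpoint descriptor reports (values above the 1024 byte
+		 * ceiling were already rejected earlier in this function).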
+ */ + info1 |= max_packet(maxp) << 16; + info2 |= (FOTG210_TUNE_MULT_HS << 30); + } else { /* PIPE_INTERRUPT */ + info1 |= max_packet(maxp) << 16; + info2 |= hb_mult(maxp) << 30; + } + break; + default: + fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev, + urb->dev->speed); +done: + qh_destroy(fotg210, qh); + return NULL; + } + + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ + + /* init as live, toggle clear, advance to dummy */ + qh->qh_state = QH_STATE_IDLE; + hw = qh->hw; + hw->hw_info1 = cpu_to_hc32(fotg210, info1); + hw->hw_info2 = cpu_to_hc32(fotg210, info2); + qh->is_out = !is_input; + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1); + qh_refresh(fotg210, qh); + return qh; +} + +/*-------------------------------------------------------------------------*/ + +static void enable_async(struct fotg210_hcd *fotg210) +{ + if (fotg210->async_count++) + return; + + /* Stop waiting to turn off the async schedule */ + fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC); + + /* Don't start the schedule until ASS is 0 */ + fotg210_poll_ASS(fotg210); + turn_on_io_watchdog(fotg210); +} + +static void disable_async(struct fotg210_hcd *fotg210) +{ + if (--fotg210->async_count) + return; + + /* The async schedule and async_unlink list are supposed to be empty */ + WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink); + + /* Don't turn off the schedule until ASS is 1 */ + fotg210_poll_ASS(fotg210); +} + +/* move qh (and its qtds) onto async queue; maybe enable queue. */ + +static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + __hc32 dma = QH_NEXT(fotg210, qh->qh_dma); + struct fotg210_qh *head; + + /* Don't link a QH if there's a Clear-TT-Buffer pending */ + if (unlikely(qh->clearing_tt)) + return; + + WARN_ON(qh->qh_state != QH_STATE_IDLE); + + /* clear halt and/or toggle; and maybe recover from silicon quirk */ + qh_refresh(fotg210, qh); + + /* splice right after start */ + head = fotg210->async; + qh->qh_next = head->qh_next; + qh->hw->hw_next = head->hw->hw_next; + wmb(); + + head->qh_next.qh = qh; + head->hw->hw_next = dma; + + qh->xacterrs = 0; + qh->qh_state = QH_STATE_LINKED; + /* qtd completions reported later by interrupt */ + + enable_async(fotg210); +} + +/*-------------------------------------------------------------------------*/ + +/* + * For control/bulk/interrupt, return QH with these TDs appended. + * Allocates and initializes the QH if necessary. + * Returns null if it can't allocate a QH it needs to. + * If the QH has TDs (urbs) already, that's great. + */ +static struct fotg210_qh *qh_append_tds( + struct fotg210_hcd *fotg210, + struct urb *urb, + struct list_head *qtd_list, + int epnum, + void **ptr +) +{ + struct fotg210_qh *qh = NULL; + __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f); + + qh = (struct fotg210_qh *) *ptr; + if (unlikely(qh == NULL)) { + /* can't sleep here, we have fotg210->lock... */ + qh = qh_make(fotg210, urb, GFP_ATOMIC); + *ptr = qh; + } + if (likely(qh != NULL)) { + struct fotg210_qtd *qtd; + + if (unlikely(list_empty(qtd_list))) + qtd = NULL; + else + qtd = list_entry(qtd_list->next, struct fotg210_qtd, + qtd_list); + + /* control qh may need patching ... */ + if (unlikely(epnum == 0)) { + /* usb_reset_device() briefly reverts to address 0 */ + if (usb_pipedevice(urb->pipe) == 0) + qh->hw->hw_info1 &= ~qh_addr_mask; + } + + /* just one way to queue requests: swap with the dummy qtd. + * only hc or qh_refresh() ever modify the overlay. 
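+	 * The swap below copies the first new qtd into the old dummy
+	 * (which is already linked at the hardware list tail), recycles
+	 * that first qtd as the new dummy, and only then restores the
+	 * saved token so the HC sees the whole chain go active at once.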
+ */ + if (likely(qtd != NULL)) { + struct fotg210_qtd *dummy; + dma_addr_t dma; + __hc32 token; + + /* to avoid racing the HC, use the dummy td instead of + * the first td of our list (becomes new dummy). both + * tds stay deactivated until we're done, when the + * HC is allowed to fetch the old dummy (4.10.2). + */ + token = qtd->hw_token; + qtd->hw_token = HALT_BIT(fotg210); + + dummy = qh->dummy; + + dma = dummy->qtd_dma; + *dummy = *qtd; + dummy->qtd_dma = dma; + + list_del(&qtd->qtd_list); + list_add(&dummy->qtd_list, qtd_list); + list_splice_tail(qtd_list, &qh->qtd_list); + + fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma); + qh->dummy = qtd; + + /* hc must see the new dummy at list end */ + dma = qtd->qtd_dma; + qtd = list_entry(qh->qtd_list.prev, + struct fotg210_qtd, qtd_list); + qtd->hw_next = QTD_NEXT(fotg210, dma); + + /* let the hc process these next qtds */ + wmb(); + dummy->hw_token = token; + + urb->hcpriv = qh; + } + } + return qh; +} + +/*-------------------------------------------------------------------------*/ + +static int +submit_async( + struct fotg210_hcd *fotg210, + struct urb *urb, + struct list_head *qtd_list, + gfp_t mem_flags +) { + int epnum; + unsigned long flags; + struct fotg210_qh *qh = NULL; + int rc; + + epnum = urb->ep->desc.bEndpointAddress; + +#ifdef FOTG210_URB_TRACE + { + struct fotg210_qtd *qtd; + qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list); + fotg210_dbg(fotg210, + "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", + __func__, urb->dev->devpath, urb, + epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", + urb->transfer_buffer_length, + qtd, urb->ep->hcpriv); + } +#endif + + spin_lock_irqsave(&fotg210->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) { + rc = -ESHUTDOWN; + goto done; + } + rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb); + if (unlikely(rc)) + goto done; + + qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv); + if (unlikely(qh == NULL)) { + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); + rc = -ENOMEM; + goto done; + } + + /* Control/bulk operations through TTs don't need scheduling, + * the HC and TT handle it when the TT has a buffer ready. + */ + if (likely(qh->qh_state == QH_STATE_IDLE)) + qh_link_async(fotg210, qh); + done: + spin_unlock_irqrestore(&fotg210->lock, flags); + if (unlikely(qh == NULL)) + qtd_list_free(fotg210, urb, qtd_list); + return rc; +} + +/*-------------------------------------------------------------------------*/ + +static void single_unlink_async(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh) +{ + struct fotg210_qh *prev; + + /* Add to the end of the list of QHs waiting for the next IAAD */ + qh->qh_state = QH_STATE_UNLINK; + if (fotg210->async_unlink) + fotg210->async_unlink_last->unlink_next = qh; + else + fotg210->async_unlink = qh; + fotg210->async_unlink_last = qh; + + /* Unlink it from the schedule */ + prev = fotg210->async; + while (prev->qh_next.qh != qh) + prev = prev->qh_next.qh; + + prev->hw->hw_next = qh->hw->hw_next; + prev->qh_next = qh->qh_next; + if (fotg210->qh_scan_next == qh) + fotg210->qh_scan_next = qh->qh_next.qh; +} + +static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested) +{ + /* + * Do nothing if an IAA cycle is already running or + * if one will be started shortly. 
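+	 * (An IAA cycle rings the CMD_IAAD "Interrupt on Async Advance"
+	 * doorbell so the controller interrupts us once it has stopped
+	 * caching the unlinked QHs; the IAA watchdog timer is the
+	 * backstop in case that interrupt never arrives.)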
+ */ + if (fotg210->async_iaa || fotg210->async_unlinking) + return; + + /* Do all the waiting QHs at once */ + fotg210->async_iaa = fotg210->async_unlink; + fotg210->async_unlink = NULL; + + /* If the controller isn't running, we don't have to wait for it */ + if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) { + if (!nested) /* Avoid recursion */ + end_unlink_async(fotg210); + + /* Otherwise start a new IAA cycle */ + } else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) { + /* Make sure the unlinks are all visible to the hardware */ + wmb(); + + fotg210_writel(fotg210, fotg210->command | CMD_IAAD, + &fotg210->regs->command); + fotg210_readl(fotg210, &fotg210->regs->command); + fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG, + true); + } +} + +/* the async qh for the qtds being unlinked are now gone from the HC */ + +static void end_unlink_async(struct fotg210_hcd *fotg210) +{ + struct fotg210_qh *qh; + + /* Process the idle QHs */ + restart: + fotg210->async_unlinking = true; + while (fotg210->async_iaa) { + qh = fotg210->async_iaa; + fotg210->async_iaa = qh->unlink_next; + qh->unlink_next = NULL; + + qh->qh_state = QH_STATE_IDLE; + qh->qh_next.qh = NULL; + + qh_completions(fotg210, qh); + if (!list_empty(&qh->qtd_list) && + fotg210->rh_state == FOTG210_RH_RUNNING) + qh_link_async(fotg210, qh); + disable_async(fotg210); + } + fotg210->async_unlinking = false; + + /* Start a new IAA cycle if any QHs are waiting for it */ + if (fotg210->async_unlink) { + start_iaa_cycle(fotg210, true); + if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) + goto restart; + } +} + +static void unlink_empty_async(struct fotg210_hcd *fotg210) +{ + struct fotg210_qh *qh, *next; + bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING); + bool check_unlinks_later = false; + + /* Unlink all the async QHs that have been empty for a timer cycle */ + next = fotg210->async->qh_next.qh; + while (next) { + qh = next; + next = qh->qh_next.qh; + + if (list_empty(&qh->qtd_list) && + qh->qh_state == QH_STATE_LINKED) { + if (!stopped && qh->unlink_cycle == + fotg210->async_unlink_cycle) + check_unlinks_later = true; + else + single_unlink_async(fotg210, qh); + } + } + + /* Start a new IAA cycle if any QHs are waiting for it */ + if (fotg210->async_unlink) + start_iaa_cycle(fotg210, false); + + /* QHs that haven't been empty for long enough will be handled later */ + if (check_unlinks_later) { + fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS, + true); + ++fotg210->async_unlink_cycle; + } +} + +/* makes sure the async qh will become idle */ +/* caller must own fotg210->lock */ + +static void start_unlink_async(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh) +{ + /* + * If the QH isn't linked then there's nothing we can do + * unless we were called during a giveback, in which case + * qh_completions() has to deal with it. 
+ */ + if (qh->qh_state != QH_STATE_LINKED) { + if (qh->qh_state == QH_STATE_COMPLETING) + qh->needs_rescan = 1; + return; + } + + single_unlink_async(fotg210, qh); + start_iaa_cycle(fotg210, false); +} + +/*-------------------------------------------------------------------------*/ + +static void scan_async(struct fotg210_hcd *fotg210) +{ + struct fotg210_qh *qh; + bool check_unlinks_later = false; + + fotg210->qh_scan_next = fotg210->async->qh_next.qh; + while (fotg210->qh_scan_next) { + qh = fotg210->qh_scan_next; + fotg210->qh_scan_next = qh->qh_next.qh; + rescan: + /* clean any finished work for this qh */ + if (!list_empty(&qh->qtd_list)) { + int temp; + + /* + * Unlinks could happen here; completion reporting + * drops the lock. That's why fotg210->qh_scan_next + * always holds the next qh to scan; if the next qh + * gets unlinked then fotg210->qh_scan_next is adjusted + * in single_unlink_async(). + */ + temp = qh_completions(fotg210, qh); + if (qh->needs_rescan) { + start_unlink_async(fotg210, qh); + } else if (list_empty(&qh->qtd_list) + && qh->qh_state == QH_STATE_LINKED) { + qh->unlink_cycle = fotg210->async_unlink_cycle; + check_unlinks_later = true; + } else if (temp != 0) + goto rescan; + } + } + + /* + * Unlink empty entries, reducing DMA usage as well + * as HCD schedule-scanning costs. Delay for any qh + * we just scanned, there's a not-unusual case that it + * doesn't stay idle for long. + */ + if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING && + !(fotg210->enabled_hrtimer_events & + BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) { + fotg210_enable_event(fotg210, + FOTG210_HRTIMER_ASYNC_UNLINKS, true); + ++fotg210->async_unlink_cycle; + } +} +/*-------------------------------------------------------------------------*/ +/* + * EHCI scheduled transaction support: interrupt, iso, split iso + * These are called "periodic" transactions in the EHCI spec. + * + * Note that for interrupt transfers, the QH/QTD manipulation is shared + * with the "asynchronous" transaction support (control/bulk transfers). + * The only real difference is in how interrupt transfers are scheduled. + * + * For ISO, we make an "iso_stream" head to serve the same role as a QH. + * It keeps track of every ITD (or SITD) that's linked, and holds enough + * pre-calculated schedule data to make appending to the queue be quick. 
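+ * The hardware periodic schedule is the frame list set up in
+ * fotg210_mem_init(); the parallel "pshadow" array mirrors it so the
+ * driver can map each hardware link back to its qh/itd/fstn struct.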
+ */ + +static int fotg210_get_frame(struct usb_hcd *hcd); + +/*-------------------------------------------------------------------------*/ + +/* + * periodic_next_shadow - return "next" pointer on shadow list + * @periodic: host pointer to qh/itd + * @tag: hardware tag for type of this record + */ +static union fotg210_shadow * +periodic_next_shadow(struct fotg210_hcd *fotg210, + union fotg210_shadow *periodic, __hc32 tag) +{ + switch (hc32_to_cpu(fotg210, tag)) { + case Q_TYPE_QH: + return &periodic->qh->qh_next; + case Q_TYPE_FSTN: + return &periodic->fstn->fstn_next; + default: + return &periodic->itd->itd_next; + } +} + +static __hc32 * +shadow_next_periodic(struct fotg210_hcd *fotg210, + union fotg210_shadow *periodic, __hc32 tag) +{ + switch (hc32_to_cpu(fotg210, tag)) { + /* our fotg210_shadow.qh is actually software part */ + case Q_TYPE_QH: + return &periodic->qh->hw->hw_next; + /* others are hw parts */ + default: + return periodic->hw_next; + } +} + +/* caller must hold fotg210->lock */ +static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame, + void *ptr) +{ + union fotg210_shadow *prev_p = &fotg210->pshadow[frame]; + __hc32 *hw_p = &fotg210->periodic[frame]; + union fotg210_shadow here = *prev_p; + + /* find predecessor of "ptr"; hw and shadow lists are in sync */ + while (here.ptr && here.ptr != ptr) { + prev_p = periodic_next_shadow(fotg210, prev_p, + Q_NEXT_TYPE(fotg210, *hw_p)); + hw_p = shadow_next_periodic(fotg210, &here, + Q_NEXT_TYPE(fotg210, *hw_p)); + here = *prev_p; + } + /* an interrupt entry (at list end) could have been shared */ + if (!here.ptr) + return; + + /* update shadow and hardware lists ... the old "next" pointers + * from ptr may still be in use, the caller updates them. + */ + *prev_p = *periodic_next_shadow(fotg210, &here, + Q_NEXT_TYPE(fotg210, *hw_p)); + + *hw_p = *shadow_next_periodic(fotg210, &here, + Q_NEXT_TYPE(fotg210, *hw_p)); +} + +/* how many of the uframe's 125 usecs are allocated? */ +static unsigned short +periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe) +{ + __hc32 *hw_p = &fotg210->periodic[frame]; + union fotg210_shadow *q = &fotg210->pshadow[frame]; + unsigned usecs = 0; + struct fotg210_qh_hw *hw; + + while (q->ptr) { + switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) { + case Q_TYPE_QH: + hw = q->qh->hw; + /* is it in the S-mask? */ + if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe)) + usecs += q->qh->usecs; + /* ... or C-mask? 
*/ + if (hw->hw_info2 & cpu_to_hc32(fotg210, + 1 << (8 + uframe))) + usecs += q->qh->c_usecs; + hw_p = &hw->hw_next; + q = &q->qh->qh_next; + break; + /* case Q_TYPE_FSTN: */ + default: + /* for "save place" FSTNs, count the relevant INTR + * bandwidth from the previous frame + */ + if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210)) + fotg210_dbg(fotg210, "ignoring FSTN cost ...\n"); + + hw_p = &q->fstn->hw_next; + q = &q->fstn->fstn_next; + break; + case Q_TYPE_ITD: + if (q->itd->hw_transaction[uframe]) + usecs += q->itd->stream->usecs; + hw_p = &q->itd->hw_next; + q = &q->itd->itd_next; + break; + } + } +#ifdef DEBUG + if (usecs > fotg210->uframe_periodic_max) + fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n", + frame * 8 + uframe, usecs); +#endif + return usecs; +} + +/*-------------------------------------------------------------------------*/ + +static int same_tt(struct usb_device *dev1, struct usb_device *dev2) +{ + if (!dev1->tt || !dev2->tt) + return 0; + if (dev1->tt != dev2->tt) + return 0; + if (dev1->tt->multi) + return dev1->ttport == dev2->ttport; + else + return 1; +} + +/* return true iff the device's transaction translator is available + * for a periodic transfer starting at the specified frame, using + * all the uframes in the mask. + */ +static int tt_no_collision( + struct fotg210_hcd *fotg210, + unsigned period, + struct usb_device *dev, + unsigned frame, + u32 uf_mask +) +{ + if (period == 0) /* error */ + return 0; + + /* note bandwidth wastage: split never follows csplit + * (different dev or endpoint) until the next uframe. + * calling convention doesn't make that distinction. + */ + for (; frame < fotg210->periodic_size; frame += period) { + union fotg210_shadow here; + __hc32 type; + struct fotg210_qh_hw *hw; + + here = fotg210->pshadow[frame]; + type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]); + while (here.ptr) { + switch (hc32_to_cpu(fotg210, type)) { + case Q_TYPE_ITD: + type = Q_NEXT_TYPE(fotg210, here.itd->hw_next); + here = here.itd->itd_next; + continue; + case Q_TYPE_QH: + hw = here.qh->hw; + if (same_tt(dev, here.qh->dev)) { + u32 mask; + + mask = hc32_to_cpu(fotg210, + hw->hw_info2); + /* "knows" no gap is needed */ + mask |= mask >> 8; + if (mask & uf_mask) + break; + } + type = Q_NEXT_TYPE(fotg210, hw->hw_next); + here = here.qh->qh_next; + continue; + /* case Q_TYPE_FSTN: */ + default: + fotg210_dbg(fotg210, + "periodic frame %d bogus type %d\n", + frame, type); + } + + /* collision or error */ + return 0; + } + } + + /* no collision */ + return 1; +} + +/*-------------------------------------------------------------------------*/ + +static void enable_periodic(struct fotg210_hcd *fotg210) +{ + if (fotg210->periodic_count++) + return; + + /* Stop waiting to turn off the periodic schedule */ + fotg210->enabled_hrtimer_events &= + ~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC); + + /* Don't start the schedule until PSS is 0 */ + fotg210_poll_PSS(fotg210); + turn_on_io_watchdog(fotg210); +} + +static void disable_periodic(struct fotg210_hcd *fotg210) +{ + if (--fotg210->periodic_count) + return; + + /* Don't turn off the schedule until PSS is 1 */ + fotg210_poll_PSS(fotg210); +} + +/*-------------------------------------------------------------------------*/ + +/* periodic schedule slots have iso tds (normal or split) first, then a + * sparse tree for active interrupt transfers. + * + * this just links in a qh; caller guarantees uframe masks are set right. 
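+ * Each branch of the tree is kept sorted by period, slowest first,
+ * so endpoints with different intervals can share interior nodes.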
+ * no FSTN support (yet; fotg210 0.96+) + */ +static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + unsigned i; + unsigned period = qh->period; + + dev_dbg(&qh->dev->dev, + "link qh%d-%04x/%p start %d [%d/%d us]\n", + period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) + & (QH_CMASK | QH_SMASK), + qh, qh->start, qh->usecs, qh->c_usecs); + + /* high bandwidth, or otherwise every microframe */ + if (period == 0) + period = 1; + + for (i = qh->start; i < fotg210->periodic_size; i += period) { + union fotg210_shadow *prev = &fotg210->pshadow[i]; + __hc32 *hw_p = &fotg210->periodic[i]; + union fotg210_shadow here = *prev; + __hc32 type = 0; + + /* skip the iso nodes at list head */ + while (here.ptr) { + type = Q_NEXT_TYPE(fotg210, *hw_p); + if (type == cpu_to_hc32(fotg210, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(fotg210, prev, type); + hw_p = shadow_next_periodic(fotg210, &here, type); + here = *prev; + } + + /* sorting each branch by period (slow-->fast) + * enables sharing interior tree nodes + */ + while (here.ptr && qh != here.qh) { + if (qh->period > here.qh->period) + break; + prev = &here.qh->qh_next; + hw_p = &here.qh->hw->hw_next; + here = *prev; + } + /* link in this qh, unless some earlier pass did that */ + if (qh != here.qh) { + qh->qh_next = here; + if (here.qh) + qh->hw->hw_next = *hw_p; + wmb(); + prev->qh = qh; + *hw_p = QH_NEXT(fotg210, qh->qh_dma); + } + } + qh->qh_state = QH_STATE_LINKED; + qh->xacterrs = 0; + + /* update per-qh bandwidth for usbfs */ + fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period + ? ((qh->usecs + qh->c_usecs) / qh->period) + : (qh->usecs * 8); + + list_add(&qh->intr_node, &fotg210->intr_qh_list); + + /* maybe enable periodic schedule processing */ + ++fotg210->intr_count; + enable_periodic(fotg210); +} + +static void qh_unlink_periodic(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh) +{ + unsigned i; + unsigned period; + + /* + * If qh is for a low/full-speed device, simply unlinking it + * could interfere with an ongoing split transaction. To unlink + * it safely would require setting the QH_INACTIVATE bit and + * waiting at least one frame, as described in EHCI 4.12.2.5. + * + * We won't bother with any of this. Instead, we assume that the + * only reason for unlinking an interrupt QH while the current URB + * is still active is to dequeue all the URBs (flush the whole + * endpoint queue). + * + * If rebalancing the periodic schedule is ever implemented, this + * approach will no longer be valid. + */ + + /* high bandwidth, or otherwise part of every microframe */ + period = qh->period; + if (!period) + period = 1; + + for (i = qh->start; i < fotg210->periodic_size; i += period) + periodic_unlink(fotg210, i, qh); + + /* update per-qh bandwidth for usbfs */ + fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period + ? 
((qh->usecs + qh->c_usecs) / qh->period) + : (qh->usecs * 8); + + dev_dbg(&qh->dev->dev, + "unlink qh%d-%04x/%p start %d [%d/%d us]\n", + qh->period, + hc32_to_cpup(fotg210, &qh->hw->hw_info2) & + (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs); + + /* qh->qh_next still "live" to HC */ + qh->qh_state = QH_STATE_UNLINK; + qh->qh_next.ptr = NULL; + + if (fotg210->qh_scan_next == qh) + fotg210->qh_scan_next = list_entry(qh->intr_node.next, + struct fotg210_qh, intr_node); + list_del(&qh->intr_node); +} + +static void start_unlink_intr(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh) +{ + /* If the QH isn't linked then there's nothing we can do + * unless we were called during a giveback, in which case + * qh_completions() has to deal with it. + */ + if (qh->qh_state != QH_STATE_LINKED) { + if (qh->qh_state == QH_STATE_COMPLETING) + qh->needs_rescan = 1; + return; + } + + qh_unlink_periodic(fotg210, qh); + + /* Make sure the unlinks are visible before starting the timer */ + wmb(); + + /* + * The EHCI spec doesn't say how long it takes the controller to + * stop accessing an unlinked interrupt QH. The timer delay is + * 9 uframes; presumably that will be long enough. + */ + qh->unlink_cycle = fotg210->intr_unlink_cycle; + + /* New entries go at the end of the intr_unlink list */ + if (fotg210->intr_unlink) + fotg210->intr_unlink_last->unlink_next = qh; + else + fotg210->intr_unlink = qh; + fotg210->intr_unlink_last = qh; + + if (fotg210->intr_unlinking) + ; /* Avoid recursive calls */ + else if (fotg210->rh_state < FOTG210_RH_RUNNING) + fotg210_handle_intr_unlinks(fotg210); + else if (fotg210->intr_unlink == qh) { + fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR, + true); + ++fotg210->intr_unlink_cycle; + } +} + +static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + struct fotg210_qh_hw *hw = qh->hw; + int rc; + + qh->qh_state = QH_STATE_IDLE; + hw->hw_next = FOTG210_LIST_END(fotg210); + + qh_completions(fotg210, qh); + + /* reschedule QH iff another request is queued */ + if (!list_empty(&qh->qtd_list) && + fotg210->rh_state == FOTG210_RH_RUNNING) { + rc = qh_schedule(fotg210, qh); + + /* An error here likely indicates handshake failure + * or no space left in the schedule. Neither fault + * should happen often ... + * + * FIXME kill the now-dysfunctional queued urbs + */ + if (rc != 0) + fotg210_err(fotg210, "can't reschedule qh %p, err %d\n", + qh, rc); + } + + /* maybe turn off periodic schedule */ + --fotg210->intr_count; + disable_periodic(fotg210); +} + +/*-------------------------------------------------------------------------*/ + +static int check_period( + struct fotg210_hcd *fotg210, + unsigned frame, + unsigned uframe, + unsigned period, + unsigned usecs +) { + int claimed; + + /* complete split running into next frame? + * given FSTN support, we could sometimes check... + */ + if (uframe >= 8) + return 0; + + /* convert "usecs we need" to "max already claimed" */ + usecs = fotg210->uframe_periodic_max - usecs; + + /* we "know" 2 and 4 uframe intervals were rejected; so + * for period 0, check _every_ microframe in the schedule. 
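+	 * (here "period" counts frames; qh_make() collapses high speed
+	 * intervals of 2 or 4 uframes down to 1, which reaches this code
+	 * as period 0.)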
+ */ + if (unlikely(period == 0)) { + do { + for (uframe = 0; uframe < 7; uframe++) { + claimed = periodic_usecs(fotg210, frame, + uframe); + if (claimed > usecs) + return 0; + } + } while ((frame += 1) < fotg210->periodic_size); + + /* just check the specified uframe, at that period */ + } else { + do { + claimed = periodic_usecs(fotg210, frame, uframe); + if (claimed > usecs) + return 0; + } while ((frame += period) < fotg210->periodic_size); + } + + /* success! */ + return 1; +} + +static int check_intr_schedule( + struct fotg210_hcd *fotg210, + unsigned frame, + unsigned uframe, + const struct fotg210_qh *qh, + __hc32 *c_maskp +) +{ + int retval = -ENOSPC; + u8 mask = 0; + + if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ + goto done; + + if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs)) + goto done; + if (!qh->c_usecs) { + retval = 0; + *c_maskp = 0; + goto done; + } + + /* Make sure this tt's buffer is also available for CSPLITs. + * We pessimize a bit; probably the typical full speed case + * doesn't need the second CSPLIT. + * + * NOTE: both SPLIT and CSPLIT could be checked in just + * one smart pass... + */ + mask = 0x03 << (uframe + qh->gap_uf); + *c_maskp = cpu_to_hc32(fotg210, mask << 8); + + mask |= 1 << uframe; + if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) { + if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1, + qh->period, qh->c_usecs)) + goto done; + if (!check_period(fotg210, frame, uframe + qh->gap_uf, + qh->period, qh->c_usecs)) + goto done; + retval = 0; + } +done: + return retval; +} + +/* "first fit" scheduling policy used the first time through, + * or when the previous schedule slot can't be re-used. + */ +static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +{ + int status; + unsigned uframe; + __hc32 c_mask; + unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ + struct fotg210_qh_hw *hw = qh->hw; + + qh_refresh(fotg210, qh); + hw->hw_next = FOTG210_LIST_END(fotg210); + frame = qh->start; + + /* reuse the previous schedule slots, if we can */ + if (frame < qh->period) { + uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK); + status = check_intr_schedule(fotg210, frame, --uframe, + qh, &c_mask); + } else { + uframe = 0; + c_mask = 0; + status = -ENOSPC; + } + + /* else scan the schedule to find a group of slots such that all + * uframes have enough periodic bandwidth available. + */ + if (status) { + /* "normal" case, uframing flexible except with splits */ + if (qh->period) { + int i; + + for (i = qh->period; status && i > 0; --i) { + frame = ++fotg210->random_frame % qh->period; + for (uframe = 0; uframe < 8; uframe++) { + status = check_intr_schedule(fotg210, + frame, uframe, qh, + &c_mask); + if (status == 0) + break; + } + } + + /* qh->period == 0 means every uframe */ + } else { + frame = 0; + status = check_intr_schedule(fotg210, 0, 0, qh, + &c_mask); + } + if (status) + goto done; + qh->start = frame; + + /* reset S-frame and (maybe) C-frame masks */ + hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK)); + hw->hw_info2 |= qh->period + ? 
cpu_to_hc32(fotg210, 1 << uframe) + : cpu_to_hc32(fotg210, QH_SMASK); + hw->hw_info2 |= c_mask; + } else + fotg210_dbg(fotg210, "reused qh %p schedule\n", qh); + + /* stuff into the periodic schedule */ + qh_link_periodic(fotg210, qh); +done: + return status; +} + +static int intr_submit( + struct fotg210_hcd *fotg210, + struct urb *urb, + struct list_head *qtd_list, + gfp_t mem_flags +) { + unsigned epnum; + unsigned long flags; + struct fotg210_qh *qh; + int status; + struct list_head empty; + + /* get endpoint and transfer/schedule data */ + epnum = urb->ep->desc.bEndpointAddress; + + spin_lock_irqsave(&fotg210->lock, flags); + + if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb); + if (unlikely(status)) + goto done_not_linked; + + /* get qh and force any scheduling errors */ + INIT_LIST_HEAD(&empty); + qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv); + if (qh == NULL) { + status = -ENOMEM; + goto done; + } + if (qh->qh_state == QH_STATE_IDLE) { + status = qh_schedule(fotg210, qh); + if (status) + goto done; + } + + /* then queue the urb's tds to the qh */ + qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv); + BUG_ON(qh == NULL); + + /* ... update usbfs periodic stats */ + fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++; + +done: + if (unlikely(status)) + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); +done_not_linked: + spin_unlock_irqrestore(&fotg210->lock, flags); + if (status) + qtd_list_free(fotg210, urb, qtd_list); + + return status; +} + +static void scan_intr(struct fotg210_hcd *fotg210) +{ + struct fotg210_qh *qh; + + list_for_each_entry_safe(qh, fotg210->qh_scan_next, + &fotg210->intr_qh_list, intr_node) { + rescan: + /* clean any finished work for this qh */ + if (!list_empty(&qh->qtd_list)) { + int temp; + + /* + * Unlinks could happen here; completion reporting + * drops the lock. That's why fotg210->qh_scan_next + * always holds the next qh to scan; if the next qh + * gets unlinked then fotg210->qh_scan_next is adjusted + * in qh_unlink_periodic(). + */ + temp = qh_completions(fotg210, qh); + if (unlikely(qh->needs_rescan || + (list_empty(&qh->qtd_list) && + qh->qh_state == QH_STATE_LINKED))) + start_unlink_intr(fotg210, qh); + else if (temp != 0) + goto rescan; + } + } +} + +/*-------------------------------------------------------------------------*/ + +/* fotg210_iso_stream ops work with both ITD and SITD */ + +static struct fotg210_iso_stream * +iso_stream_alloc(gfp_t mem_flags) +{ + struct fotg210_iso_stream *stream; + + stream = kzalloc(sizeof(*stream), mem_flags); + if (likely(stream != NULL)) { + INIT_LIST_HEAD(&stream->td_list); + INIT_LIST_HEAD(&stream->free_list); + stream->next_uframe = -1; + } + return stream; +} + +static void +iso_stream_init( + struct fotg210_hcd *fotg210, + struct fotg210_iso_stream *stream, + struct usb_device *dev, + int pipe, + unsigned interval +) +{ + u32 buf1; + unsigned epnum, maxp; + int is_input; + long bandwidth; + unsigned multi; + + /* + * this might be a "high bandwidth" highspeed endpoint, + * as encoded in the ep descriptor's wMaxPacket field + */ + epnum = usb_pipeendpoint(pipe); + is_input = usb_pipein(pipe) ? 
USB_DIR_IN : 0; + maxp = usb_maxpacket(dev, pipe, !is_input); + if (is_input) + buf1 = (1 << 11); + else + buf1 = 0; + + maxp = max_packet(maxp); + multi = hb_mult(maxp); + buf1 |= maxp; + maxp *= multi; + + stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum); + stream->buf1 = cpu_to_hc32(fotg210, buf1); + stream->buf2 = cpu_to_hc32(fotg210, multi); + + /* usbfs wants to report the average usecs per frame tied up + * when transfers on this endpoint are scheduled ... + */ + if (dev->speed == USB_SPEED_FULL) { + interval <<= 3; + stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed, + is_input, 1, maxp)); + stream->usecs /= 8; + } else { + stream->highspeed = 1; + stream->usecs = HS_USECS_ISO(maxp); + } + bandwidth = stream->usecs * 8; + bandwidth /= interval; + + stream->bandwidth = bandwidth; + stream->udev = dev; + stream->bEndpointAddress = is_input | epnum; + stream->interval = interval; + stream->maxp = maxp; +} + +static struct fotg210_iso_stream * +iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb) +{ + unsigned epnum; + struct fotg210_iso_stream *stream; + struct usb_host_endpoint *ep; + unsigned long flags; + + epnum = usb_pipeendpoint(urb->pipe); + if (usb_pipein(urb->pipe)) + ep = urb->dev->ep_in[epnum]; + else + ep = urb->dev->ep_out[epnum]; + + spin_lock_irqsave(&fotg210->lock, flags); + stream = ep->hcpriv; + + if (unlikely(stream == NULL)) { + stream = iso_stream_alloc(GFP_ATOMIC); + if (likely(stream != NULL)) { + ep->hcpriv = stream; + stream->ep = ep; + iso_stream_init(fotg210, stream, urb->dev, urb->pipe, + urb->interval); + } + + /* if dev->ep[epnum] is a QH, hw is set */ + } else if (unlikely(stream->hw != NULL)) { + fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n", + urb->dev->devpath, epnum, + usb_pipein(urb->pipe) ? "in" : "out"); + stream = NULL; + } + + spin_unlock_irqrestore(&fotg210->lock, flags); + return stream; +} + +/*-------------------------------------------------------------------------*/ + +/* fotg210_iso_sched ops can be ITD-only or SITD-only */ + +static struct fotg210_iso_sched * +iso_sched_alloc(unsigned packets, gfp_t mem_flags) +{ + struct fotg210_iso_sched *iso_sched; + int size = sizeof(*iso_sched); + + size += packets * sizeof(struct fotg210_iso_packet); + iso_sched = kzalloc(size, mem_flags); + if (likely(iso_sched != NULL)) + INIT_LIST_HEAD(&iso_sched->td_list); + + return iso_sched; +} + +static inline void +itd_sched_init( + struct fotg210_hcd *fotg210, + struct fotg210_iso_sched *iso_sched, + struct fotg210_iso_stream *stream, + struct urb *urb +) +{ + unsigned i; + dma_addr_t dma = urb->transfer_dma; + + /* how many uframes are needed for these transfers */ + iso_sched->span = urb->number_of_packets * stream->interval; + + /* figure out per-uframe itd fields that we'll need later + * when we fit new itds into the schedule. 
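+	 * Each packet gets a transaction word (active status, page offset,
+	 * length, and IOC on the final packet unless URB_NO_INTERRUPT is
+	 * set) plus its 4KB-aligned buffer page; "cross" marks packets
+	 * that straddle a page boundary.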
+ */ + for (i = 0; i < urb->number_of_packets; i++) { + struct fotg210_iso_packet *uframe = &iso_sched->packet[i]; + unsigned length; + dma_addr_t buf; + u32 trans; + + length = urb->iso_frame_desc[i].length; + buf = dma + urb->iso_frame_desc[i].offset; + + trans = FOTG210_ISOC_ACTIVE; + trans |= buf & 0x0fff; + if (unlikely(((i + 1) == urb->number_of_packets)) + && !(urb->transfer_flags & URB_NO_INTERRUPT)) + trans |= FOTG210_ITD_IOC; + trans |= length << 16; + uframe->transaction = cpu_to_hc32(fotg210, trans); + + /* might need to cross a buffer page within a uframe */ + uframe->bufp = (buf & ~(u64)0x0fff); + buf += length; + if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff)))) + uframe->cross = 1; + } +} + +static void +iso_sched_free( + struct fotg210_iso_stream *stream, + struct fotg210_iso_sched *iso_sched +) +{ + if (!iso_sched) + return; + /* caller must hold fotg210->lock!*/ + list_splice(&iso_sched->td_list, &stream->free_list); + kfree(iso_sched); +} + +static int +itd_urb_transaction( + struct fotg210_iso_stream *stream, + struct fotg210_hcd *fotg210, + struct urb *urb, + gfp_t mem_flags +) +{ + struct fotg210_itd *itd; + dma_addr_t itd_dma; + int i; + unsigned num_itds; + struct fotg210_iso_sched *sched; + unsigned long flags; + + sched = iso_sched_alloc(urb->number_of_packets, mem_flags); + if (unlikely(sched == NULL)) + return -ENOMEM; + + itd_sched_init(fotg210, sched, stream, urb); + + if (urb->interval < 8) + num_itds = 1 + (sched->span + 7) / 8; + else + num_itds = urb->number_of_packets; + + /* allocate/init ITDs */ + spin_lock_irqsave(&fotg210->lock, flags); + for (i = 0; i < num_itds; i++) { + + /* + * Use iTDs from the free list, but not iTDs that may + * still be in use by the hardware. + */ + if (likely(!list_empty(&stream->free_list))) { + itd = list_first_entry(&stream->free_list, + struct fotg210_itd, itd_list); + if (itd->frame == fotg210->now_frame) + goto alloc_itd; + list_del(&itd->itd_list); + itd_dma = itd->itd_dma; + } else { + alloc_itd: + spin_unlock_irqrestore(&fotg210->lock, flags); + itd = dma_pool_alloc(fotg210->itd_pool, mem_flags, + &itd_dma); + spin_lock_irqsave(&fotg210->lock, flags); + if (!itd) { + iso_sched_free(stream, sched); + spin_unlock_irqrestore(&fotg210->lock, flags); + return -ENOMEM; + } + } + + memset(itd, 0, sizeof(*itd)); + itd->itd_dma = itd_dma; + list_add(&itd->itd_list, &sched->td_list); + } + spin_unlock_irqrestore(&fotg210->lock, flags); + + /* temporarily store schedule info in hcpriv */ + urb->hcpriv = sched; + urb->error_count = 0; + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static inline int +itd_slot_ok( + struct fotg210_hcd *fotg210, + u32 mod, + u32 uframe, + u8 usecs, + u32 period +) +{ + uframe %= period; + do { + /* can't commit more than uframe_periodic_max usec */ + if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7) + > (fotg210->uframe_periodic_max - usecs)) + return 0; + + /* we know urb->interval is 2^N uframes */ + uframe += period; + } while (uframe < mod); + return 1; +} + +/* + * This scheduler plans almost as far into the future as it has actual + * periodic schedule slots. (Affected by TUNE_FLS, which defaults to + * "as small as possible" to be cache-friendlier.) That limits the size + * transfers you can stream reliably; avoid more than 64 msec per urb. 
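+ * (The periodic list holds periodic_size frame slots, 1024 by default,
+ * so the whole schedule window is roughly one second.)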
+ * Also avoid queue depths of less than fotg210's worst irq latency (affected + * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, + * and other factors); or more than about 230 msec total (for portability, + * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler! + */ + +#define SCHEDULE_SLOP 80 /* microframes */ + +static int +iso_stream_schedule( + struct fotg210_hcd *fotg210, + struct urb *urb, + struct fotg210_iso_stream *stream +) +{ + u32 now, next, start, period, span; + int status; + unsigned mod = fotg210->periodic_size << 3; + struct fotg210_iso_sched *sched = urb->hcpriv; + + period = urb->interval; + span = sched->span; + + if (span > mod - SCHEDULE_SLOP) { + fotg210_dbg(fotg210, "iso request %p too long\n", urb); + status = -EFBIG; + goto fail; + } + + now = fotg210_read_frame_index(fotg210) & (mod - 1); + + /* Typical case: reuse current schedule, stream is still active. + * Hopefully there are no gaps from the host falling behind + * (irq delays etc), but if there are we'll take the next + * slot in the schedule, implicitly assuming URB_ISO_ASAP. + */ + if (likely(!list_empty(&stream->td_list))) { + u32 excess; + + /* For high speed devices, allow scheduling within the + * isochronous scheduling threshold. For full speed devices + * and Intel PCI-based controllers, don't (work around for + * Intel ICH9 bug). + */ + if (!stream->highspeed && fotg210->fs_i_thresh) + next = now + fotg210->i_thresh; + else + next = now; + + /* Fell behind (by up to twice the slop amount)? + * We decide based on the time of the last currently-scheduled + * slot, not the time of the next available slot. + */ + excess = (stream->next_uframe - period - next) & (mod - 1); + if (excess >= mod - 2 * SCHEDULE_SLOP) + start = next + excess - mod + period * + DIV_ROUND_UP(mod - excess, period); + else + start = next + excess + period; + if (start - now >= mod) { + fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n", + urb, start - now - period, period, + mod); + status = -EFBIG; + goto fail; + } + } + + /* need to schedule; when's the next (u)frame we could start? + * this is bigger than fotg210->i_thresh allows; scheduling itself + * isn't free, the slop should handle reasonably slow cpus. it + * can also help high bandwidth if the dma and irq loads don't + * jump until after the queue is primed. + */ + else { + int done = 0; + start = SCHEDULE_SLOP + (now & ~0x07); + + /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ + + /* find a uframe slot with enough bandwidth. + * Early uframes are more precious because full-speed + * iso IN transfers can't use late uframes, + * and therefore they should be allocated last. + */ + next = start; + start += period; + do { + start--; + /* check schedule: enough space? */ + if (itd_slot_ok(fotg210, mod, start, + stream->usecs, period)) + done = 1; + } while (start > next && !done); + + /* no room in the schedule */ + if (!done) { + fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n", + urb, now, now + mod); + status = -ENOSPC; + goto fail; + } + } + + /* Tried to schedule too far into the future? 
*/ + if (unlikely(start - now + span - period + >= mod - 2 * SCHEDULE_SLOP)) { + fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n", + urb, start - now, span - period, + mod - 2 * SCHEDULE_SLOP); + status = -EFBIG; + goto fail; + } + + stream->next_uframe = start & (mod - 1); + + /* report high speed start in uframes; full speed, in frames */ + urb->start_frame = stream->next_uframe; + if (!stream->highspeed) + urb->start_frame >>= 3; + + /* Make sure scan_isoc() sees these */ + if (fotg210->isoc_count == 0) + fotg210->next_frame = now >> 3; + return 0; + + fail: + iso_sched_free(stream, sched); + urb->hcpriv = NULL; + return status; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream, + struct fotg210_itd *itd) +{ + int i; + + /* it's been recently zeroed */ + itd->hw_next = FOTG210_LIST_END(fotg210); + itd->hw_bufp[0] = stream->buf0; + itd->hw_bufp[1] = stream->buf1; + itd->hw_bufp[2] = stream->buf2; + + for (i = 0; i < 8; i++) + itd->index[i] = -1; + + /* All other fields are filled when scheduling */ +} + +static inline void +itd_patch( + struct fotg210_hcd *fotg210, + struct fotg210_itd *itd, + struct fotg210_iso_sched *iso_sched, + unsigned index, + u16 uframe +) +{ + struct fotg210_iso_packet *uf = &iso_sched->packet[index]; + unsigned pg = itd->pg; + + uframe &= 0x07; + itd->index[uframe] = index; + + itd->hw_transaction[uframe] = uf->transaction; + itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12); + itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32)); + + /* iso_frame_desc[].offset must be strictly increasing */ + if (unlikely(uf->cross)) { + u64 bufp = uf->bufp + 4096; + + itd->pg = ++pg; + itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32)); + } +} + +static inline void +itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd) +{ + union fotg210_shadow *prev = &fotg210->pshadow[frame]; + __hc32 *hw_p = &fotg210->periodic[frame]; + union fotg210_shadow here = *prev; + __hc32 type = 0; + + /* skip any iso nodes which might belong to previous microframes */ + while (here.ptr) { + type = Q_NEXT_TYPE(fotg210, *hw_p); + if (type == cpu_to_hc32(fotg210, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(fotg210, prev, type); + hw_p = shadow_next_periodic(fotg210, &here, type); + here = *prev; + } + + itd->itd_next = here; + itd->hw_next = *hw_p; + prev->itd = itd; + itd->frame = frame; + wmb(); + *hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD); +} + +/* fit urb's itds into the selected schedule slot; activate as needed */ +static void itd_link_urb( + struct fotg210_hcd *fotg210, + struct urb *urb, + unsigned mod, + struct fotg210_iso_stream *stream +) +{ + int packet; + unsigned next_uframe, uframe, frame; + struct fotg210_iso_sched *iso_sched = urb->hcpriv; + struct fotg210_itd *itd; + + next_uframe = stream->next_uframe & (mod - 1); + + if (unlikely(list_empty(&stream->td_list))) { + fotg210_to_hcd(fotg210)->self.bandwidth_allocated + += stream->bandwidth; + fotg210_vdbg(fotg210, + "schedule devp %s ep%d%s-iso period %d start %d.%d\n", + urb->dev->devpath, stream->bEndpointAddress & 0x0f, + (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out", + urb->interval, + next_uframe >> 3, next_uframe & 0x7); + } + + /* fill iTDs uframe by uframe */ + for (packet = 0, itd = NULL; packet < urb->number_of_packets;) { + if (itd == NULL) { + /* ASSERT: we have all necessary itds */ + + /* ASSERT: no itds for this endpoint in this uframe */ + + itd = list_entry(iso_sched->td_list.next, + struct fotg210_itd, itd_list); + list_move_tail(&itd->itd_list, &stream->td_list); + itd->stream = stream; + itd->urb = urb; + itd_init(fotg210, stream, itd); + } + + uframe = next_uframe & 0x07; + frame = next_uframe >> 3; + + itd_patch(fotg210, itd, iso_sched, packet, uframe); + + next_uframe += stream->interval; + next_uframe &= mod - 1; + packet++; + + /* link completed itds into the schedule */ + if (((next_uframe >> 3) != frame) + || packet == urb->number_of_packets) { + itd_link(fotg210, frame & (fotg210->periodic_size - 1), + itd); + itd = NULL; + } + } + stream->next_uframe = next_uframe; + + /* don't need that schedule data any more */ + iso_sched_free(stream, iso_sched); + urb->hcpriv = NULL; + + ++fotg210->isoc_count; + enable_periodic(fotg210); +} + +#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\ + FOTG210_ISOC_XACTERR) + +/* Process and recycle a completed ITD. Return true iff its urb completed, + * and hence its completion callback probably added things to the hardware + * schedule. + * + * Note that we carefully avoid recycling this descriptor until after any + * completion callback runs, so that it won't be reused quickly. That is, + * assuming (a) no more than two urbs per frame on this endpoint, and also + * (b) only this endpoint's completions submit URBs. It seems some silicon + * corrupts things if you reuse completed descriptors very quickly... + */ +static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd) +{ + struct urb *urb = itd->urb; + struct usb_iso_packet_descriptor *desc; + u32 t; + unsigned uframe; + int urb_index = -1; + struct fotg210_iso_stream *stream = itd->stream; + struct usb_device *dev; + bool retval = false; + + /* for each uframe with a packet */ + for (uframe = 0; uframe < 8; uframe++) { + if (likely(itd->index[uframe] == -1)) + continue; + urb_index = itd->index[uframe]; + desc = &urb->iso_frame_desc[urb_index]; + + t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]); + itd->hw_transaction[uframe] = 0; + + /* report transfer status */ + if (unlikely(t & ISO_ERRS)) { + urb->error_count++; + if (t & FOTG210_ISOC_BUF_ERR) + desc->status = usb_pipein(urb->pipe) + ? -ENOSR /* hc couldn't read */ + : -ECOMM; /* hc couldn't write */ + else if (t & FOTG210_ISOC_BABBLE) + desc->status = -EOVERFLOW; + else /* (t & FOTG210_ISOC_XACTERR) */ + desc->status = -EPROTO; + + /* HC need not update length with this error */ + if (!(t & FOTG210_ISOC_BABBLE)) { + desc->actual_length = + fotg210_itdlen(urb, desc, t); + urb->actual_length += desc->actual_length; + } + } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) { + desc->status = 0; + desc->actual_length = fotg210_itdlen(urb, desc, t); + urb->actual_length += desc->actual_length; + } else { + /* URB was too late */ + desc->status = -EXDEV; + } + } + + /* handle completion now? 
*/ + if (likely((urb_index + 1) != urb->number_of_packets)) + goto done; + + /* ASSERT: it's really the last itd for this urb + list_for_each_entry (itd, &stream->td_list, itd_list) + BUG_ON (itd->urb == urb); + */ + + /* give urb back to the driver; completion often (re)submits */ + dev = urb->dev; + fotg210_urb_done(fotg210, urb, 0); + retval = true; + urb = NULL; + + --fotg210->isoc_count; + disable_periodic(fotg210); + + if (unlikely(list_is_singular(&stream->td_list))) { + fotg210_to_hcd(fotg210)->self.bandwidth_allocated + -= stream->bandwidth; + fotg210_vdbg(fotg210, + "deschedule devp %s ep%d%s-iso\n", + dev->devpath, stream->bEndpointAddress & 0x0f, + (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); + } + +done: + itd->urb = NULL; + + /* Add to the end of the free list for later reuse */ + list_move_tail(&itd->itd_list, &stream->free_list); + + /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */ + if (list_empty(&stream->td_list)) { + list_splice_tail_init(&stream->free_list, + &fotg210->cached_itd_list); + start_free_itds(fotg210); + } + + return retval; +} + +/*-------------------------------------------------------------------------*/ + +static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb, + gfp_t mem_flags) +{ + int status = -EINVAL; + unsigned long flags; + struct fotg210_iso_stream *stream; + + /* Get iso_stream head */ + stream = iso_stream_find(fotg210, urb); + if (unlikely(stream == NULL)) { + fotg210_dbg(fotg210, "can't get iso stream\n"); + return -ENOMEM; + } + if (unlikely(urb->interval != stream->interval && + fotg210_port_speed(fotg210, 0) == + USB_PORT_STAT_HIGH_SPEED)) { + fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n", + stream->interval, urb->interval); + goto done; + } + +#ifdef FOTG210_URB_TRACE + fotg210_dbg(fotg210, + "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out", + urb->transfer_buffer_length, + urb->number_of_packets, urb->interval, + stream); +#endif + + /* allocate ITDs w/o locking anything */ + status = itd_urb_transaction(stream, fotg210, urb, mem_flags); + if (unlikely(status < 0)) { + fotg210_dbg(fotg210, "can't init itds\n"); + goto done; + } + + /* schedule ... need to lock */ + spin_lock_irqsave(&fotg210->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb); + if (unlikely(status)) + goto done_not_linked; + status = iso_stream_schedule(fotg210, urb, stream); + if (likely(status == 0)) + itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream); + else + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); + done_not_linked: + spin_unlock_irqrestore(&fotg210->lock, flags); + done: + return status; +} + +/*-------------------------------------------------------------------------*/ + +static void scan_isoc(struct fotg210_hcd *fotg210) +{ + unsigned uf, now_frame, frame; + unsigned fmask = fotg210->periodic_size - 1; + bool modified, live; + + /* + * When running, scan from last scan point up to "now" + * else clean up by scanning everything that's left. + * Touches as few pages as possible: cache-friendly. 
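+	 * fotg210->next_frame remembers where the previous scan stopped;
+	 * now_frame is refreshed from the frame index register while the
+	 * controller is running.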
+ */ + if (fotg210->rh_state >= FOTG210_RH_RUNNING) { + uf = fotg210_read_frame_index(fotg210); + now_frame = (uf >> 3) & fmask; + live = true; + } else { + now_frame = (fotg210->next_frame - 1) & fmask; + live = false; + } + fotg210->now_frame = now_frame; + + frame = fotg210->next_frame; + for (;;) { + union fotg210_shadow q, *q_p; + __hc32 type, *hw_p; + +restart: + /* scan each element in frame's queue for completions */ + q_p = &fotg210->pshadow[frame]; + hw_p = &fotg210->periodic[frame]; + q.ptr = q_p->ptr; + type = Q_NEXT_TYPE(fotg210, *hw_p); + modified = false; + + while (q.ptr != NULL) { + switch (hc32_to_cpu(fotg210, type)) { + case Q_TYPE_ITD: + /* If this ITD is still active, leave it for + * later processing ... check the next entry. + * No need to check for activity unless the + * frame is current. + */ + if (frame == now_frame && live) { + rmb(); + for (uf = 0; uf < 8; uf++) { + if (q.itd->hw_transaction[uf] & + ITD_ACTIVE(fotg210)) + break; + } + if (uf < 8) { + q_p = &q.itd->itd_next; + hw_p = &q.itd->hw_next; + type = Q_NEXT_TYPE(fotg210, + q.itd->hw_next); + q = *q_p; + break; + } + } + + /* Take finished ITDs out of the schedule + * and process them: recycle, maybe report + * URB completion. HC won't cache the + * pointer for much longer, if at all. + */ + *q_p = q.itd->itd_next; + *hw_p = q.itd->hw_next; + type = Q_NEXT_TYPE(fotg210, q.itd->hw_next); + wmb(); + modified = itd_complete(fotg210, q.itd); + q = *q_p; + break; + default: + fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n", + type, frame, q.ptr); + /* FALL THROUGH */ + case Q_TYPE_QH: + case Q_TYPE_FSTN: + /* End of the iTDs and siTDs */ + q.ptr = NULL; + break; + } + + /* assume completion callbacks modify the queue */ + if (unlikely(modified && fotg210->isoc_count > 0)) + goto restart; + } + + /* Stop when we have reached the current frame */ + if (frame == now_frame) + break; + frame = (frame + 1) & fmask; + } + fotg210->next_frame = now_frame; +} +/*-------------------------------------------------------------------------*/ +/* + * Display / Set uframe_periodic_max + */ +static ssize_t show_uframe_periodic_max(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fotg210_hcd *fotg210; + int n; + + fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev))); + n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max); + return n; +} + + +static ssize_t store_uframe_periodic_max(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fotg210_hcd *fotg210; + unsigned uframe_periodic_max; + unsigned frame, uframe; + unsigned short allocated_max; + unsigned long flags; + ssize_t ret; + + fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev))); + if (kstrtouint(buf, 0, &uframe_periodic_max) < 0) + return -EINVAL; + + if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) { + fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n", + uframe_periodic_max); + return -EINVAL; + } + + ret = -EINVAL; + + /* + * lock, so that our checking does not race with possible periodic + * bandwidth allocation through submitting new urbs. + */ + spin_lock_irqsave(&fotg210->lock, flags); + + /* + * for request to decrease max periodic bandwidth, we have to check + * every microframe in the schedule to see whether the decrease is + * possible. 
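+	 * (uframe_periodic_max counts usec per 125-usec microframe, so the
+	 * USB 2.0 default of 80% corresponds to the value 100.)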
+ */ + if (uframe_periodic_max < fotg210->uframe_periodic_max) { + allocated_max = 0; + + for (frame = 0; frame < fotg210->periodic_size; ++frame) + for (uframe = 0; uframe < 7; ++uframe) + allocated_max = max(allocated_max, + periodic_usecs(fotg210, frame, uframe)); + + if (allocated_max > uframe_periodic_max) { + fotg210_info(fotg210, + "cannot decrease uframe_periodic_max becase " + "periodic bandwidth is already allocated " + "(%u > %u)\n", + allocated_max, uframe_periodic_max); + goto out_unlock; + } + } + + /* increasing is always ok */ + + fotg210_info(fotg210, "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n", + 100 * uframe_periodic_max/125, uframe_periodic_max); + + if (uframe_periodic_max != 100) + fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n"); + + fotg210->uframe_periodic_max = uframe_periodic_max; + ret = count; + +out_unlock: + spin_unlock_irqrestore(&fotg210->lock, flags); + return ret; +} + +static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, + store_uframe_periodic_max); + +static inline int create_sysfs_files(struct fotg210_hcd *fotg210) +{ + struct device *controller = fotg210_to_hcd(fotg210)->self.controller; + int i = 0; + + if (i) + goto out; + + i = device_create_file(controller, &dev_attr_uframe_periodic_max); +out: + return i; +} + +static inline void remove_sysfs_files(struct fotg210_hcd *fotg210) +{ + struct device *controller = fotg210_to_hcd(fotg210)->self.controller; + + device_remove_file(controller, &dev_attr_uframe_periodic_max); +} +/*-------------------------------------------------------------------------*/ + +/* On some systems, leaving remote wakeup enabled prevents system shutdown. + * The firmware seems to think that powering off is a wakeup event! + * This routine turns off remote wakeup and everything else, on all ports. + */ +static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210) +{ + u32 __iomem *status_reg = &fotg210->regs->port_status; + + fotg210_writel(fotg210, PORT_RWC_BITS, status_reg); +} + +/* + * Halt HC, turn off all ports, and let the BIOS use the companion controllers. + * Must be called with interrupts enabled and the lock not held. + */ +static void fotg210_silence_controller(struct fotg210_hcd *fotg210) +{ + fotg210_halt(fotg210); + + spin_lock_irq(&fotg210->lock); + fotg210->rh_state = FOTG210_RH_HALTED; + fotg210_turn_off_all_ports(fotg210); + spin_unlock_irq(&fotg210->lock); +} + +/* fotg210_shutdown kick in for silicon on any bus (not just pci, etc). + * This forcibly disables dma and IRQs, helping kexec and other cases + * where the next system software may expect clean state. + */ +static void fotg210_shutdown(struct usb_hcd *hcd) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + + spin_lock_irq(&fotg210->lock); + fotg210->shutdown = true; + fotg210->rh_state = FOTG210_RH_STOPPING; + fotg210->enabled_hrtimer_events = 0; + spin_unlock_irq(&fotg210->lock); + + fotg210_silence_controller(fotg210); + + hrtimer_cancel(&fotg210->hrtimer); +} + +/*-------------------------------------------------------------------------*/ + +/* + * fotg210_work is called from some interrupts, timers, and so on. + * it calls driver completion functions, after dropping fotg210->lock. + */ +static void fotg210_work(struct fotg210_hcd *fotg210) +{ + /* another CPU may drop fotg210->lock during a schedule scan while + * it reports urb completions. this flag guards against bogus + * attempts at re-entrant schedule scanning. 
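+	 * A request that arrives while a scan is in progress just sets
+	 * need_rescan; the active scan loops again before clearing
+	 * "scanning".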
+ */ + if (fotg210->scanning) { + fotg210->need_rescan = true; + return; + } + fotg210->scanning = true; + + rescan: + fotg210->need_rescan = false; + if (fotg210->async_count) + scan_async(fotg210); + if (fotg210->intr_count > 0) + scan_intr(fotg210); + if (fotg210->isoc_count > 0) + scan_isoc(fotg210); + if (fotg210->need_rescan) + goto rescan; + fotg210->scanning = false; + + /* the IO watchdog guards against hardware or driver bugs that + * misplace IRQs, and should let us run completely without IRQs. + * such lossage has been observed on both VT6202 and VT8235. + */ + turn_on_io_watchdog(fotg210); +} + +/* + * Called when the fotg210_hcd module is removed. + */ +static void fotg210_stop(struct usb_hcd *hcd) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + + fotg210_dbg(fotg210, "stop\n"); + + /* no more interrupts ... */ + + spin_lock_irq(&fotg210->lock); + fotg210->enabled_hrtimer_events = 0; + spin_unlock_irq(&fotg210->lock); + + fotg210_quiesce(fotg210); + fotg210_silence_controller(fotg210); + fotg210_reset(fotg210); + + hrtimer_cancel(&fotg210->hrtimer); + remove_sysfs_files(fotg210); + remove_debug_files(fotg210); + + /* root hub is shut down separately (first, when possible) */ + spin_lock_irq(&fotg210->lock); + end_free_itds(fotg210); + spin_unlock_irq(&fotg210->lock); + fotg210_mem_cleanup(fotg210); + +#ifdef FOTG210_STATS + fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n", + fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa, + fotg210->stats.lost_iaa); + fotg210_dbg(fotg210, "complete %ld unlink %ld\n", + fotg210->stats.complete, fotg210->stats.unlink); +#endif + + dbg_status(fotg210, "fotg210_stop completed", + fotg210_readl(fotg210, &fotg210->regs->status)); +} + +/* one-time init, only for memory state */ +static int hcd_fotg210_init(struct usb_hcd *hcd) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 temp; + int retval; + u32 hcc_params; + struct fotg210_qh_hw *hw; + + spin_lock_init(&fotg210->lock); + + /* + * keep io watchdog by default, those good HCDs could turn off it later + */ + fotg210->need_io_watchdog = 1; + + hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + fotg210->hrtimer.function = fotg210_hrtimer_func; + fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT; + + hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); + + /* + * by default set standard 80% (== 100 usec/uframe) max periodic + * bandwidth as required by USB 2.0 + */ + fotg210->uframe_periodic_max = 100; + + /* + * hw default: 1K periodic list heads, one per frame. + * periodic_size can shrink by USBCMD update if hcc_params allows. + */ + fotg210->periodic_size = DEFAULT_I_TDPS; + INIT_LIST_HEAD(&fotg210->intr_qh_list); + INIT_LIST_HEAD(&fotg210->cached_itd_list); + + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + switch (FOTG210_TUNE_FLS) { + case 0: + fotg210->periodic_size = 1024; + break; + case 1: + fotg210->periodic_size = 512; + break; + case 2: + fotg210->periodic_size = 256; + break; + default: + BUG(); + } + } + retval = fotg210_mem_init(fotg210, GFP_KERNEL); + if (retval < 0) + return retval; + + /* controllers may cache some of the periodic schedule ... */ + fotg210->i_thresh = 2; + + /* + * dedicate a qh for the async ring head, since we couldn't unlink + * a 'real' qh without stopping the async schedule [4.8]. use it + * as the 'reclamation list head' too. 
+ * its dummy is used in hw_alt_next of many tds, to prevent the qh + * from automatically advancing to the next td after short reads. + */ + fotg210->async->qh_next.qh = NULL; + hw = fotg210->async->hw; + hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma); + hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD); + hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT); + hw->hw_qtd_next = FOTG210_LIST_END(fotg210); + fotg210->async->qh_state = QH_STATE_LINKED; + hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma); + + /* clear interrupt enables, set irq latency */ + if (log2_irq_thresh < 0 || log2_irq_thresh > 6) + log2_irq_thresh = 0; + temp = 1 << (16 + log2_irq_thresh); + if (HCC_CANPARK(hcc_params)) { + /* HW default park == 3, on hardware that supports it (like + * NVidia and ALI silicon), maximizes throughput on the async + * schedule by avoiding QH fetches between transfers. + * + * With fast usb storage devices and NForce2, "park" seems to + * make problems: throughput reduction (!), data errors... + */ + if (park) { + park = min_t(unsigned, park, 3); + temp |= CMD_PARK; + temp |= park << 8; + } + fotg210_dbg(fotg210, "park %d\n", park); + } + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + temp &= ~(3 << 2); + temp |= (FOTG210_TUNE_FLS << 2); + } + fotg210->command = temp; + + /* Accept arbitrarily long scatter-gather lists */ + if (!(hcd->driver->flags & HCD_LOCAL_MEM)) + hcd->self.sg_tablesize = ~0; + return 0; +} + +/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */ +static int fotg210_run(struct usb_hcd *hcd) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 temp; + u32 hcc_params; + + hcd->uses_new_polling = 1; + + /* EHCI spec section 4.1 */ + + fotg210_writel(fotg210, fotg210->periodic_dma, + &fotg210->regs->frame_list); + fotg210_writel(fotg210, (u32)fotg210->async->qh_dma, + &fotg210->regs->async_next); + + /* + * hcc_params controls whether fotg210->regs->segment must (!!!) + * be used; it constrains QH/ITD/SITD and QTD locations. + * pci_pool consistent memory always uses segment zero. + * streaming mappings for I/O buffers, like pci_map_single(), + * can return segments above 4GB, if the device allows. + * + * NOTE: the dma mask is visible through dma_supported(), so + * drivers can pass this info along ... like NETIF_F_HIGHDMA, + * Scsi_Host.highmem_io, and so forth. It's readonly to all + * host side drivers though. + */ + hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); + + /* + * Philips, Intel, and maybe others need CMD_RUN before the + * root hub will detect new devices (why?); NEC doesn't + */ + fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); + fotg210->command |= CMD_RUN; + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); + dbg_cmd(fotg210, "init", fotg210->command); + + /* + * Start, enabling full USB 2.0 functionality ... usb 1.1 devices + * are explicitly handed to companion controller(s), so no TT is + * involved with the root hub. (Except where one is integrated, + * and there's no companion controller unless maybe for USB OTG.) + * + * Turning on the CF flag will transfer ownership of all ports + * from the companions to the EHCI controller. If any of the + * companions are in the middle of a port reset at the time, it + * could cause trouble. Write-locking ehci_cf_port_reset_rwsem + * guarantees that no resets are in progress. 
After we set CF, + * a short delay lets the hardware catch up; new resets shouldn't + * be started before the port switching actions could complete. + */ + down_write(&ehci_cf_port_reset_rwsem); + fotg210->rh_state = FOTG210_RH_RUNNING; + /* unblock posted writes */ + fotg210_readl(fotg210, &fotg210->regs->command); + msleep(5); + up_write(&ehci_cf_port_reset_rwsem); + fotg210->last_periodic_enable = ktime_get_real(); + + temp = HC_VERSION(fotg210, + fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); + fotg210_info(fotg210, + "USB %x.%x started, EHCI %x.%02x\n", + ((fotg210->sbrn & 0xf0)>>4), (fotg210->sbrn & 0x0f), + temp >> 8, temp & 0xff); + + fotg210_writel(fotg210, INTR_MASK, + &fotg210->regs->intr_enable); /* Turn On Interrupts */ + + /* GRR this is run-once init(), being done every time the HC starts. + * So long as they're part of class devices, we can't do it init() + * since the class device isn't created that early. + */ + create_debug_files(fotg210); + create_sysfs_files(fotg210); + + return 0; +} + +static int fotg210_setup(struct usb_hcd *hcd) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + int retval; + + fotg210->regs = (void __iomem *)fotg210->caps + + HC_LENGTH(fotg210, + fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); + dbg_hcs_params(fotg210, "reset"); + dbg_hcc_params(fotg210, "reset"); + + /* cache this readonly data; minimize chip reads */ + fotg210->hcs_params = fotg210_readl(fotg210, + &fotg210->caps->hcs_params); + + fotg210->sbrn = HCD_USB2; + + /* data structure init */ + retval = hcd_fotg210_init(hcd); + if (retval) + return retval; + + retval = fotg210_halt(fotg210); + if (retval) + return retval; + + fotg210_reset(fotg210); + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static irqreturn_t fotg210_irq(struct usb_hcd *hcd) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 status, masked_status, pcd_status = 0, cmd; + int bh; + + spin_lock(&fotg210->lock); + + status = fotg210_readl(fotg210, &fotg210->regs->status); + + /* e.g. cardbus physical eject */ + if (status == ~(u32) 0) { + fotg210_dbg(fotg210, "device removed\n"); + goto dead; + } + + /* + * We don't use STS_FLR, but some controllers don't like it to + * remain on, so mask it out along with the other status bits. + */ + masked_status = status & (INTR_MASK | STS_FLR); + + /* Shared IRQ? */ + if (!masked_status || + unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) { + spin_unlock(&fotg210->lock); + return IRQ_NONE; + } + + /* clear (just) interrupts */ + fotg210_writel(fotg210, masked_status, &fotg210->regs->status); + cmd = fotg210_readl(fotg210, &fotg210->regs->command); + bh = 0; + +#ifdef VERBOSE_DEBUG + /* unrequested/ignored: Frame List Rollover */ + dbg_status(fotg210, "irq", status); +#endif + + /* INT, ERR, and IAA interrupt rates can be throttled */ + + /* normal [4.15.1.2] or error [4.15.1.1] completion */ + if (likely((status & (STS_INT|STS_ERR)) != 0)) { + if (likely((status & STS_ERR) == 0)) + COUNT(fotg210->stats.normal); + else + COUNT(fotg210->stats.error); + bh = 1; + } + + /* complete the unlinking of some qh [4.15.2.3] */ + if (status & STS_IAA) { + + /* Turn off the IAA watchdog */ + fotg210->enabled_hrtimer_events &= + ~BIT(FOTG210_HRTIMER_IAA_WATCHDOG); + + /* + * Mild optimization: Allow another IAAD to reset the + * hrtimer, if one occurs before the next expiration. 
+ * In theory we could always cancel the hrtimer, but + * tests show that about half the time it will be reset + * for some other event anyway. + */ + if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG) + ++fotg210->next_hrtimer_event; + + /* guard against (alleged) silicon errata */ + if (cmd & CMD_IAAD) + fotg210_dbg(fotg210, "IAA with IAAD still set?\n"); + if (fotg210->async_iaa) { + COUNT(fotg210->stats.iaa); + end_unlink_async(fotg210); + } else + fotg210_dbg(fotg210, "IAA with nothing unlinked?\n"); + } + + /* remote wakeup [4.3.1] */ + if (status & STS_PCD) { + int pstatus; + u32 __iomem *status_reg = &fotg210->regs->port_status; + + /* kick root hub later */ + pcd_status = status; + + /* resume root hub? */ + if (fotg210->rh_state == FOTG210_RH_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + + pstatus = fotg210_readl(fotg210, status_reg); + + if (test_bit(0, &fotg210->suspended_ports) && + ((pstatus & PORT_RESUME) || + !(pstatus & PORT_SUSPEND)) && + (pstatus & PORT_PE) && + fotg210->reset_done[0] == 0) { + + /* start 20 msec resume signaling from this port, + * and make khubd collect PORT_STAT_C_SUSPEND to + * stop that signaling. Use 5 ms extra for safety, + * like usb_port_resume() does. + */ + fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25); + set_bit(0, &fotg210->resuming_ports); + fotg210_dbg(fotg210, "port 1 remote wakeup\n"); + mod_timer(&hcd->rh_timer, fotg210->reset_done[0]); + } + } + + /* PCI errors [4.15.2.4] */ + if (unlikely((status & STS_FATAL) != 0)) { + fotg210_err(fotg210, "fatal error\n"); + dbg_cmd(fotg210, "fatal", cmd); + dbg_status(fotg210, "fatal", status); +dead: + usb_hc_died(hcd); + + /* Don't let the controller do anything more */ + fotg210->shutdown = true; + fotg210->rh_state = FOTG210_RH_STOPPING; + fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE); + fotg210_writel(fotg210, fotg210->command, + &fotg210->regs->command); + fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable); + fotg210_handle_controller_death(fotg210); + + /* Handle completions when the controller stops */ + bh = 0; + } + + if (bh) + fotg210_work(fotg210); + spin_unlock(&fotg210->lock); + if (pcd_status) + usb_hcd_poll_rh_status(hcd); + return IRQ_HANDLED; +} + +/*-------------------------------------------------------------------------*/ + +/* + * non-error returns are a promise to giveback() the urb later + * we drop ownership so next owner (or urb unlink) can get it + * + * urb + dev is in hcd.self.controller.urb_list + * we're queueing TDs onto software and hardware lists + * + * hcd-specific init for hcpriv hasn't been done yet + * + * NOTE: control, bulk, and interrupt share the same code to append TDs + * to a (possibly active) QH, and the same QH scanning code. + */ +static int fotg210_urb_enqueue( + struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags +) { + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct list_head qtd_list; + + INIT_LIST_HEAD(&qtd_list); + + switch (usb_pipetype(urb->pipe)) { + case PIPE_CONTROL: + /* qh_completions() code doesn't handle all the fault cases + * in multi-TD control transfers. Even 1KB is rare anyway. 
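+		 * (16KB is about what a single qtd can always map: five 4KB
+		 * buffer pointers, less worst-case misalignment of the first
+		 * page.)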
+ */ + if (urb->transfer_buffer_length > (16 * 1024)) + return -EMSGSIZE; + /* FALLTHROUGH */ + /* case PIPE_BULK: */ + default: + if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags)) + return -ENOMEM; + return submit_async(fotg210, urb, &qtd_list, mem_flags); + + case PIPE_INTERRUPT: + if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags)) + return -ENOMEM; + return intr_submit(fotg210, urb, &qtd_list, mem_flags); + + case PIPE_ISOCHRONOUS: + return itd_submit(fotg210, urb, mem_flags); + } +} + +/* remove from hardware lists + * completions normally happen asynchronously + */ + +static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_qh *qh; + unsigned long flags; + int rc; + + spin_lock_irqsave(&fotg210->lock, flags); + rc = usb_hcd_check_unlink_urb(hcd, urb, status); + if (rc) + goto done; + + switch (usb_pipetype(urb->pipe)) { + /* case PIPE_CONTROL: */ + /* case PIPE_BULK:*/ + default: + qh = (struct fotg210_qh *) urb->hcpriv; + if (!qh) + break; + switch (qh->qh_state) { + case QH_STATE_LINKED: + case QH_STATE_COMPLETING: + start_unlink_async(fotg210, qh); + break; + case QH_STATE_UNLINK: + case QH_STATE_UNLINK_WAIT: + /* already started */ + break; + case QH_STATE_IDLE: + /* QH might be waiting for a Clear-TT-Buffer */ + qh_completions(fotg210, qh); + break; + } + break; + + case PIPE_INTERRUPT: + qh = (struct fotg210_qh *) urb->hcpriv; + if (!qh) + break; + switch (qh->qh_state) { + case QH_STATE_LINKED: + case QH_STATE_COMPLETING: + start_unlink_intr(fotg210, qh); + break; + case QH_STATE_IDLE: + qh_completions(fotg210, qh); + break; + default: + fotg210_dbg(fotg210, "bogus qh %p state %d\n", + qh, qh->qh_state); + goto done; + } + break; + + case PIPE_ISOCHRONOUS: + /* itd... */ + + /* wait till next completion, do it then. */ + /* completion irqs can wait up to 1024 msec, */ + break; + } +done: + spin_unlock_irqrestore(&fotg210->lock, flags); + return rc; +} + +/*-------------------------------------------------------------------------*/ + +/* bulk qh holds the data toggle */ + +static void +fotg210_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + unsigned long flags; + struct fotg210_qh *qh, *tmp; + + /* ASSERT: any requests/urbs are being unlinked */ + /* ASSERT: nobody can be submitting urbs for this any more */ + +rescan: + spin_lock_irqsave(&fotg210->lock, flags); + qh = ep->hcpriv; + if (!qh) + goto done; + + /* endpoints can be iso streams. for now, we don't + * accelerate iso completions ... so spin a while. + */ + if (qh->hw == NULL) { + struct fotg210_iso_stream *stream = ep->hcpriv; + + if (!list_empty(&stream->td_list)) + goto idle_timeout; + + /* BUG_ON(!list_empty(&stream->free_list)); */ + kfree(stream); + goto done; + } + + if (fotg210->rh_state < FOTG210_RH_RUNNING) + qh->qh_state = QH_STATE_IDLE; + switch (qh->qh_state) { + case QH_STATE_LINKED: + case QH_STATE_COMPLETING: + for (tmp = fotg210->async->qh_next.qh; + tmp && tmp != qh; + tmp = tmp->qh_next.qh) + continue; + /* periodic qh self-unlinks on empty, and a COMPLETING qh + * may already be unlinked. + */ + if (tmp) + start_unlink_async(fotg210, qh); + /* FALL THROUGH */ + case QH_STATE_UNLINK: /* wait for hw to finish? 
*/ + case QH_STATE_UNLINK_WAIT: +idle_timeout: + spin_unlock_irqrestore(&fotg210->lock, flags); + schedule_timeout_uninterruptible(1); + goto rescan; + case QH_STATE_IDLE: /* fully unlinked */ + if (qh->clearing_tt) + goto idle_timeout; + if (list_empty(&qh->qtd_list)) { + qh_destroy(fotg210, qh); + break; + } + /* else FALL THROUGH */ + default: + /* caller was supposed to have unlinked any requests; + * that's not our job. just leak this memory. + */ + fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n", + qh, ep->desc.bEndpointAddress, qh->qh_state, + list_empty(&qh->qtd_list) ? "" : "(has tds)"); + break; + } + done: + ep->hcpriv = NULL; + spin_unlock_irqrestore(&fotg210->lock, flags); +} + +static void +fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_qh *qh; + int eptype = usb_endpoint_type(&ep->desc); + int epnum = usb_endpoint_num(&ep->desc); + int is_out = usb_endpoint_dir_out(&ep->desc); + unsigned long flags; + + if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) + return; + + spin_lock_irqsave(&fotg210->lock, flags); + qh = ep->hcpriv; + + /* For Bulk and Interrupt endpoints we maintain the toggle state + * in the hardware; the toggle bits in udev aren't used at all. + * When an endpoint is reset by usb_clear_halt() we must reset + * the toggle bit in the QH. + */ + if (qh) { + usb_settoggle(qh->dev, epnum, is_out, 0); + if (!list_empty(&qh->qtd_list)) { + WARN_ONCE(1, "clear_halt for a busy endpoint\n"); + } else if (qh->qh_state == QH_STATE_LINKED || + qh->qh_state == QH_STATE_COMPLETING) { + + /* The toggle value in the QH can't be updated + * while the QH is active. Unlink it now; + * re-linking will call qh_refresh(). + */ + if (eptype == USB_ENDPOINT_XFER_BULK) + start_unlink_async(fotg210, qh); + else + start_unlink_intr(fotg210, qh); + } + } + spin_unlock_irqrestore(&fotg210->lock, flags); +} + +static int fotg210_get_frame(struct usb_hcd *hcd) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + return (fotg210_read_frame_index(fotg210) >> 3) % + fotg210->periodic_size; +} + +/*-------------------------------------------------------------------------*/ + +/* + * The EHCI in ChipIdea HDRC cannot be a separate module or device, + * because its registers (and irq) are shared between host/gadget/otg + * functions and in order to facilitate role switching we cannot + * give the fotg210 driver exclusive access to those. 
+ */ +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_LICENSE("GPL"); + +static const struct hc_driver fotg210_fotg210_hc_driver = { + .description = hcd_name, + .product_desc = "Faraday USB2.0 Host Controller", + .hcd_priv_size = sizeof(struct fotg210_hcd), + + /* + * generic hardware linkage + */ + .irq = fotg210_irq, + .flags = HCD_MEMORY | HCD_USB2, + + /* + * basic lifecycle operations + */ + .reset = hcd_fotg210_init, + .start = fotg210_run, + .stop = fotg210_stop, + .shutdown = fotg210_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = fotg210_urb_enqueue, + .urb_dequeue = fotg210_urb_dequeue, + .endpoint_disable = fotg210_endpoint_disable, + .endpoint_reset = fotg210_endpoint_reset, + + /* + * scheduling support + */ + .get_frame_number = fotg210_get_frame, + + /* + * root hub support + */ + .hub_status_data = fotg210_hub_status_data, + .hub_control = fotg210_hub_control, + .bus_suspend = fotg210_bus_suspend, + .bus_resume = fotg210_bus_resume, + + .relinquish_port = fotg210_relinquish_port, + .port_handed_over = fotg210_port_handed_over, + + .clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete, +}; + +static void fotg210_init(struct fotg210_hcd *fotg210) +{ + u32 value; + + iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY, + &fotg210->regs->gmir); + + value = ioread32(&fotg210->regs->otgcsr); + value &= ~OTGCSR_A_BUS_DROP; + value |= OTGCSR_A_BUS_REQ; + iowrite32(value, &fotg210->regs->otgcsr); +} + +/** + * fotg210_hcd_probe - initialize faraday FOTG210 HCDs + * + * Allocates basic resources for this USB host controller, and + * then invokes the start() method for the HCD associated with it + * through the hotplug entry's driver_data. + */ +static int fotg210_hcd_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct usb_hcd *hcd; + struct resource *res; + int irq; + int retval = -ENODEV; + struct fotg210_hcd *fotg210; + + if (usb_disabled()) + return -ENODEV; + + pdev->dev.power.power_state = PMSG_ON; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(dev, + "Found HC with no IRQ. Check %s setup!\n", + dev_name(dev)); + return -ENODEV; + } + + irq = res->start; + + hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev, + dev_name(dev)); + if (!hcd) { + dev_err(dev, "failed to create hcd with err %d\n", retval); + retval = -ENOMEM; + goto fail_create_hcd; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, + "Found HC with no register addr. Check %s setup!\n", + dev_name(dev)); + retval = -ENODEV; + goto fail_request_resource; + } + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + hcd->has_tt = 1; + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, + fotg210_fotg210_hc_driver.description)) { + dev_dbg(dev, "controller already in use\n"); + retval = -EBUSY; + goto fail_request_resource; + } + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(dev, + "Found HC with no register addr. 
Check %s setup!\n", + dev_name(dev)); + retval = -ENODEV; + goto fail_request_resource; + } + + hcd->regs = ioremap_nocache(res->start, resource_size(res)); + if (hcd->regs == NULL) { + dev_dbg(dev, "error mapping memory\n"); + retval = -EFAULT; + goto fail_ioremap; + } + + fotg210 = hcd_to_fotg210(hcd); + + fotg210->caps = hcd->regs; + + retval = fotg210_setup(hcd); + if (retval) + goto fail_add_hcd; + + fotg210_init(fotg210); + + retval = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (retval) { + dev_err(dev, "failed to add hcd with err %d\n", retval); + goto fail_add_hcd; + } + + return retval; + +fail_add_hcd: + iounmap(hcd->regs); +fail_ioremap: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +fail_request_resource: + usb_put_hcd(hcd); +fail_create_hcd: + dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval); + return retval; +} + +/** + * fotg210_hcd_remove - shutdown processing for EHCI HCDs + * @dev: USB Host Controller being removed + * + */ +static int fotg210_hcd_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct usb_hcd *hcd = dev_get_drvdata(dev); + + if (!hcd) + return 0; + + usb_remove_hcd(hcd); + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + usb_put_hcd(hcd); + + return 0; +} + +static struct platform_driver fotg210_hcd_driver = { + .driver = { + .name = "fotg210-hcd", + }, + .probe = fotg210_hcd_probe, + .remove = fotg210_hcd_remove, +}; + +static int __init fotg210_hcd_init(void) +{ + int retval = 0; + + if (usb_disabled()) + return -ENODEV; + + pr_info("%s: " DRIVER_DESC "\n", hcd_name); + set_bit(USB_EHCI_LOADED, &usb_hcds_loaded); + if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) || + test_bit(USB_OHCI_LOADED, &usb_hcds_loaded)) + pr_warn(KERN_WARNING "Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n"); + + pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n", + hcd_name, + sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd), + sizeof(struct fotg210_itd)); + +#ifdef DEBUG + fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root); + if (!fotg210_debug_root) { + retval = -ENOENT; + goto err_debug; + } +#endif + + retval = platform_driver_register(&fotg210_hcd_driver); + if (retval < 0) + goto clean; + return retval; + + platform_driver_unregister(&fotg210_hcd_driver); +clean: +#ifdef DEBUG + debugfs_remove(fotg210_debug_root); + fotg210_debug_root = NULL; +err_debug: +#endif + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); + return retval; +} +module_init(fotg210_hcd_init); + +static void __exit fotg210_hcd_cleanup(void) +{ + platform_driver_unregister(&fotg210_hcd_driver); +#ifdef DEBUG + debugfs_remove(fotg210_debug_root); +#endif + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); +} +module_exit(fotg210_hcd_cleanup); diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h new file mode 100644 index 00000000000..8920f9d3256 --- /dev/null +++ b/drivers/usb/host/fotg210.h @@ -0,0 +1,750 @@ +#ifndef __LINUX_FOTG210_H +#define __LINUX_FOTG210_H + +/* definitions used for the EHCI driver */ + +/* + * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to + * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on + * the host controller implementation. + * + * To facilitate the strongest possible byte-order checking from "sparse" + * and so on, we use __leXX unless that's not practical. 
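+ * Here they always map to __le32/__le16, as the driver assumes
+ * little-endian descriptors.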
+ */ +#define __hc32 __le32 +#define __hc16 __le16 + +/* statistics can be kept for tuning/monitoring */ +struct fotg210_stats { + /* irq usage */ + unsigned long normal; + unsigned long error; + unsigned long iaa; + unsigned long lost_iaa; + + /* termination of urbs from core */ + unsigned long complete; + unsigned long unlink; +}; + +/* fotg210_hcd->lock guards shared data against other CPUs: + * fotg210_hcd: async, unlink, periodic (and shadow), ... + * usb_host_endpoint: hcpriv + * fotg210_qh: qh_next, qtd_list + * fotg210_qtd: qtd_list + * + * Also, hold this lock when talking to HC registers or + * when updating hw_* fields in shared qh/qtd/... structures. + */ + +#define FOTG210_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */ + +/* + * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the + * controller may be doing DMA. Lower values mean there's no DMA. + */ +enum fotg210_rh_state { + FOTG210_RH_HALTED, + FOTG210_RH_SUSPENDED, + FOTG210_RH_RUNNING, + FOTG210_RH_STOPPING +}; + +/* + * Timer events, ordered by increasing delay length. + * Always update event_delays_ns[] and event_handlers[] (defined in + * ehci-timer.c) in parallel with this list. + */ +enum fotg210_hrtimer_event { + FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */ + FOTG210_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */ + FOTG210_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */ + FOTG210_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */ + FOTG210_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */ + FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */ + FOTG210_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */ + FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */ + FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */ + FOTG210_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */ + FOTG210_HRTIMER_NUM_EVENTS /* Must come last */ +}; +#define FOTG210_HRTIMER_NO_EVENT 99 + +struct fotg210_hcd { /* one per controller */ + /* timing support */ + enum fotg210_hrtimer_event next_hrtimer_event; + unsigned enabled_hrtimer_events; + ktime_t hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS]; + struct hrtimer hrtimer; + + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + + /* glue to PCI and HCD framework */ + struct fotg210_caps __iomem *caps; + struct fotg210_regs __iomem *regs; + struct fotg210_dbg_port __iomem *debug; + + __u32 hcs_params; /* cached register copy */ + spinlock_t lock; + enum fotg210_rh_state rh_state; + + /* general schedule support */ + bool scanning:1; + bool need_rescan:1; + bool intr_unlinking:1; + bool async_unlinking:1; + bool shutdown:1; + struct fotg210_qh *qh_scan_next; + + /* async schedule support */ + struct fotg210_qh *async; + struct fotg210_qh *dummy; /* For AMD quirk use */ + struct fotg210_qh *async_unlink; + struct fotg210_qh *async_unlink_last; + struct fotg210_qh *async_iaa; + unsigned async_unlink_cycle; + unsigned async_count; /* async activity count */ + + /* periodic schedule support */ +#define DEFAULT_I_TDPS 1024 /* some HCs can do less */ + unsigned periodic_size; + __hc32 *periodic; /* hw periodic table */ + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned i_thresh; /* uframes HC might cache */ + + union fotg210_shadow *pshadow; /* mirror hw periodic table */ + struct fotg210_qh *intr_unlink; + struct fotg210_qh *intr_unlink_last; + unsigned intr_unlink_cycle; + unsigned now_frame; /* frame from HC hardware */ + unsigned next_frame; /* scan periodic, start here */ + 
unsigned intr_count; /* intr activity count */ + unsigned isoc_count; /* isoc activity count */ + unsigned periodic_count; /* periodic activity count */ + /* max periodic time per uframe */ + unsigned uframe_periodic_max; + + + /* list of itds completed while now_frame was still active */ + struct list_head cached_itd_list; + struct fotg210_itd *last_itd_to_free; + + /* per root hub port */ + unsigned long reset_done[FOTG210_MAX_ROOT_PORTS]; + + /* bit vectors (one bit per port) */ + unsigned long bus_suspended; /* which ports were + already suspended at the start of a bus suspend */ + unsigned long companion_ports; /* which ports are + dedicated to the companion controller */ + unsigned long owned_ports; /* which ports are + owned by the companion during a bus suspend */ + unsigned long port_c_suspend; /* which ports have + the change-suspend feature turned on */ + unsigned long suspended_ports; /* which ports are + suspended */ + unsigned long resuming_ports; /* which ports have + started to resume */ + + /* per-HC memory pools (could be per-bus, but ...) */ + struct dma_pool *qh_pool; /* qh per active urb */ + struct dma_pool *qtd_pool; /* one or more per qh */ + struct dma_pool *itd_pool; /* itd per iso urb */ + + unsigned random_frame; + unsigned long next_statechange; + ktime_t last_periodic_enable; + u32 command; + + /* SILICON QUIRKS */ + unsigned need_io_watchdog:1; + unsigned fs_i_thresh:1; /* Intel iso scheduling */ + + u8 sbrn; /* packed release number */ + + /* irq statistics */ +#ifdef FOTG210_STATS + struct fotg210_stats stats; +# define COUNT(x) ((x)++) +#else +# define COUNT(x) +#endif + + /* debug files */ +#ifdef DEBUG + struct dentry *debug_dir; +#endif +}; + +/* convert between an HCD pointer and the corresponding FOTG210_HCD */ +static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd) +{ + return (struct fotg210_hcd *)(hcd->hcd_priv); +} +static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210) +{ + return container_of((void *) fotg210, struct usb_hcd, hcd_priv); +} + +/*-------------------------------------------------------------------------*/ + +/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ + +/* Section 2.2 Host Controller Capability Registers */ +struct fotg210_caps { + /* these fields are specified as 8 and 16 bit registers, + * but some hosts can't perform 8 or 16 bit PCI accesses. + * some hosts treat caplength and hciversion as parts of a 32-bit + * register, others treat them as two separate registers, this + * affects the memory map for big endian controllers. + */ + u32 hc_capbase; +#define HC_LENGTH(fotg210, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \ + (fotg210_big_endian_capbase(fotg210) ? 24 : 0))) +#define HC_VERSION(fotg210, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \ + (fotg210_big_endian_capbase(fotg210) ? 
0 : 16)))
+	u32	hcs_params;	/* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p)	(((p)>>0)&0xf)	/* bits 3:0, ports on HC */
+
+	u32	hcc_params;	/* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
+	u8	portroute[8];	/* nibbles for routing - offset 0xC */
+};
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fotg210_regs {
+
+	/* USBCMD: offset 0x00 */
+	u32	command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK	(1<<11)		/* enable "park" on async qh */
+#define CMD_PARK_CNT(c)	(((c)>>8)&3)	/* how many transfers to park for */
+#define CMD_IAAD	(1<<6)		/* "doorbell" interrupt async advance */
+#define CMD_ASE		(1<<5)		/* async schedule enable */
+#define CMD_PSE		(1<<4)		/* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET	(1<<1)		/* reset HC not bus */
+#define CMD_RUN		(1<<0)		/* start/stop HC */
+
+	/* USBSTS: offset 0x04 */
+	u32	status;
+#define STS_ASS		(1<<15)		/* Async Schedule Status */
+#define STS_PSS		(1<<14)		/* Periodic Schedule Status */
+#define STS_RECL	(1<<13)		/* Reclamation */
+#define STS_HALT	(1<<12)		/* Not running (any reason) */
+/* some bits reserved */
+	/* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA		(1<<5)		/* Interrupted on async advance */
+#define STS_FATAL	(1<<4)		/* such as some PCI access errors */
+#define STS_FLR		(1<<3)		/* frame list rolled over */
+#define STS_PCD		(1<<2)		/* port change detect */
+#define STS_ERR		(1<<1)		/* "error" completion (overflow, ...) */
+#define STS_INT		(1<<0)		/* "normal" completion (short, ...) */
+
+	/* USBINTR: offset 0x08 */
+	u32	intr_enable;
+
+	/* FRINDEX: offset 0x0C */
+	u32	frame_index;	/* current microframe number */
+	/* CTRLDSSEGMENT: offset 0x10 */
+	u32	segment;	/* address bits 63:32 if needed */
+	/* PERIODICLISTBASE: offset 0x14 */
+	u32	frame_list;	/* points to periodic list */
+	/* ASYNCLISTADDR: offset 0x18 */
+	u32	async_next;	/* address of next async queue head */
+
+	u32	reserved1;
+	/* PORTSC: offset 0x20 */
+	u32	port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x)	(((x)&(3<<10)) == (1<<10))	/* USB 1.1 device */
+#define PORT_RESET	(1<<8)		/* reset port */
+#define PORT_SUSPEND	(1<<7)		/* suspend port */
+#define PORT_RESUME	(1<<6)		/* resume it */
+#define PORT_PEC	(1<<3)		/* port enable change */
+#define PORT_PE		(1<<2)		/* port enable */
+#define PORT_CSC	(1<<1)		/* connect status change */
+#define PORT_CONNECT	(1<<0)		/* device connected */
+#define PORT_RWC_BITS	(PORT_CSC | PORT_PEC)
+	u32	reserved2[19];
+
+	/* OTGCSR: offset 0x70 */
+	u32	otgcsr;
+#define OTGCSR_HOST_SPD_TYP	(3 << 22)
+#define OTGCSR_A_BUS_DROP	(1 << 5)
+#define OTGCSR_A_BUS_REQ	(1 << 4)
+
+	/* OTGISR: offset 0x74 */
+	u32	otgisr;
+#define OTGISR_OVC	(1 << 10)
+
+	u32	reserved3[15];
+
+	/* GMIR: offset 0xB4 */
+	u32	gmir;
+#define GMIR_INT_POLARITY	(1 << 3)	/*Active High*/
+#define GMIR_MHC_INT		(1 << 2)
+#define GMIR_MOTG_INT		(1 << 1)
+#define GMIR_MDEV_INT		(1 << 0)
+};
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */ +struct fotg210_dbg_port { + u32 control; +#define DBGP_OWNER (1<<30) +#define DBGP_ENABLED (1<<28) +#define DBGP_DONE (1<<16) +#define DBGP_INUSE (1<<10) +#define DBGP_ERRCODE(x) (((x)>>7)&0x07) +# define DBGP_ERR_BAD 1 +# define DBGP_ERR_SIGNAL 2 +#define DBGP_ERROR (1<<6) +#define DBGP_GO (1<<5) +#define DBGP_OUT (1<<4) +#define DBGP_LEN(x) (((x)>>0)&0x0f) + u32 pids; +#define DBGP_PID_GET(x) (((x)>>16)&0xff) +#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) + u32 data03; + u32 data47; + u32 address; +#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) +}; + +#ifdef CONFIG_EARLY_PRINTK_DBGP +#include <linux/init.h> +extern int __init early_dbgp_init(char *s); +extern struct console early_dbgp_console; +#endif /* CONFIG_EARLY_PRINTK_DBGP */ + +struct usb_hcd; + +static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) +{ + return 1; /* Shouldn't this be 0? */ +} + +static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) +{ + return -1; +} + +#ifdef CONFIG_EARLY_PRINTK_DBGP +/* Call backs from fotg210 host driver to fotg210 debug driver */ +extern int dbgp_external_startup(struct usb_hcd *); +extern int dbgp_reset_prep(struct usb_hcd *hcd); +#else +static inline int dbgp_reset_prep(struct usb_hcd *hcd) +{ + return xen_dbgp_reset_prep(hcd); +} +static inline int dbgp_external_startup(struct usb_hcd *hcd) +{ + return xen_dbgp_external_startup(hcd); +} +#endif + +/*-------------------------------------------------------------------------*/ + +#define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma) + +/* + * EHCI Specification 0.95 Section 3.5 + * QTD: describe data transfer components (buffer, direction, ...) + * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram". + * + * These are associated only with "QH" (Queue Head) structures, + * used with control, bulk, and interrupt transfers. + */ +struct fotg210_qtd { + /* first part defined by EHCI spec */ + __hc32 hw_next; /* see EHCI 3.5.1 */ + __hc32 hw_alt_next; /* see EHCI 3.5.2 */ + __hc32 hw_token; /* see EHCI 3.5.3 */ +#define QTD_TOGGLE (1 << 31) /* data toggle */ +#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff) +#define QTD_IOC (1 << 15) /* interrupt on complete */ +#define QTD_CERR(tok) (((tok)>>10) & 0x3) +#define QTD_PID(tok) (((tok)>>8) & 0x3) +#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */ +#define QTD_STS_HALT (1 << 6) /* halted on error */ +#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */ +#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */ +#define QTD_STS_XACT (1 << 3) /* device gave illegal response */ +#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */ +#define QTD_STS_STS (1 << 1) /* split transaction state */ +#define QTD_STS_PING (1 << 0) /* issue PING? 
*/ + +#define ACTIVE_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_ACTIVE) +#define HALT_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_HALT) +#define STATUS_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_STS) + + __hc32 hw_buf[5]; /* see EHCI 3.5.4 */ + __hc32 hw_buf_hi[5]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t qtd_dma; /* qtd address */ + struct list_head qtd_list; /* sw qtd list */ + struct urb *urb; /* qtd's urb */ + size_t length; /* length of buffer */ +} __aligned(32); + +/* mask NakCnt+T in qh->hw_alt_next */ +#define QTD_MASK(fotg210) cpu_to_hc32(fotg210, ~0x1f) + +#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1) + +/*-------------------------------------------------------------------------*/ + +/* type tag from {qh,itd,fstn}->hw_next */ +#define Q_NEXT_TYPE(fotg210, dma) ((dma) & cpu_to_hc32(fotg210, 3 << 1)) + +/* + * Now the following defines are not converted using the + * cpu_to_le32() macro anymore, since we have to support + * "dynamic" switching between be and le support, so that the driver + * can be used on one system with SoC EHCI controller using big-endian + * descriptors as well as a normal little-endian PCI EHCI controller. + */ +/* values for that type tag */ +#define Q_TYPE_ITD (0 << 1) +#define Q_TYPE_QH (1 << 1) +#define Q_TYPE_SITD (2 << 1) +#define Q_TYPE_FSTN (3 << 1) + +/* next async queue entry, or pointer to interrupt/periodic QH */ +#define QH_NEXT(fotg210, dma) \ + (cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH)) + +/* for periodic/async schedules and qtd lists, mark end of list */ +#define FOTG210_LIST_END(fotg210) \ + cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */ + +/* + * Entries in periodic shadow table are pointers to one of four kinds + * of data structure. That's dictated by the hardware; a type tag is + * encoded in the low bits of the hardware's periodic schedule. Use + * Q_NEXT_TYPE to get the tag. + * + * For entries in the async schedule, the type tag always says "qh". + */ +union fotg210_shadow { + struct fotg210_qh *qh; /* Q_TYPE_QH */ + struct fotg210_itd *itd; /* Q_TYPE_ITD */ + struct fotg210_fstn *fstn; /* Q_TYPE_FSTN */ + __hc32 *hw_next; /* (all types) */ + void *ptr; +}; + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.6 + * QH: describes control/bulk/interrupt endpoints + * See Fig 3-7 "Queue Head Structure Layout". + * + * These appear in both the async and (for interrupt) periodic schedules. 
+ */ + +/* first part defined by EHCI spec */ +struct fotg210_qh_hw { + __hc32 hw_next; /* see EHCI 3.6.1 */ + __hc32 hw_info1; /* see EHCI 3.6.2 */ +#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */ +#define QH_HEAD (1 << 15) /* Head of async reclamation list */ +#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */ +#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */ +#define QH_LOW_SPEED (1 << 12) +#define QH_FULL_SPEED (0 << 12) +#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */ + __hc32 hw_info2; /* see EHCI 3.6.2 */ +#define QH_SMASK 0x000000ff +#define QH_CMASK 0x0000ff00 +#define QH_HUBADDR 0x007f0000 +#define QH_HUBPORT 0x3f800000 +#define QH_MULT 0xc0000000 + __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */ + + /* qtd overlay (hardware parts of a struct fotg210_qtd) */ + __hc32 hw_qtd_next; + __hc32 hw_alt_next; + __hc32 hw_token; + __hc32 hw_buf[5]; + __hc32 hw_buf_hi[5]; +} __aligned(32); + +struct fotg210_qh { + struct fotg210_qh_hw *hw; /* Must come first */ + /* the rest is HCD-private */ + dma_addr_t qh_dma; /* address of qh */ + union fotg210_shadow qh_next; /* ptr to qh; or periodic */ + struct list_head qtd_list; /* sw qtd list */ + struct list_head intr_node; /* list of intr QHs */ + struct fotg210_qtd *dummy; + struct fotg210_qh *unlink_next; /* next on unlink list */ + + unsigned unlink_cycle; + + u8 needs_rescan; /* Dequeue during giveback */ + u8 qh_state; +#define QH_STATE_LINKED 1 /* HC sees this */ +#define QH_STATE_UNLINK 2 /* HC may still see this */ +#define QH_STATE_IDLE 3 /* HC doesn't see this */ +#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */ +#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */ + + u8 xacterrs; /* XactErr retry counter */ +#define QH_XACTERR_MAX 32 /* XactErr retry limit */ + + /* periodic schedule info */ + u8 usecs; /* intr bandwidth */ + u8 gap_uf; /* uframes split/csplit gap */ + u8 c_usecs; /* ... split completion bw */ + u16 tt_usecs; /* tt downstream bandwidth */ + unsigned short period; /* polling interval */ + unsigned short start; /* where polling starts */ +#define NO_FRAME ((unsigned short)~0) /* pick new start */ + + struct usb_device *dev; /* access to TT */ + unsigned is_out:1; /* bulk or intr OUT */ + unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ +}; + +/*-------------------------------------------------------------------------*/ + +/* description of one iso transaction (up to 3 KB data if highspeed) */ +struct fotg210_iso_packet { + /* These will be copied to iTD when scheduling */ + u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */ + __hc32 transaction; /* itd->hw_transaction[i] |= */ + u8 cross; /* buf crosses pages */ + /* for full speed OUT splits */ + u32 buf1; +}; + +/* temporary schedule data for packets from iso urbs (both speeds) + * each packet is one logical usb transaction to the device (not TT), + * beginning at stream->next_uframe + */ +struct fotg210_iso_sched { + struct list_head td_list; + unsigned span; + struct fotg210_iso_packet packet[0]; +}; + +/* + * fotg210_iso_stream - groups all (s)itds for this endpoint. + * acts like a qh would, if EHCI had them for ISO. 
+ */ +struct fotg210_iso_stream { + /* first field matches fotg210_hq, but is NULL */ + struct fotg210_qh_hw *hw; + + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; /* queued itds */ + struct list_head free_list; /* list of unused itds */ + struct usb_device *udev; + struct usb_host_endpoint *ep; + + /* output of (re)scheduling */ + int next_uframe; + __hc32 splits; + + /* the rest is derived from the endpoint descriptor, + * trusting urb->interval == f(epdesc->bInterval) and + * including the extra info for hw_bufp[0..2] + */ + u8 usecs, c_usecs; + u16 interval; + u16 tt_usecs; + u16 maxp; + u16 raw_mask; + unsigned bandwidth; + + /* This is used to initialize iTD's hw_bufp fields */ + __hc32 buf0; + __hc32 buf1; + __hc32 buf2; + + /* this is used to initialize sITD's tt info */ + __hc32 address; +}; + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.3 + * Fig 3-4 "Isochronous Transaction Descriptor (iTD)" + * + * Schedule records for high speed iso xfers + */ +struct fotg210_itd { + /* first part defined by EHCI spec */ + __hc32 hw_next; /* see EHCI 3.3.1 */ + __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */ +#define FOTG210_ISOC_ACTIVE (1<<31) /* activate transfer this slot */ +#define FOTG210_ISOC_BUF_ERR (1<<30) /* Data buffer error */ +#define FOTG210_ISOC_BABBLE (1<<29) /* babble detected */ +#define FOTG210_ISOC_XACTERR (1<<28) /* XactErr - transaction error */ +#define FOTG210_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff) +#define FOTG210_ITD_IOC (1 << 15) /* interrupt on complete */ + +#define ITD_ACTIVE(fotg210) cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE) + + __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */ + __hc32 hw_bufp_hi[7]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t itd_dma; /* for this itd */ + union fotg210_shadow itd_next; /* ptr to periodic q entry */ + + struct urb *urb; + struct fotg210_iso_stream *stream; /* endpoint's queue */ + struct list_head itd_list; /* list of stream's itds */ + + /* any/all hw_transactions here may be used by that urb */ + unsigned frame; /* where scheduled */ + unsigned pg; + unsigned index[8]; /* in urb->iso_frame_desc */ +} __aligned(32); + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.96 Section 3.7 + * Periodic Frame Span Traversal Node (FSTN) + * + * Manages split interrupt transactions (using TT) that span frame boundaries + * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN + * makes the HC jump (back) to a QH to scan for fs/ls QH completions until + * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work. 
+ */ +struct fotg210_fstn { + __hc32 hw_next; /* any periodic q entry */ + __hc32 hw_prev; /* qh or FOTG210_LIST_END */ + + /* the rest is HCD-private */ + dma_addr_t fstn_dma; + union fotg210_shadow fstn_next; /* ptr to periodic q entry */ +} __aligned(32); + +/*-------------------------------------------------------------------------*/ + +/* Prepare the PORTSC wakeup flags during controller suspend/resume */ + +#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \ + fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup); + +#define fotg210_prepare_ports_for_controller_resume(fotg210) \ + fotg210_adjust_port_wakeup_flags(fotg210, false, false); + +/*-------------------------------------------------------------------------*/ + +/* + * Some EHCI controllers have a Transaction Translator built into the + * root hub. This is a non-standard feature. Each controller will need + * to add code to the following inline functions, and call them as + * needed (mostly in root hub code). + */ + +static inline unsigned int +fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc) +{ + return (readl(&fotg210->regs->otgcsr) + & OTGCSR_HOST_SPD_TYP) >> 22; +} + +/* Returns the speed of a device attached to a port on the root hub. */ +static inline unsigned int +fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc) +{ + switch (fotg210_get_speed(fotg210, portsc)) { + case 0: + return 0; + case 1: + return USB_PORT_STAT_LOW_SPEED; + case 2: + default: + return USB_PORT_STAT_HIGH_SPEED; + } +} + +/*-------------------------------------------------------------------------*/ + +#define fotg210_has_fsl_portno_bug(e) (0) + +/* + * While most USB host controllers implement their registers in + * little-endian format, a minority (celleb companion chip) implement + * them in big endian format. + * + * This attempts to support either format at compile time without a + * runtime penalty, or both formats with the additional overhead + * of checking a flag bit. + * + */ + +#define fotg210_big_endian_mmio(e) 0 +#define fotg210_big_endian_capbase(e) 0 + +static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210, + __u32 __iomem *regs) +{ + return readl(regs); +} + +static inline void fotg210_writel(const struct fotg210_hcd *fotg210, + const unsigned int val, __u32 __iomem *regs) +{ + writel(val, regs); +} + +/* cpu to fotg210 */ +static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x) +{ + return cpu_to_le32(x); +} + +/* fotg210 to cpu */ +static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x) +{ + return le32_to_cpu(x); +} + +static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210, + const __hc32 *x) +{ + return le32_to_cpup(x); +} + +/*-------------------------------------------------------------------------*/ + +static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210) +{ + return fotg210_readl(fotg210, &fotg210->regs->frame_index); +} + +#define fotg210_itdlen(urb, desc, t) ({ \ + usb_pipein((urb)->pipe) ? 
\ + (desc)->length - FOTG210_ITD_LENGTH(t) : \ + FOTG210_ITD_LENGTH(t); \ +}) +/*-------------------------------------------------------------------------*/ + +#ifndef DEBUG +#define STUB_DEBUG_FILES +#endif /* DEBUG */ + +/*-------------------------------------------------------------------------*/ + +#endif /* __LINUX_FOTG210_H */ diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index 11e0b79ff9d..9e0020d9e4c 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -258,8 +258,9 @@ static int fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev) int fsl_usb2_mpc5121_init(struct platform_device *pdev) { - struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; + struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); struct clk *clk; + int err; char clk_name[10]; int base, clk_num; @@ -272,13 +273,16 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev) return -ENODEV; snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num); - clk = clk_get(&pdev->dev, clk_name); + clk = devm_clk_get(pdev->dev.parent, clk_name); if (IS_ERR(clk)) { dev_err(&pdev->dev, "failed to get clk\n"); return PTR_ERR(clk); } - - clk_enable(clk); + err = clk_prepare_enable(clk); + if (err) { + dev_err(&pdev->dev, "failed to enable clk\n"); + return err; + } pdata->clk = clk; if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) { @@ -298,14 +302,12 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev) static void fsl_usb2_mpc5121_exit(struct platform_device *pdev) { - struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; + struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); pdata->regs = NULL; - if (pdata->clk) { - clk_disable(pdata->clk); - clk_put(pdata->clk); - } + if (pdata->clk) + clk_disable_unprepare(pdata->clk); } static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = { diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 483990c716a..5b86ffb88f1 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -161,6 +161,13 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd) usb_hcd->uses_new_polling = 1; set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags); usb_hcd->state = HC_STATE_RUNNING; + + /* + * prevent USB core from suspending the root hub since + * bus_suspend and bus_resume are not yet supported. 
+ */ + pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev); + result = 0; out: mutex_unlock(&wusbhc->mutex); diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c index 03dc4d9cbec..60a5de505ca 100644 --- a/drivers/usb/host/imx21-hcd.c +++ b/drivers/usb/host/imx21-hcd.c @@ -1860,7 +1860,7 @@ static int imx21_probe(struct platform_device *pdev) imx21 = hcd_to_imx21(hcd); imx21->hcd = hcd; imx21->dev = &pdev->dev; - imx21->pdata = pdev->dev.platform_data; + imx21->pdata = dev_get_platdata(&pdev->dev); if (!imx21->pdata) imx21->pdata = &default_pdata; diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index b64e661618b..c7d0f8f231b 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -1626,7 +1626,7 @@ static int isp116x_probe(struct platform_device *pdev) isp116x->addr_reg = addr_reg; spin_lock_init(&isp116x->lock); INIT_LIST_HEAD(&isp116x->async); - isp116x->board = pdev->dev.platform_data; + isp116x->board = dev_get_platdata(&pdev->dev); if (!isp116x->board) { ERR("Platform data structure not initialized\n"); diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h index 9a2c400e609..dd34b7a3396 100644 --- a/drivers/usb/host/isp116x.h +++ b/drivers/usb/host/isp116x.h @@ -325,11 +325,7 @@ struct isp116x_ep { /*-------------------------------------------------------------------------*/ -#ifdef DEBUG -#define DBG(stuff...) printk(KERN_DEBUG "116x: " stuff) -#else -#define DBG(stuff...) do{}while(0) -#endif +#define DBG(stuff...) pr_debug("116x: " stuff) #ifdef VERBOSE # define VDBG DBG @@ -358,15 +354,8 @@ struct isp116x_ep { #define isp116x_check_platform_delay(h) 0 #endif -#if defined(DEBUG) -#define IRQ_TEST() BUG_ON(!irqs_disabled()) -#else -#define IRQ_TEST() do{}while(0) -#endif - static inline void isp116x_write_addr(struct isp116x *isp116x, unsigned reg) { - IRQ_TEST(); writew(reg & 0xff, isp116x->addr_reg); isp116x_delay(isp116x, 300); } diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index b04e8ece4d3..6f29abad681 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -37,11 +37,7 @@ * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz. */ -#ifdef CONFIG_USB_DEBUG -# define ISP1362_DEBUG -#else -# undef ISP1362_DEBUG -#endif +#undef ISP1362_DEBUG /* * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and @@ -82,6 +78,8 @@ #include <linux/io.h> #include <linux/bitmap.h> #include <linux/prefetch.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> #include <asm/irq.h> #include <asm/byteorder.h> @@ -92,7 +90,6 @@ static int dbg_level; module_param(dbg_level, int, 0644); #else module_param(dbg_level, int, 0); -#define STUB_DEBUG_FILE #endif #include "../core/usb.h" @@ -350,8 +347,6 @@ static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep struct ptd *ptd = &ep->ptd; int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 
0 : ep->length; - _BUG_ON(ep->ptd_offset < 0); - prefetch(ptd); isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); if (len) @@ -1575,12 +1570,12 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, DBG(0, "ClearHubFeature: "); switch (wValue) { case C_HUB_OVER_CURRENT: - _DBG(0, "C_HUB_OVER_CURRENT\n"); + DBG(0, "C_HUB_OVER_CURRENT\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); case C_HUB_LOCAL_POWER: - _DBG(0, "C_HUB_LOCAL_POWER\n"); + DBG(0, "C_HUB_LOCAL_POWER\n"); break; default: goto error; @@ -1591,7 +1586,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, switch (wValue) { case C_HUB_OVER_CURRENT: case C_HUB_LOCAL_POWER: - _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n"); + DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n"); break; default: goto error; @@ -1622,36 +1617,36 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, switch (wValue) { case USB_PORT_FEAT_ENABLE: - _DBG(0, "USB_PORT_FEAT_ENABLE\n"); + DBG(0, "USB_PORT_FEAT_ENABLE\n"); tmp = RH_PS_CCS; break; case USB_PORT_FEAT_C_ENABLE: - _DBG(0, "USB_PORT_FEAT_C_ENABLE\n"); + DBG(0, "USB_PORT_FEAT_C_ENABLE\n"); tmp = RH_PS_PESC; break; case USB_PORT_FEAT_SUSPEND: - _DBG(0, "USB_PORT_FEAT_SUSPEND\n"); + DBG(0, "USB_PORT_FEAT_SUSPEND\n"); tmp = RH_PS_POCI; break; case USB_PORT_FEAT_C_SUSPEND: - _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n"); + DBG(0, "USB_PORT_FEAT_C_SUSPEND\n"); tmp = RH_PS_PSSC; break; case USB_PORT_FEAT_POWER: - _DBG(0, "USB_PORT_FEAT_POWER\n"); + DBG(0, "USB_PORT_FEAT_POWER\n"); tmp = RH_PS_LSDA; break; case USB_PORT_FEAT_C_CONNECTION: - _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n"); + DBG(0, "USB_PORT_FEAT_C_CONNECTION\n"); tmp = RH_PS_CSC; break; case USB_PORT_FEAT_C_OVER_CURRENT: - _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n"); + DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n"); tmp = RH_PS_OCIC; break; case USB_PORT_FEAT_C_RESET: - _DBG(0, "USB_PORT_FEAT_C_RESET\n"); + DBG(0, "USB_PORT_FEAT_C_RESET\n"); tmp = RH_PS_PRSC; break; default: @@ -1671,7 +1666,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, wIndex--; switch (wValue) { case USB_PORT_FEAT_SUSPEND: - _DBG(0, "USB_PORT_FEAT_SUSPEND\n"); + DBG(0, "USB_PORT_FEAT_SUSPEND\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS); isp1362_hcd->rhport[wIndex] = @@ -1679,7 +1674,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, spin_unlock_irqrestore(&isp1362_hcd->lock, flags); break; case USB_PORT_FEAT_POWER: - _DBG(0, "USB_PORT_FEAT_POWER\n"); + DBG(0, "USB_PORT_FEAT_POWER\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS); isp1362_hcd->rhport[wIndex] = @@ -1687,7 +1682,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, spin_unlock_irqrestore(&isp1362_hcd->lock, flags); break; case USB_PORT_FEAT_RESET: - _DBG(0, "USB_PORT_FEAT_RESET\n"); + DBG(0, "USB_PORT_FEAT_RESET\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH); @@ -1721,7 +1716,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, default: error: /* "protocol stall" on error */ - _DBG(0, "PROTOCOL STALL\n"); + DBG(0, "PROTOCOL STALL\n"); retval = -EPIPE; } @@ -1913,20 +1908,6 @@ static int 
isp1362_bus_resume(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ -#ifdef STUB_DEBUG_FILE - -static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd) -{ -} -static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd) -{ -} - -#else - -#include <linux/proc_fs.h> -#include <linux/seq_file.h> - static void dump_irq(struct seq_file *s, char *label, u16 mask) { seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask, @@ -2069,7 +2050,7 @@ static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd) isp1362_read_reg16(isp1362_hcd, HCATLDTCTO)); } -static int proc_isp1362_show(struct seq_file *s, void *unused) +static int isp1362_show(struct seq_file *s, void *unused) { struct isp1362_hcd *isp1362_hcd = s->private; struct isp1362_ep *ep; @@ -2173,41 +2154,31 @@ static int proc_isp1362_show(struct seq_file *s, void *unused) return 0; } -static int proc_isp1362_open(struct inode *inode, struct file *file) +static int isp1362_open(struct inode *inode, struct file *file) { - return single_open(file, proc_isp1362_show, PDE_DATA(inode)); + return single_open(file, isp1362_show, inode); } -static const struct file_operations proc_ops = { - .open = proc_isp1362_open, +static const struct file_operations debug_ops = { + .open = isp1362_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* expect just one isp1362_hcd per system */ -static const char proc_filename[] = "driver/isp1362"; - static void create_debug_file(struct isp1362_hcd *isp1362_hcd) { - struct proc_dir_entry *pde; - - pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, isp1362_hcd); - if (pde == NULL) { - pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename); - return; - } - isp1362_hcd->pde = pde; + isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO, + usb_debug_root, + isp1362_hcd, &debug_ops); } static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) { - if (isp1362_hcd->pde) - remove_proc_entry(proc_filename, NULL); + debugfs_remove(isp1362_hcd->debug_file); } -#endif - /*-------------------------------------------------------------------------*/ static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) @@ -2754,7 +2725,7 @@ static int isp1362_probe(struct platform_device *pdev) INIT_LIST_HEAD(&isp1362_hcd->periodic); INIT_LIST_HEAD(&isp1362_hcd->isoc); INIT_LIST_HEAD(&isp1362_hcd->remove_list); - isp1362_hcd->board = pdev->dev.platform_data; + isp1362_hcd->board = dev_get_platdata(&pdev->dev); #if USE_PLATFORM_DELAY if (!isp1362_hcd->board->delay) { dev_err(hcd->self.controller, "No platform delay function given\n"); diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h index 0f97820e65b..3b0b4847c3a 100644 --- a/drivers/usb/host/isp1362.h +++ b/drivers/usb/host/isp1362.h @@ -76,14 +76,14 @@ static inline void delayed_insw(unsigned int addr, void *buf, int len) #define ISP1362_REG_WRITE_OFFSET 0x80 -#ifdef ISP1362_DEBUG -typedef const unsigned int isp1362_reg_t; - #define REG_WIDTH_16 0x000 #define REG_WIDTH_32 0x100 #define REG_WIDTH_MASK 0x100 #define REG_NO_MASK 0x0ff +#ifdef ISP1362_DEBUG +typedef const unsigned int isp1362_reg_t; + #define REG_ACCESS_R 0x200 #define REG_ACCESS_W 0x400 #define REG_ACCESS_RW 0x600 @@ -91,9 +91,6 @@ typedef const unsigned int isp1362_reg_t; #define ISP1362_REG_NO(r) ((r) & REG_NO_MASK) -#define _BUG_ON(x) BUG_ON(x) -#define _WARN_ON(x) WARN_ON(x) - #define ISP1362_REG(name, addr, width, rw) \ static isp1362_reg_t 
ISP1362_REG_##name = ((addr) | (width) | (rw)) @@ -102,8 +99,6 @@ static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw)) #else typedef const unsigned char isp1362_reg_t; #define ISP1362_REG_NO(r) (r) -#define _BUG_ON(x) do {} while (0) -#define _WARN_ON(x) do {} while (0) #define ISP1362_REG(name, addr, width, rw) \ static isp1362_reg_t ISP1362_REG_##name = addr @@ -485,7 +480,7 @@ struct isp1362_hcd { struct isp1362_platform_data *board; - struct proc_dir_entry *pde; + struct dentry *debug_file; unsigned long stat1, stat2, stat4, stat8, stat16; /* HC registers */ @@ -587,21 +582,11 @@ static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd * ISP1362 HW Interface */ -#ifdef ISP1362_DEBUG #define DBG(level, fmt...) \ do { \ if (dbg_level > level) \ pr_debug(fmt); \ } while (0) -#define _DBG(level, fmt...) \ - do { \ - if (dbg_level > level) \ - printk(fmt); \ - } while (0) -#else -#define DBG(fmt...) do {} while (0) -#define _DBG DBG -#endif #ifdef VERBOSE # define VDBG(fmt...) DBG(3, fmt) @@ -645,9 +630,7 @@ static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd */ static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg) { - /*_BUG_ON((reg & ISP1362_REG_WRITE_OFFSET) && !(reg & REG_ACCESS_W));*/ REG_ACCESS_TEST(reg); - _BUG_ON(!irqs_disabled()); DUMMY_DELAY_ACCESS; writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg); DUMMY_DELAY_ACCESS; @@ -656,7 +639,6 @@ static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t re static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val) { - _BUG_ON(!irqs_disabled()); DUMMY_DELAY_ACCESS; writew(val, isp1362_hcd->data_reg); } @@ -665,7 +647,6 @@ static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd) { u16 val; - _BUG_ON(!irqs_disabled()); DUMMY_DELAY_ACCESS; val = readw(isp1362_hcd->data_reg); @@ -674,7 +655,6 @@ static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd) static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val) { - _BUG_ON(!irqs_disabled()); #if USE_32BIT DUMMY_DELAY_ACCESS; writel(val, isp1362_hcd->data_reg); @@ -690,7 +670,6 @@ static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd) { u32 val; - _BUG_ON(!irqs_disabled()); #if USE_32BIT DUMMY_DELAY_ACCESS; val = readl(isp1362_hcd->data_reg); @@ -713,8 +692,6 @@ static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 le if (!len) return; - _BUG_ON(!irqs_disabled()); - RDBG("%s: Reading %d byte from fifo to mem @ %p\n", __func__, len, buf); #if USE_32BIT if (len >= 4) { @@ -760,8 +737,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l return; } - _BUG_ON(!irqs_disabled()); - RDBG("%s: Writing %d byte to fifo from memory @%p\n", __func__, len, buf); #if USE_32BIT if (len >= 4) { @@ -854,7 +829,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l isp1362_write_reg32(d, r, __v & ~m); \ } -#ifdef ISP1362_DEBUG #define isp1362_show_reg(d, r) { \ if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \ DBG(0, "%-12s[%02x]: %08x\n", #r, \ @@ -863,9 +837,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l DBG(0, "%-12s[%02x]: %04x\n", #r, \ ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \ } -#else -#define isp1362_show_reg(d, r) do {} while (0) -#endif static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd) { @@ -923,10 +894,6 @@ static void 
__attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *is static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len) { - _BUG_ON(offset & 1); - _BUG_ON(offset >= ISP1362_BUF_SIZE); - _BUG_ON(len > ISP1362_BUF_SIZE); - _BUG_ON(offset + len > ISP1362_BUF_SIZE); len = (len + 1) & ~1; isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE); @@ -936,42 +903,32 @@ static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len) { - _BUG_ON(offset & 1); - isp1362_write_diraddr(isp1362_hcd, offset, len); DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %p\n", __func__, len, offset, buf); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); - _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA); isp1362_read_fifo(isp1362_hcd, buf, len); - _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); - _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); } static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len) { - _BUG_ON(offset & 1); - isp1362_write_diraddr(isp1362_hcd, offset, len); DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %p\n", __func__, len, offset, buf); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); - _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET); isp1362_write_fifo(isp1362_hcd, buf, len); - _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); - _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); } static void __attribute__((unused)) dump_data(char *buf, int len) @@ -1002,7 +959,7 @@ static void __attribute__((unused)) dump_data(char *buf, int len) } } -#if defined(ISP1362_DEBUG) && defined(PTD_TRACE) +#if defined(PTD_TRACE) static void dump_ptd(struct ptd *ptd) { diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index 3df49b169b5..df931e9ba5b 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c @@ -351,7 +351,7 @@ static int isp1760_plat_probe(struct platform_device *pdev) struct resource *mem_res; struct resource *irq_res; resource_size_t mem_size; - struct isp1760_platform_data *priv = pdev->dev.platform_data; + struct isp1760_platform_data *priv = dev_get_platdata(&pdev->dev); unsigned int devflags = 0; unsigned long irqflags = IRQF_SHARED; diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 9677f683120..caa3764a340 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -31,8 +31,8 @@ #define at91_for_each_port(index) \ for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++) -/* interface and function clocks; sometimes also an AHB clock */ -static struct clk *iclk, *fclk, *hclk; +/* interface, function and usb clocks; sometimes also an AHB clock */ +static struct clk *iclk, *fclk, *uclk, *hclk; static int clocked; extern int usb_disabled(void); @@ -41,6 +41,10 @@ extern int usb_disabled(void); static void at91_start_clock(void) { + if (IS_ENABLED(CONFIG_COMMON_CLK)) { + clk_set_rate(uclk, 48000000); + clk_prepare_enable(uclk); + } clk_prepare_enable(hclk); clk_prepare_enable(iclk); clk_prepare_enable(fclk); @@ 
-52,6 +56,8 @@ static void at91_stop_clock(void) clk_disable_unprepare(fclk); clk_disable_unprepare(iclk); clk_disable_unprepare(hclk); + if (IS_ENABLED(CONFIG_COMMON_CLK)) + clk_disable_unprepare(uclk); clocked = 0; } @@ -162,6 +168,14 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, retval = PTR_ERR(hclk); goto err5; } + if (IS_ENABLED(CONFIG_COMMON_CLK)) { + uclk = clk_get(&pdev->dev, "usb_clk"); + if (IS_ERR(uclk)) { + dev_err(&pdev->dev, "failed to get uclk\n"); + retval = PTR_ERR(uclk); + goto err6; + } + } at91_start_hc(pdev); ohci_hcd_init(hcd_to_ohci(hcd)); @@ -173,6 +187,9 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, /* Error handling */ at91_stop_hc(pdev); + if (IS_ENABLED(CONFIG_COMMON_CLK)) + clk_put(uclk); + err6: clk_put(hclk); err5: clk_put(fclk); @@ -212,12 +229,12 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd, release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); + if (IS_ENABLED(CONFIG_COMMON_CLK)) + clk_put(uclk); clk_put(hclk); clk_put(fclk); clk_put(iclk); fclk = iclk = hclk = NULL; - - dev_set_drvdata(&pdev->dev, NULL); } /*-------------------------------------------------------------------------*/ @@ -225,7 +242,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd, static int ohci_at91_reset (struct usb_hcd *hcd) { - struct at91_usbh_data *board = hcd->self.controller->platform_data; + struct at91_usbh_data *board = dev_get_platdata(hcd->self.controller); struct ohci_hcd *ohci = hcd_to_ohci (hcd); int ret; @@ -280,7 +297,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) */ static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf) { - struct at91_usbh_data *pdata = hcd->self.controller->platform_data; + struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller); int length = ohci_hub_status_data(hcd, buf); int port; @@ -301,7 +318,7 @@ static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf) static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { - struct at91_usbh_data *pdata = hcd->self.controller->platform_data; + struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller); struct usb_hub_descriptor *desc; int ret = -EINVAL; u32 *data = (u32 *)buf; @@ -461,7 +478,7 @@ static const struct hc_driver ohci_at91_hc_driver = { static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data) { struct platform_device *pdev = data; - struct at91_usbh_data *pdata = pdev->dev.platform_data; + struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev); int val, gpio, port; /* From the GPIO notifying the over-current situation, find @@ -567,7 +584,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) if (ret) return ret; - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (pdata) { at91_for_each_port(i) { @@ -643,7 +660,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) { - struct at91_usbh_data *pdata = pdev->dev.platform_data; + struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev); int i; if (pdata) { diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 6aaa9c9c8eb..9be59f11e05 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -85,7 +85,7 @@ static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub, static int ohci_da8xx_init(struct usb_hcd *hcd) { struct device *dev = 
hcd->self.controller; - struct da8xx_ohci_root_hub *hub = dev->platform_data; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); int result; u32 rh_a; @@ -171,7 +171,7 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct device *dev = hcd->self.controller; - struct da8xx_ohci_root_hub *hub = dev->platform_data; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); int temp; switch (typeReq) { @@ -292,7 +292,7 @@ static const struct hc_driver ohci_da8xx_hc_driver = { static int usb_hcd_da8xx_probe(const struct hc_driver *driver, struct platform_device *pdev) { - struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev); struct usb_hcd *hcd; struct resource *mem; int error, irq; @@ -380,7 +380,7 @@ err0: static inline void usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev) { - struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev); hub->ocic_notify(NULL); usb_remove_hcd(hcd); diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c index 8704e9fa5a8..84a20d5223b 100644 --- a/drivers/usb/host/ohci-ep93xx.c +++ b/drivers/usb/host/ohci-ep93xx.c @@ -30,83 +30,6 @@ static struct clk *usb_host_clock; -static void ep93xx_start_hc(struct device *dev) -{ - clk_enable(usb_host_clock); -} - -static void ep93xx_stop_hc(struct device *dev) -{ - clk_disable(usb_host_clock); -} - -static int usb_hcd_ep93xx_probe(const struct hc_driver *driver, - struct platform_device *pdev) -{ - int retval; - struct usb_hcd *hcd; - - if (pdev->resource[1].flags != IORESOURCE_IRQ) { - dev_dbg(&pdev->dev, "resource[1] is not IORESOURCE_IRQ\n"); - return -ENOMEM; - } - - hcd = usb_create_hcd(driver, &pdev->dev, "ep93xx"); - if (hcd == NULL) - return -ENOMEM; - - hcd->rsrc_start = pdev->resource[0].start; - hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; - if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - usb_put_hcd(hcd); - retval = -EBUSY; - goto err1; - } - - hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); - if (hcd->regs == NULL) { - dev_dbg(&pdev->dev, "ioremap failed\n"); - retval = -ENOMEM; - goto err2; - } - - usb_host_clock = clk_get(&pdev->dev, NULL); - if (IS_ERR(usb_host_clock)) { - dev_dbg(&pdev->dev, "clk_get failed\n"); - retval = PTR_ERR(usb_host_clock); - goto err3; - } - - ep93xx_start_hc(&pdev->dev); - - ohci_hcd_init(hcd_to_ohci(hcd)); - - retval = usb_add_hcd(hcd, pdev->resource[1].start, 0); - if (retval == 0) - return retval; - - ep93xx_stop_hc(&pdev->dev); -err3: - iounmap(hcd->regs); -err2: - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); -err1: - usb_put_hcd(hcd); - - return retval; -} - -static void usb_hcd_ep93xx_remove(struct usb_hcd *hcd, - struct platform_device *pdev) -{ - usb_remove_hcd(hcd); - ep93xx_stop_hc(&pdev->dev); - clk_put(usb_host_clock); - iounmap(hcd->regs); - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - usb_put_hcd(hcd); -} - static int ohci_ep93xx_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); @@ -147,15 +70,57 @@ static struct hc_driver ohci_ep93xx_hc_driver = { .start_port_reset = ohci_start_port_reset, }; -extern int usb_disabled(void); - static int ohci_hcd_ep93xx_drv_probe(struct platform_device *pdev) { + struct usb_hcd *hcd; + struct resource *res; + int irq; int ret; - ret = -ENODEV; - if 
(!usb_disabled()) - ret = usb_hcd_ep93xx_probe(&ohci_ep93xx_hc_driver, pdev); + if (usb_disabled()) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + hcd = usb_create_hcd(&ohci_ep93xx_hc_driver, &pdev->dev, "ep93xx"); + if (!hcd) + return -ENOMEM; + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + hcd->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(hcd->regs)) { + ret = PTR_ERR(hcd->regs); + goto err_put_hcd; + } + + usb_host_clock = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(usb_host_clock)) { + ret = PTR_ERR(usb_host_clock); + goto err_put_hcd; + } + + clk_enable(usb_host_clock); + + ohci_hcd_init(hcd_to_ohci(hcd)); + + ret = usb_add_hcd(hcd, irq, 0); + if (ret) + goto err_clk_disable; + + return 0; + +err_clk_disable: + clk_disable(usb_host_clock); +err_put_hcd: + usb_put_hcd(hcd); return ret; } @@ -164,7 +129,9 @@ static int ohci_hcd_ep93xx_drv_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); - usb_hcd_ep93xx_remove(hcd, pdev); + usb_remove_hcd(hcd); + clk_disable(usb_host_clock); + usb_put_hcd(hcd); return 0; } @@ -179,7 +146,7 @@ static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_ msleep(5); ohci->next_statechange = jiffies; - ep93xx_stop_hc(&pdev->dev); + clk_disable(usb_host_clock); return 0; } @@ -192,7 +159,7 @@ static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev) msleep(5); ohci->next_statechange = jiffies; - ep93xx_start_hc(&pdev->dev); + clk_enable(usb_host_clock); ohci_resume(hcd, false); return 0; diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index b0b542c14e3..dc6ee9adacf 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -100,7 +100,7 @@ static const struct hc_driver exynos_ohci_hc_driver = { static int exynos_ohci_probe(struct platform_device *pdev) { - struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data; + struct exynos4_ohci_platdata *pdata = dev_get_platdata(&pdev->dev); struct exynos_ohci_hcd *exynos_ohci; struct usb_hcd *hcd; struct ohci_hcd *ohci; diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index a9d3437da22..8f6b695af6a 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -938,8 +938,8 @@ static void ohci_stop (struct usb_hcd *hcd) if (quirk_nec(ohci)) flush_work(&ohci->nec_work); - ohci_usb_reset (ohci); ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); + ohci_usb_reset(ohci); free_irq(hcd->irq, hcd); hcd->irq = 0; diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 8747fa6a51b..31d3a12eb48 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -191,7 +191,7 @@ static void start_hnp(struct ohci_hcd *ohci) static int ohci_omap_init(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); - struct omap_usb_config *config = hcd->self.controller->platform_data; + struct omap_usb_config *config = dev_get_platdata(hcd->self.controller); int need_transceiver = (config->otg != 0); int ret; @@ -427,7 +427,7 @@ ohci_omap_start (struct usb_hcd *hcd) if (!host_enabled) return 0; - config = hcd->self.controller->platform_data; + config = dev_get_platdata(hcd->self.controller); if (config->otg || config->rwc) { ohci->hc_control = OHCI_CTRL_RWC; writel(OHCI_CTRL_RWC, &ohci->regs->control); diff --git a/drivers/usb/host/ohci-omap3.c 
b/drivers/usb/host/ohci-omap3.c index 8f713571a0b..a09af26f69e 100644 --- a/drivers/usb/host/ohci-omap3.c +++ b/drivers/usb/host/ohci-omap3.c @@ -231,14 +231,6 @@ static int ohci_hcd_omap3_remove(struct platform_device *pdev) return 0; } -static void ohci_hcd_omap3_shutdown(struct platform_device *pdev) -{ - struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - static const struct of_device_id omap_ohci_dt_ids[] = { { .compatible = "ti,ohci-omap3" }, { } @@ -249,7 +241,7 @@ MODULE_DEVICE_TABLE(of, omap_ohci_dt_ids); static struct platform_driver ohci_hcd_omap3_driver = { .probe = ohci_hcd_omap3_probe, .remove = ohci_hcd_omap3_remove, - .shutdown = ohci_hcd_omap3_shutdown, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ohci-omap3", .of_match_table = omap_ohci_dt_ids, diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 08613e24189..ec337c2bd5e 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -289,7 +289,7 @@ static struct pci_driver ohci_pci_driver = { .remove = usb_hcd_pci_remove, .shutdown = usb_hcd_pci_shutdown, -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM .driver = { .pm = &usb_hcd_pci_pm_ops }, @@ -304,6 +304,13 @@ static int __init ohci_pci_init(void) pr_info("%s: " DRIVER_DESC "\n", hcd_name); ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); + +#ifdef CONFIG_PM + /* Entries for the PCI suspend/resume callbacks are special */ + ohci_pci_hc_driver.pci_suspend = ohci_suspend; + ohci_pci_hc_driver.pci_resume = ohci_resume; +#endif + return pci_register_driver(&ohci_pci_driver); } module_init(ohci_pci_init); diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c index bc30475c3a2..a4c6410f0ed 100644 --- a/drivers/usb/host/ohci-platform.c +++ b/drivers/usb/host/ohci-platform.c @@ -33,7 +33,7 @@ static const char hcd_name[] = "ohci-platform"; static int ohci_platform_reset(struct usb_hcd *hcd) { struct platform_device *pdev = to_platform_device(hcd->self.controller); - struct usb_ohci_pdata *pdata = pdev->dev.platform_data; + struct usb_ohci_pdata *pdata = dev_get_platdata(&pdev->dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); if (pdata->big_endian_desc) @@ -59,7 +59,7 @@ static int ohci_platform_probe(struct platform_device *dev) { struct usb_hcd *hcd; struct resource *res_mem; - struct usb_ohci_pdata *pdata = dev->dev.platform_data; + struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev); int irq; int err = -ENOMEM; @@ -124,7 +124,7 @@ err_power: static int ohci_platform_remove(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); - struct usb_ohci_pdata *pdata = dev->dev.platform_data; + struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev); usb_remove_hcd(hcd); usb_put_hcd(hcd); @@ -139,7 +139,7 @@ static int ohci_platform_remove(struct platform_device *dev) static int ohci_platform_suspend(struct device *dev) { - struct usb_ohci_pdata *pdata = dev->platform_data; + struct usb_ohci_pdata *pdata = dev_get_platdata(dev); struct platform_device *pdev = container_of(dev, struct platform_device, dev); @@ -152,7 +152,7 @@ static int ohci_platform_suspend(struct device *dev) static int ohci_platform_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct usb_ohci_pdata *pdata = dev->platform_data; + struct usb_ohci_pdata *pdata = dev_get_platdata(dev); struct platform_device *pdev = container_of(dev, struct platform_device, dev); diff --git a/drivers/usb/host/ohci-ppc-of.c 
b/drivers/usb/host/ohci-ppc-of.c index 8294e2fcc2f..75f5a1e2f01 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -200,15 +200,6 @@ static int ohci_hcd_ppc_of_remove(struct platform_device *op) return 0; } -static void ohci_hcd_ppc_of_shutdown(struct platform_device *op) -{ - struct usb_hcd *hcd = platform_get_drvdata(op); - - if (hcd->driver->shutdown) - hcd->driver->shutdown(hcd); -} - - static const struct of_device_id ohci_hcd_ppc_of_match[] = { #ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE { @@ -243,7 +234,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match); static struct platform_driver ohci_hcd_ppc_of_driver = { .probe = ohci_hcd_ppc_of_probe, .remove = ohci_hcd_ppc_of_remove, - .shutdown = ohci_hcd_ppc_of_shutdown, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ppc-of-ohci", .owner = THIS_MODULE, diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index 3a9c01d8b79..93371a235e8 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -219,7 +219,7 @@ static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev) struct pxaohci_platform_data *inf; uint32_t uhchr; - inf = dev->platform_data; + inf = dev_get_platdata(dev); clk_prepare_enable(ohci->clk); @@ -256,7 +256,7 @@ static void pxa27x_stop_hc(struct pxa27x_ohci *ohci, struct device *dev) struct pxaohci_platform_data *inf; uint32_t uhccoms; - inf = dev->platform_data; + inf = dev_get_platdata(dev); if (cpu_is_pxa3xx()) pxa3xx_u2d_stop_hc(&ohci_to_hcd(&ohci->ohci)->self); @@ -364,7 +364,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device if (retval) return retval; - inf = pdev->dev.platform_data; + inf = dev_get_platdata(&pdev->dev); if (!inf) return -ENODEV; @@ -577,7 +577,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); - struct pxaohci_platform_data *inf = dev->platform_data; + struct pxaohci_platform_data *inf = dev_get_platdata(dev); int status; if (time_before(jiffies, ohci->ohci.next_statechange)) diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c index e125770b893..4919afa4125 100644 --- a/drivers/usb/host/ohci-s3c2410.c +++ b/drivers/usb/host/ohci-s3c2410.c @@ -38,12 +38,12 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc); static struct s3c2410_hcd_info *to_s3c2410_info(struct usb_hcd *hcd) { - return hcd->self.controller->platform_data; + return dev_get_platdata(hcd->self.controller); } static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd) { - struct s3c2410_hcd_info *info = dev->dev.platform_data; + struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev); dev_dbg(&dev->dev, "s3c2410_start_hc:\n"); @@ -63,7 +63,7 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd) static void s3c2410_stop_hc(struct platform_device *dev) { - struct s3c2410_hcd_info *info = dev->dev.platform_data; + struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev); dev_dbg(&dev->dev, "s3c2410_stop_hc:\n"); @@ -339,10 +339,11 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver, struct platform_device *dev) { struct usb_hcd *hcd = NULL; + struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev); int retval; - s3c2410_usb_set_power(dev->dev.platform_data, 1, 1); - s3c2410_usb_set_power(dev->dev.platform_data, 2, 1); + s3c2410_usb_set_power(info, 1, 1); + 
s3c2410_usb_set_power(info, 2, 1); hcd = usb_create_hcd(driver, &dev->dev, "s3c24xx"); if (hcd == NULL) diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c index 197d514fe0d..22540ab71f5 100644 --- a/drivers/usb/host/ohci-tilegx.c +++ b/drivers/usb/host/ohci-tilegx.c @@ -95,7 +95,7 @@ static const struct hc_driver ohci_tilegx_hc_driver = { static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd; - struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data; + struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); pte_t pte = { 0 }; int my_cpu = smp_processor_id(); int ret; @@ -175,7 +175,7 @@ err_hcd: static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); - struct tilegx_usb_platform_data* pdata = pdev->dev.platform_data; + struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); usb_remove_hcd(hcd); usb_put_hcd(hcd); diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index b9848e4d3d4..2c76ef1320e 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -735,32 +735,6 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, return -ETIMEDOUT; } -#define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31 -#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31 - -bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev) -{ - return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && - pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI; -} - -/* The Intel Lynx Point chipset also has switchable ports. */ -bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev) -{ - return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && - pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI); -} - -bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) -{ - return usb_is_intel_ppt_switchable_xhci(pdev) || - usb_is_intel_lpt_switchable_xhci(pdev); -} -EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci); - /* * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that * share some number of ports. These ports can be switched between either @@ -779,9 +753,23 @@ EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci); * terminations before switching the USB 2.0 wires over, so that USB 3.0 * devices connect at SuperSpeed, rather than at USB 2.0 speeds. */ -void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) +void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) { u32 ports_available; + bool ehci_found = false; + struct pci_dev *companion = NULL; + + /* make sure an intel EHCI controller exists */ + for_each_pci_dev(companion) { + if (companion->class == PCI_CLASS_SERIAL_USB_EHCI && + companion->vendor == PCI_VENDOR_ID_INTEL) { + ehci_found = true; + break; + } + } + + if (!ehci_found) + return; /* Don't switchover the ports if the user hasn't compiled the xHCI * driver. 
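/*
 * Illustrative sketch (not from the patch) of the bus-walk idiom that
 * usb_enable_intel_xhci_ports() now uses: for_each_pci_dev() wraps
 * pci_get_device(PCI_ANY_ID, PCI_ANY_ID, ...), visiting every PCI device
 * and matching by class/vendor.  The iterator takes a reference on each
 * device it returns, so a helper that stops early should drop it with
 * pci_dev_put().  have_intel_ehci_companion() is hypothetical.
 */
static bool have_intel_ehci_companion(void)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {
		if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
		    pdev->vendor == PCI_VENDOR_ID_INTEL) {
			pci_dev_put(pdev);
			return true;
		}
	}
	return false;
}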
Otherwise they will see "dead" USB ports that don't power @@ -840,7 +828,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over " "to xHCI: 0x%x\n", ports_available); } -EXPORT_SYMBOL_GPL(usb_enable_xhci_ports); +EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports); void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) { @@ -921,8 +909,8 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); hc_init: - if (usb_is_intel_switchable_xhci(pdev)) - usb_enable_xhci_ports(pdev); + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(pdev); op_reg_base = base + XHCI_HC_LENGTH(readl(base)); diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index 4b8a2092432..ed6700d00fe 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -8,11 +8,11 @@ int usb_amd_find_chipset_info(void); void usb_amd_dev_put(void); void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); -bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); -void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); void sb800_prefetch(struct device *dev, int on); #else +struct pci_dev; static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} static inline void usb_amd_dev_put(void) {} diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index a6fd8f5371d..2ad004ae747 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -2393,7 +2393,7 @@ static const struct dev_pm_ops r8a66597_dev_pm_ops = { static int r8a66597_remove(struct platform_device *pdev) { - struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev); + struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); del_timer_sync(&r8a66597->rh_timer); @@ -2466,8 +2466,8 @@ static int r8a66597_probe(struct platform_device *pdev) } r8a66597 = hcd_to_r8a66597(hcd); memset(r8a66597, 0, sizeof(struct r8a66597)); - dev_set_drvdata(&pdev->dev, r8a66597); - r8a66597->pdata = pdev->dev.platform_data; + platform_set_drvdata(pdev, r8a66597); + r8a66597->pdata = dev_get_platdata(&pdev->dev); r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; if (r8a66597->pdata->on_chip) { diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index b2ec7fe758d..5477bf5df21 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -48,6 +48,8 @@ #include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <linux/prefetch.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> #include <asm/io.h> #include <asm/irq.h> @@ -63,11 +65,6 @@ MODULE_ALIAS("platform:sl811-hcd"); #define DRIVER_VERSION "19 May 2005" - -#ifndef DEBUG -# define STUB_DEBUG_FILE -#endif - /* for now, use only one transfer register bank */ #undef USE_B @@ -100,7 +97,8 @@ static void port_power(struct sl811 *sl811, int is_on) if (sl811->board && sl811->board->port_power) { /* switch VBUS, at 500mA unless hub power budget gets set */ - DBG("power %s\n", is_on ? "on" : "off"); + dev_dbg(hcd->self.controller, "power %s\n", + is_on ? 
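/*
 * Illustrative sketch (not from the patch) of the drvdata convention the
 * r8a66597 hunk above adopts: pair platform_set_drvdata() in probe with
 * platform_get_drvdata() in remove, instead of mixing in the raw
 * dev_set_drvdata()/dev_get_drvdata() forms.  "foo" names are placeholders.
 */
struct foo_priv {
	int dummy;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	(void)priv;	/* tear down using priv ... */
	return 0;
}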
"on" : "off"); sl811->board->port_power(hcd->self.controller, is_on); } @@ -282,7 +280,7 @@ static inline void sofirq_on(struct sl811 *sl811) { if (sl811->irq_enable & SL11H_INTMASK_SOFINTR) return; - VDBG("sof irq on\n"); + dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq on\n"); sl811->irq_enable |= SL11H_INTMASK_SOFINTR; } @@ -290,7 +288,7 @@ static inline void sofirq_off(struct sl811 *sl811) { if (!(sl811->irq_enable & SL11H_INTMASK_SOFINTR)) return; - VDBG("sof irq off\n"); + dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq off\n"); sl811->irq_enable &= ~SL11H_INTMASK_SOFINTR; } @@ -338,7 +336,8 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank) } if (unlikely(list_empty(&ep->hep->urb_list))) { - DBG("empty %p queue?\n", ep); + dev_dbg(sl811_to_hcd(sl811)->self.controller, + "empty %p queue?\n", ep); return NULL; } @@ -391,7 +390,8 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank) status_packet(sl811, ep, urb, bank, control); break; default: - DBG("bad ep%p pid %02x\n", ep, ep->nextpid); + dev_dbg(sl811_to_hcd(sl811)->self.controller, + "bad ep%p pid %02x\n", ep, ep->nextpid); ep = NULL; } return ep; @@ -447,7 +447,8 @@ static void finish_request( } /* periodic deschedule */ - DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); + dev_dbg(sl811_to_hcd(sl811)->self.controller, + "deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { struct sl811h_ep *temp; struct sl811h_ep **prev = &sl811->periodic[i]; @@ -593,7 +594,8 @@ static inline u8 checkdone(struct sl811 *sl811) ctl = sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG)); if (ctl & SL11H_HCTLMASK_ARM) sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0); - DBG("%s DONE_A: ctrl %02x sts %02x\n", + dev_dbg(sl811_to_hcd(sl811)->self.controller, + "%s DONE_A: ctrl %02x sts %02x\n", (ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost", ctl, sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG))); @@ -604,7 +606,8 @@ static inline u8 checkdone(struct sl811 *sl811) ctl = sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG)); if (ctl & SL11H_HCTLMASK_ARM) sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0); - DBG("%s DONE_B: ctrl %02x sts %02x\n", + dev_dbg(sl811_to_hcd(sl811)->self.controller, + "%s DONE_B: ctrl %02x sts %02x\n", (ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost", ctl, sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG))); @@ -665,7 +668,7 @@ retry: * this one has nothing scheduled. */ if (sl811->next_periodic) { - // ERR("overrun to slot %d\n", index); + // dev_err(hcd->self.controller, "overrun to slot %d\n", index); sl811->stat_overrun++; } if (sl811->periodic[index]) @@ -723,7 +726,7 @@ retry: } else if (irqstat & SL11H_INTMASK_RD) { if (sl811->port1 & USB_PORT_STAT_SUSPEND) { - DBG("wakeup\n"); + dev_dbg(hcd->self.controller, "wakeup\n"); sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16; sl811->stat_wake++; } else @@ -852,8 +855,9 @@ static int sl811h_urb_enqueue( if (ep->maxpacket > H_MAXPACKET) { /* iso packets up to 240 bytes could work... 
*/ - DBG("dev %d ep%d maxpacket %d\n", - udev->devnum, epnum, ep->maxpacket); + dev_dbg(hcd->self.controller, + "dev %d ep%d maxpacket %d\n", udev->devnum, + epnum, ep->maxpacket); retval = -EINVAL; kfree(ep); goto fail; @@ -917,7 +921,8 @@ static int sl811h_urb_enqueue( * to share the faster parts of the tree without needing * dummy/placeholder nodes */ - DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); + dev_dbg(hcd->self.controller, "schedule qh%d/%p branch %d\n", + ep->period, ep, ep->branch); for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { struct sl811h_ep **prev = &sl811->periodic[i]; struct sl811h_ep *here = *prev; @@ -976,7 +981,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } else if (sl811->active_a == ep) { if (time_before_eq(sl811->jiffies_a, jiffies)) { /* happens a lot with lowspeed?? */ - DBG("giveup on DONE_A: ctrl %02x sts %02x\n", + dev_dbg(hcd->self.controller, + "giveup on DONE_A: ctrl %02x sts %02x\n", sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG)), sl811_read(sl811, @@ -990,7 +996,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } else if (sl811->active_b == ep) { if (time_before_eq(sl811->jiffies_a, jiffies)) { /* happens a lot with lowspeed?? */ - DBG("giveup on DONE_B: ctrl %02x sts %02x\n", + dev_dbg(hcd->self.controller, + "giveup on DONE_B: ctrl %02x sts %02x\n", sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG)), sl811_read(sl811, @@ -1008,7 +1015,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) if (urb) finish_request(sl811, ep, urb, 0); else - VDBG("dequeue, urb %p active %s; wait4irq\n", urb, + dev_dbg(sl811_to_hcd(sl811)->self.controller, + "dequeue, urb %p active %s; wait4irq\n", urb, (sl811->active_a == ep) ? 
"A" : "B"); } else retval = -EINVAL; @@ -1029,7 +1037,7 @@ sl811h_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) if (!list_empty(&hep->urb_list)) msleep(3); if (!list_empty(&hep->urb_list)) - WARNING("ep %p not empty?\n", ep); + dev_warn(hcd->self.controller, "ep %p not empty?\n", ep); kfree(ep); hep->hcpriv = NULL; @@ -1132,7 +1140,7 @@ sl811h_timer(unsigned long _sl811) switch (signaling) { case SL11H_CTL1MASK_SE0: - DBG("end reset\n"); + dev_dbg(sl811_to_hcd(sl811)->self.controller, "end reset\n"); sl811->port1 = (USB_PORT_STAT_C_RESET << 16) | USB_PORT_STAT_POWER; sl811->ctrl1 = 0; @@ -1141,11 +1149,12 @@ sl811h_timer(unsigned long _sl811) irqstat &= ~SL11H_INTMASK_RD; break; case SL11H_CTL1MASK_K: - DBG("end resume\n"); + dev_dbg(sl811_to_hcd(sl811)->self.controller, "end resume\n"); sl811->port1 &= ~USB_PORT_STAT_SUSPEND; break; default: - DBG("odd timer signaling: %02x\n", signaling); + dev_dbg(sl811_to_hcd(sl811)->self.controller, + "odd timer signaling: %02x\n", signaling); break; } sl811_write(sl811, SL11H_IRQ_STATUS, irqstat); @@ -1243,7 +1252,7 @@ sl811h_hub_control( break; /* 20 msec of resume/K signaling, other irqs blocked */ - DBG("start resume...\n"); + dev_dbg(hcd->self.controller, "start resume...\n"); sl811->irq_enable = 0; sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable); @@ -1281,7 +1290,8 @@ sl811h_hub_control( #ifndef VERBOSE if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ #endif - DBG("GetPortStatus %08x\n", sl811->port1); + dev_dbg(hcd->self.controller, "GetPortStatus %08x\n", + sl811->port1); break; case SetPortFeature: if (wIndex != 1 || wLength != 0) @@ -1293,7 +1303,7 @@ sl811h_hub_control( if (!(sl811->port1 & USB_PORT_STAT_ENABLE)) goto error; - DBG("suspend...\n"); + dev_dbg(hcd->self.controller,"suspend...\n"); sl811->ctrl1 &= ~SL11H_CTL1MASK_SOF_ENA; sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); break; @@ -1338,7 +1348,7 @@ static int sl811h_bus_suspend(struct usb_hcd *hcd) { // SOFs off - DBG("%s\n", __func__); + dev_dbg(hcd->self.controller, "%s\n", __func__); return 0; } @@ -1346,7 +1356,7 @@ static int sl811h_bus_resume(struct usb_hcd *hcd) { // SOFs on - DBG("%s\n", __func__); + dev_dbg(hcd->self.controller, "%s\n", __func__); return 0; } @@ -1360,16 +1370,6 @@ sl811h_bus_resume(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ -#ifdef STUB_DEBUG_FILE - -static inline void create_debug_file(struct sl811 *sl811) { } -static inline void remove_debug_file(struct sl811 *sl811) { } - -#else - -#include <linux/proc_fs.h> -#include <linux/seq_file.h> - static void dump_irq(struct seq_file *s, char *label, u8 mask) { seq_printf(s, "%s %02x%s%s%s%s%s%s\n", label, mask, @@ -1381,7 +1381,7 @@ static void dump_irq(struct seq_file *s, char *label, u8 mask) (mask & SL11H_INTMASK_DP) ? 
" dp" : ""); } -static int proc_sl811h_show(struct seq_file *s, void *unused) +static int sl811h_show(struct seq_file *s, void *unused) { struct sl811 *sl811 = s->private; struct sl811h_ep *ep; @@ -1492,34 +1492,31 @@ static int proc_sl811h_show(struct seq_file *s, void *unused) return 0; } -static int proc_sl811h_open(struct inode *inode, struct file *file) +static int sl811h_open(struct inode *inode, struct file *file) { - return single_open(file, proc_sl811h_show, PDE_DATA(inode)); + return single_open(file, sl811h_show, inode->i_private); } -static const struct file_operations proc_ops = { - .open = proc_sl811h_open, +static const struct file_operations debug_ops = { + .open = sl811h_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* expect just one sl811 per system */ -static const char proc_filename[] = "driver/sl811h"; - static void create_debug_file(struct sl811 *sl811) { - sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811); + sl811->debug_file = debugfs_create_file("sl811h", S_IRUGO, + usb_debug_root, sl811, + &debug_ops); } static void remove_debug_file(struct sl811 *sl811) { - if (sl811->pde) - remove_proc_entry(proc_filename, NULL); + debugfs_remove(sl811->debug_file); } -#endif - /*-------------------------------------------------------------------------*/ static void @@ -1648,7 +1645,7 @@ sl811h_probe(struct platform_device *dev) /* refuse to confuse usbcore */ if (dev->dev.dma_mask) { - DBG("no we won't dma\n"); + dev_dbg(&dev->dev, "no we won't dma\n"); return -EINVAL; } @@ -1694,7 +1691,7 @@ sl811h_probe(struct platform_device *dev) spin_lock_init(&sl811->lock); INIT_LIST_HEAD(&sl811->async); - sl811->board = dev->dev.platform_data; + sl811->board = dev_get_platdata(&dev->dev); init_timer(&sl811->timer); sl811->timer.function = sl811h_timer; sl811->timer.data = (unsigned long) sl811; @@ -1716,7 +1713,7 @@ sl811h_probe(struct platform_device *dev) break; default: /* reject case 0, SL11S is less functional */ - DBG("chiprev %02x\n", tmp); + dev_dbg(&dev->dev, "chiprev %02x\n", tmp); retval = -ENXIO; goto err6; } @@ -1747,7 +1744,7 @@ sl811h_probe(struct platform_device *dev) if (!ioaddr) iounmap(addr_reg); err2: - DBG("init error, %d\n", retval); + dev_dbg(&dev->dev, "init error, %d\n", retval); return retval; } diff --git a/drivers/usb/host/sl811.h b/drivers/usb/host/sl811.h index b6b8c1f233d..1e23ef49bec 100644 --- a/drivers/usb/host/sl811.h +++ b/drivers/usb/host/sl811.h @@ -122,7 +122,7 @@ struct sl811 { void __iomem *addr_reg; void __iomem *data_reg; struct sl811_platform_data *board; - struct proc_dir_entry *pde; + struct dentry *debug_file; unsigned long stat_insrmv; unsigned long stat_wake; @@ -242,25 +242,8 @@ sl811_read_buf(struct sl811 *sl811, int addr, void *buf, size_t count) /*-------------------------------------------------------------------------*/ -#ifdef DEBUG -#define DBG(stuff...) printk(KERN_DEBUG "sl811: " stuff) -#else -#define DBG(stuff...) do{}while(0) -#endif - -#ifdef VERBOSE -# define VDBG DBG -#else -# define VDBG(stuff...) do{}while(0) -#endif - #ifdef PACKET_TRACE -# define PACKET VDBG +# define PACKET pr_debug("sl811: "stuff) #else # define PACKET(stuff...) do{}while(0) #endif - -#define ERR(stuff...) printk(KERN_ERR "sl811: " stuff) -#define WARNING(stuff...) printk(KERN_WARNING "sl811: " stuff) -#define INFO(stuff...) 
printk(KERN_INFO "sl811: " stuff) - diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 5c124bf5d01..e402beb5a06 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -1809,9 +1809,9 @@ static int u132_hcd_start(struct usb_hcd *hcd) struct platform_device *pdev = to_platform_device(hcd->self.controller); u16 vendor = ((struct u132_platform_data *) - (pdev->dev.platform_data))->vendor; + dev_get_platdata(&pdev->dev))->vendor; u16 device = ((struct u132_platform_data *) - (pdev->dev.platform_data))->device; + dev_get_platdata(&pdev->dev))->device; mutex_lock(&u132->sw_lock); msleep(10); if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) { @@ -3034,7 +3034,7 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev) int addrs = MAX_U132_ADDRS; int udevs = MAX_U132_UDEVS; int endps = MAX_U132_ENDPS; - u132->board = pdev->dev.platform_data; + u132->board = dev_get_platdata(&pdev->dev); u132->platform_dev = pdev; u132->power = 0; u132->reset = 0; diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 5d5e58fdecc..73503a81ee8 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -580,3 +580,17 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci, xhci_dbg_slot_ctx(xhci, ctx); xhci_dbg_ep_ctx(xhci, ctx, last_ep); } + +void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + xhci_dbg(xhci, "%pV\n", &vaf); + trace(&vaf); + va_end(args); +} diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index 8d7a1324e2f..9fe3225e6c6 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -71,7 +71,7 @@ /* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */ #define XHCI_HLC (1 << 19) -#define XHCI_BLC (1 << 19) +#define XHCI_BLC (1 << 20) /* command register values to disable interrupts and halt the HC */ /* start/stop HC execution - do not write unless HC is halted*/ diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 1d3545943c5..fae697ed0b7 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -24,6 +24,7 @@ #include <asm/unaligned.h> #include "xhci.h" +#include "xhci-trace.h" #define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ @@ -461,8 +462,15 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array, } } +/* Updates Link Status for USB 2.1 port */ +static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg) +{ + if ((status_reg & PORT_PLS_MASK) == XDEV_U2) + *status |= USB_PORT_STAT_L1; +} + /* Updates Link Status for super Speed port */ -static void xhci_hub_report_link_state(u32 *status, u32 status_reg) +static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) { u32 pls = status_reg & PORT_PLS_MASK; @@ -528,12 +536,128 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex) xhci->port_status_u0 |= 1 << wIndex; if (xhci->port_status_u0 == all_ports_seen_u0) { del_timer_sync(&xhci->comp_mode_recovery_timer); - xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n"); - xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "All USB3 ports have entered U0 already!"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + 
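/*
 * Illustrative sketch (not from the patch) of the %pV/struct va_format
 * idiom behind the new xhci_dbg_trace() helper above: the variadic
 * arguments are captured once and handed to the printing back end as a
 * single "%pV" argument.  If more than one consumer walks the same
 * argument list, each needs its own va_copy().  foo_dbg() is a
 * hypothetical wrapper.
 */
static __printf(2, 3)
void foo_dbg(struct device *dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	dev_dbg(dev, "%pV\n", &vaf);
	va_end(args);
}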
"Compliance Mode Recovery Timer Deleted."); } } } +/* + * Converts a raw xHCI port status into the format that external USB 2.0 or USB + * 3.0 hubs use. + * + * Possible side effects: + * - Mark a port as being done with device resume, + * and ring the endpoint doorbells. + * - Stop the Synopsys redriver Compliance Mode polling. + */ +static u32 xhci_get_port_status(struct usb_hcd *hcd, + struct xhci_bus_state *bus_state, + __le32 __iomem **port_array, + u16 wIndex, u32 raw_port_status) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + u32 status = 0; + int slot_id; + + /* wPortChange bits */ + if (raw_port_status & PORT_CSC) + status |= USB_PORT_STAT_C_CONNECTION << 16; + if (raw_port_status & PORT_PEC) + status |= USB_PORT_STAT_C_ENABLE << 16; + if ((raw_port_status & PORT_OCC)) + status |= USB_PORT_STAT_C_OVERCURRENT << 16; + if ((raw_port_status & PORT_RC)) + status |= USB_PORT_STAT_C_RESET << 16; + /* USB3.0 only */ + if (hcd->speed == HCD_USB3) { + if ((raw_port_status & PORT_PLC)) + status |= USB_PORT_STAT_C_LINK_STATE << 16; + if ((raw_port_status & PORT_WRC)) + status |= USB_PORT_STAT_C_BH_RESET << 16; + } + + if (hcd->speed != HCD_USB3) { + if ((raw_port_status & PORT_PLS_MASK) == XDEV_U3 + && (raw_port_status & PORT_POWER)) + status |= USB_PORT_STAT_SUSPEND; + } + if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && + !DEV_SUPERSPEED(raw_port_status)) { + if ((raw_port_status & PORT_RESET) || + !(raw_port_status & PORT_PE)) + return 0xffffffff; + if (time_after_eq(jiffies, + bus_state->resume_done[wIndex])) { + xhci_dbg(xhci, "Resume USB2 port %d\n", + wIndex + 1); + bus_state->resume_done[wIndex] = 0; + clear_bit(wIndex, &bus_state->resuming_ports); + xhci_set_link_state(xhci, port_array, wIndex, + XDEV_U0); + xhci_dbg(xhci, "set port %d resume\n", + wIndex + 1); + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + wIndex + 1); + if (!slot_id) { + xhci_dbg(xhci, "slot_id is zero\n"); + return 0xffffffff; + } + xhci_ring_device(xhci, slot_id); + bus_state->port_c_suspend |= 1 << wIndex; + bus_state->suspended_ports &= ~(1 << wIndex); + } else { + /* + * The resume has been signaling for less than + * 20ms. Report the port status as SUSPEND, + * let the usbcore check port status again + * and clear resume signaling later. + */ + status |= USB_PORT_STAT_SUSPEND; + } + } + if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 + && (raw_port_status & PORT_POWER) + && (bus_state->suspended_ports & (1 << wIndex))) { + bus_state->suspended_ports &= ~(1 << wIndex); + if (hcd->speed != HCD_USB3) + bus_state->port_c_suspend |= 1 << wIndex; + } + if (raw_port_status & PORT_CONNECT) { + status |= USB_PORT_STAT_CONNECTION; + status |= xhci_port_speed(raw_port_status); + } + if (raw_port_status & PORT_PE) + status |= USB_PORT_STAT_ENABLE; + if (raw_port_status & PORT_OC) + status |= USB_PORT_STAT_OVERCURRENT; + if (raw_port_status & PORT_RESET) + status |= USB_PORT_STAT_RESET; + if (raw_port_status & PORT_POWER) { + if (hcd->speed == HCD_USB3) + status |= USB_SS_PORT_STAT_POWER; + else + status |= USB_PORT_STAT_POWER; + } + /* Update Port Link State */ + if (hcd->speed == HCD_USB3) { + xhci_hub_report_usb3_link_state(&status, raw_port_status); + /* + * Verify if all USB3 Ports Have entered U0 already. + * Delete Compliance Mode Timer if so. 
+ */ + xhci_del_comp_mod_timer(xhci, raw_port_status, wIndex); + } else { + xhci_hub_report_usb2_link_state(&status, raw_port_status); + } + if (bus_state->port_c_suspend & (1 << wIndex)) + status |= 1 << USB_PORT_FEAT_C_SUSPEND; + + return status; +} + int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { @@ -598,104 +722,20 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, if (!wIndex || wIndex > max_ports) goto error; wIndex--; - status = 0; temp = xhci_readl(xhci, port_array[wIndex]); if (temp == 0xffffffff) { retval = -ENODEV; break; } - xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp); - - /* wPortChange bits */ - if (temp & PORT_CSC) - status |= USB_PORT_STAT_C_CONNECTION << 16; - if (temp & PORT_PEC) - status |= USB_PORT_STAT_C_ENABLE << 16; - if ((temp & PORT_OCC)) - status |= USB_PORT_STAT_C_OVERCURRENT << 16; - if ((temp & PORT_RC)) - status |= USB_PORT_STAT_C_RESET << 16; - /* USB3.0 only */ - if (hcd->speed == HCD_USB3) { - if ((temp & PORT_PLC)) - status |= USB_PORT_STAT_C_LINK_STATE << 16; - if ((temp & PORT_WRC)) - status |= USB_PORT_STAT_C_BH_RESET << 16; - } + status = xhci_get_port_status(hcd, bus_state, port_array, + wIndex, temp); + if (status == 0xffffffff) + goto error; - if (hcd->speed != HCD_USB3) { - if ((temp & PORT_PLS_MASK) == XDEV_U3 - && (temp & PORT_POWER)) - status |= USB_PORT_STAT_SUSPEND; - } - if ((temp & PORT_PLS_MASK) == XDEV_RESUME && - !DEV_SUPERSPEED(temp)) { - if ((temp & PORT_RESET) || !(temp & PORT_PE)) - goto error; - if (time_after_eq(jiffies, - bus_state->resume_done[wIndex])) { - xhci_dbg(xhci, "Resume USB2 port %d\n", - wIndex + 1); - bus_state->resume_done[wIndex] = 0; - clear_bit(wIndex, &bus_state->resuming_ports); - xhci_set_link_state(xhci, port_array, wIndex, - XDEV_U0); - xhci_dbg(xhci, "set port %d resume\n", - wIndex + 1); - slot_id = xhci_find_slot_id_by_port(hcd, xhci, - wIndex + 1); - if (!slot_id) { - xhci_dbg(xhci, "slot_id is zero\n"); - goto error; - } - xhci_ring_device(xhci, slot_id); - bus_state->port_c_suspend |= 1 << wIndex; - bus_state->suspended_ports &= ~(1 << wIndex); - } else { - /* - * The resume has been signaling for less than - * 20ms. Report the port status as SUSPEND, - * let the usbcore check port status again - * and clear resume signaling later. - */ - status |= USB_PORT_STAT_SUSPEND; - } - } - if ((temp & PORT_PLS_MASK) == XDEV_U0 - && (temp & PORT_POWER) - && (bus_state->suspended_ports & (1 << wIndex))) { - bus_state->suspended_ports &= ~(1 << wIndex); - if (hcd->speed != HCD_USB3) - bus_state->port_c_suspend |= 1 << wIndex; - } - if (temp & PORT_CONNECT) { - status |= USB_PORT_STAT_CONNECTION; - status |= xhci_port_speed(temp); - } - if (temp & PORT_PE) - status |= USB_PORT_STAT_ENABLE; - if (temp & PORT_OC) - status |= USB_PORT_STAT_OVERCURRENT; - if (temp & PORT_RESET) - status |= USB_PORT_STAT_RESET; - if (temp & PORT_POWER) { - if (hcd->speed == HCD_USB3) - status |= USB_SS_PORT_STAT_POWER; - else - status |= USB_PORT_STAT_POWER; - } - /* Update Port Link State for super speed ports*/ - if (hcd->speed == HCD_USB3) { - xhci_hub_report_link_state(&status, temp); - /* - * Verify if all USB3 Ports Have entered U0 already. - * Delete Compliance Mode Timer if so. 
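/*
 * Illustrative sketch (not from the patch) of the reply marshalling used
 * at the end of the GetPortStatus case: the wPortStatus/wPortChange pair
 * is little-endian on the wire and the transfer buffer has no alignment
 * guarantee, hence cpu_to_le32() combined with put_unaligned() rather
 * than a plain 32-bit store.  fill_port_status() is hypothetical.
 */
static void fill_port_status(char *buf, u32 status)
{
	put_unaligned(cpu_to_le32(status), (__le32 *) buf);
}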
- */ - xhci_del_comp_mod_timer(xhci, temp, wIndex); - } - if (bus_state->port_c_suspend & (1 << wIndex)) - status |= 1 << USB_PORT_FEAT_C_SUSPEND; + xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", + wIndex, temp); xhci_dbg(xhci, "Get port status returned 0x%x\n", status); + put_unaligned(cpu_to_le32(status), (__le32 *) buf); break; case SetPortFeature: diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index df6978abd7e..53b972c2a09 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -24,8 +24,10 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/dmapool.h> +#include <linux/dma-mapping.h> #include "xhci.h" +#include "xhci-trace.h" /* * Allocates a generic ring segment from the ring pool, sets the dma address, @@ -347,7 +349,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, return -ENOMEM; xhci_link_rings(xhci, ring, first, last, num_segs); - xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, + "ring expansion succeed, now has %d segments", ring->num_segs); return 0; @@ -481,17 +484,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring( return ep->ring; } -/* Only use this when you know stream_info is valid */ -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING -static struct xhci_ring *dma_to_stream_ring( - struct xhci_stream_info *stream_info, - u64 address) -{ - return radix_tree_lookup(&stream_info->trb_address_map, - address >> TRB_SEGMENT_SHIFT); -} -#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ - struct xhci_ring *xhci_stream_id_to_ring( struct xhci_virt_device *dev, unsigned int ep_index, @@ -509,58 +501,6 @@ struct xhci_ring *xhci_stream_id_to_ring( return ep->stream_info->stream_rings[stream_id]; } -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING -static int xhci_test_radix_tree(struct xhci_hcd *xhci, - unsigned int num_streams, - struct xhci_stream_info *stream_info) -{ - u32 cur_stream; - struct xhci_ring *cur_ring; - u64 addr; - - for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { - struct xhci_ring *mapped_ring; - int trb_size = sizeof(union xhci_trb); - - cur_ring = stream_info->stream_rings[cur_stream]; - for (addr = cur_ring->first_seg->dma; - addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE; - addr += trb_size) { - mapped_ring = dma_to_stream_ring(stream_info, addr); - if (cur_ring != mapped_ring) { - xhci_warn(xhci, "WARN: DMA address 0x%08llx " - "didn't map to stream ID %u; " - "mapped to ring %p\n", - (unsigned long long) addr, - cur_stream, - mapped_ring); - return -EINVAL; - } - } - /* One TRB after the end of the ring segment shouldn't return a - * pointer to the current ring (although it may be a part of a - * different ring). - */ - mapped_ring = dma_to_stream_ring(stream_info, addr); - if (mapped_ring != cur_ring) { - /* One TRB before should also fail */ - addr = cur_ring->first_seg->dma - trb_size; - mapped_ring = dma_to_stream_ring(stream_info, addr); - } - if (mapped_ring == cur_ring) { - xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx " - "mapped to valid stream ID %u; " - "mapped ring = %p\n", - (unsigned long long) addr, - cur_stream, - mapped_ring); - return -EINVAL; - } - } - return 0; -} -#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ - /* * Change an endpoint's internal structure so it supports stream IDs. 
The * number of requested streams includes stream 0, which cannot be used by device @@ -687,13 +627,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, * was any other way, the host controller would assume the ring is * "empty" and wait forever for data to be queued to that stream ID). */ -#if XHCI_DEBUG - /* Do a little test on the radix tree to make sure it returns the - * correct values. - */ - if (xhci_test_radix_tree(xhci, num_streams, stream_info)) - goto cleanup_rings; -#endif return stream_info; @@ -731,7 +664,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. */ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; - xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Setting number of stream ctx array entries to %u", 1 << (max_primary_streams + 1)); ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) @@ -1613,7 +1547,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) struct device *dev = xhci_to_hcd(xhci)->self.controller; int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); - xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Allocating %d scratchpad buffers", num_sp); if (!num_sp) return 0; @@ -1770,11 +1705,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) dma_free_coherent(&pdev->dev, size, xhci->erst.entries, xhci->erst.erst_dma_addr); xhci->erst.entries = NULL; - xhci_dbg(xhci, "Freed ERST\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST"); if (xhci->event_ring) xhci_ring_free(xhci, xhci->event_ring); xhci->event_ring = NULL; - xhci_dbg(xhci, "Freed event ring\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring"); if (xhci->lpm_command) xhci_free_command(xhci, xhci->lpm_command); @@ -1782,7 +1717,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) if (xhci->cmd_ring) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; - xhci_dbg(xhci, "Freed command ring\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring"); list_for_each_entry_safe(cur_cd, next_cd, &xhci->cancel_cmd_list, cancel_cmd_list) { list_del(&cur_cd->cancel_cmd_list); @@ -1795,22 +1730,24 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) if (xhci->segment_pool) dma_pool_destroy(xhci->segment_pool); xhci->segment_pool = NULL; - xhci_dbg(xhci, "Freed segment pool\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool"); if (xhci->device_pool) dma_pool_destroy(xhci->device_pool); xhci->device_pool = NULL; - xhci_dbg(xhci, "Freed device context pool\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool"); if (xhci->small_streams_pool) dma_pool_destroy(xhci->small_streams_pool); xhci->small_streams_pool = NULL; - xhci_dbg(xhci, "Freed small stream array pool\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Freed small stream array pool"); if (xhci->medium_streams_pool) dma_pool_destroy(xhci->medium_streams_pool); xhci->medium_streams_pool = NULL; - xhci_dbg(xhci, "Freed medium stream array pool\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Freed medium stream array pool"); if (xhci->dcbaa) dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa), @@ -2036,8 +1973,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) * there might be more events to service. 
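/*
 * Worked example (illustrative) for the MaxPStreams computation above.
 * fls() returns the one-based index of the most significant set bit
 * (fls(0x10) == 5, fls(0x100) == 9), and the xHCI spec sizes the stream
 * context array as 2^(MaxPStreams + 1) entries, so for 256 allocated
 * stream contexts: fls(256) - 2 == 7, and 1 << (7 + 1) == 256 entries.
 */
static u32 example_max_pstreams(unsigned int num_stream_ctxs)
{
	return fls(num_stream_ctxs) - 2;	/* 256 -> 7, 32 -> 4 */
}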
*/ temp &= ~ERST_EHB; - xhci_dbg(xhci, "// Write event ring dequeue pointer, " - "preserving EHB bit\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Write event ring dequeue pointer, " + "preserving EHB bit"); xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, &xhci->ir_set->erst_dequeue); } @@ -2060,8 +1998,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, temp = xhci_readl(xhci, addr + 2); port_offset = XHCI_EXT_PORT_OFF(temp); port_count = XHCI_EXT_PORT_COUNT(temp); - xhci_dbg(xhci, "Ext Cap %p, port offset = %u, " - "count = %u, revision = 0x%x\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Ext Cap %p, port offset = %u, " + "count = %u, revision = 0x%x", addr, port_offset, port_count, major_revision); /* Port count includes the current port offset */ if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) @@ -2075,15 +2014,18 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, /* Check the host's USB2 LPM capability */ if ((xhci->hci_version == 0x96) && (major_revision != 0x03) && (temp & XHCI_L1C)) { - xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xHCI 0.96: support USB2 software lpm"); xhci->sw_lpm_support = 1; } if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) { - xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xHCI 1.0: support USB2 software lpm"); xhci->sw_lpm_support = 1; if (temp & XHCI_HLC) { - xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xHCI 1.0: support USB2 hardware lpm"); xhci->hw_lpm_support = 1; } } @@ -2207,18 +2149,21 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) xhci_warn(xhci, "No ports on the roothubs?\n"); return -ENODEV; } - xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Found %u USB 2.0 ports and %u USB 3.0 ports.", xhci->num_usb2_ports, xhci->num_usb3_ports); /* Place limits on the number of roothub ports so that the hub * descriptors aren't longer than the USB core will allocate. 
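/*
 * Worked example (illustrative) of the Supported Protocol capability
 * decode in xhci_add_in_port() above: the dword read at "addr + 2" packs
 * the first root-hub port number in bits 7:0 and the port count in bits
 * 15:8, so a value of 0x00000401 describes four ports starting at port 1.
 */
static void example_decode_ports(u32 dword, unsigned int *offset,
				 unsigned int *count)
{
	*offset = XHCI_EXT_PORT_OFF(dword);	/* 0x00000401 -> 1 */
	*count  = XHCI_EXT_PORT_COUNT(dword);	/* 0x00000401 -> 4 */
}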
*/ if (xhci->num_usb3_ports > 15) { - xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Limiting USB 3.0 roothub ports to 15."); xhci->num_usb3_ports = 15; } if (xhci->num_usb2_ports > USB_MAXCHILDREN) { - xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Limiting USB 2.0 roothub ports to %u.", USB_MAXCHILDREN); xhci->num_usb2_ports = USB_MAXCHILDREN; } @@ -2243,8 +2188,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) xhci->usb2_ports[port_index] = &xhci->op_regs->port_status_base + NUM_PORT_REGS*i; - xhci_dbg(xhci, "USB 2.0 port at index %u, " - "addr = %p\n", i, + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "USB 2.0 port at index %u, " + "addr = %p", i, xhci->usb2_ports[port_index]); port_index++; if (port_index == xhci->num_usb2_ports) @@ -2263,8 +2209,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) xhci->usb3_ports[port_index] = &xhci->op_regs->port_status_base + NUM_PORT_REGS*i; - xhci_dbg(xhci, "USB 3.0 port at index %u, " - "addr = %p\n", i, + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "USB 3.0 port at index %u, " + "addr = %p", i, xhci->usb3_ports[port_index]); port_index++; if (port_index == xhci->num_usb3_ports) @@ -2288,32 +2235,35 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) INIT_LIST_HEAD(&xhci->cancel_cmd_list); page_size = xhci_readl(xhci, &xhci->op_regs->page_size); - xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Supported page size register = 0x%x", page_size); for (i = 0; i < 16; i++) { if ((0x1 & page_size) != 0) break; page_size = page_size >> 1; } if (i < 16) - xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Supported page size of %iK", (1 << (i+12)) / 1024); else xhci_warn(xhci, "WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ xhci->page_shift = 12; xhci->page_size = 1 << xhci->page_shift; - xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "HCD page size set to %iK", xhci->page_size / 1024); /* * Program the Number of Device Slots Enabled field in the CONFIG * register with the max value of slots the HC can handle. 
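/*
 * Worked example (illustrative) of the PAGESIZE decoding loop in
 * xhci_mem_init() above: bit n of the operational PAGESIZE register
 * advertises support for pages of 2^(n + 12) bytes, so a register value
 * of 0x1 means 4K pages and 0x8 would mean 32K pages.
 */
static unsigned int example_page_bytes(u32 page_size_reg)
{
	unsigned int i;

	for (i = 0; i < 16; i++)
		if (page_size_reg & (1 << i))
			return 1 << (i + 12);	/* 0x1 -> 4096, 0x8 -> 32768 */

	return 0;	/* no supported page size advertised */
}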
*/ val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); - xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", - (unsigned int) val); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// xHC can handle at most %d device slots.", val); val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); val |= (val2 & ~HCS_SLOTS_MASK); - xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", - (unsigned int) val); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Setting Max device slots reg = 0x%x.", val); xhci_writel(xhci, val, &xhci->op_regs->config_reg); /* @@ -2326,7 +2276,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) goto fail; memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); xhci->dcbaa->dma = dma; - xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Device context base array address = 0x%llx (DMA), %p (virt)", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); @@ -2365,8 +2316,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); if (!xhci->cmd_ring) goto fail; - xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); - xhci_dbg(xhci, "First segment DMA is 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Allocated command ring at %p", xhci->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx", (unsigned long long)xhci->cmd_ring->first_seg->dma); /* Set the address in the Command Ring Control register */ @@ -2374,7 +2326,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; - xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Setting command ring address to 0x%x", val); xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); xhci_dbg_cmd_ptrs(xhci); @@ -2390,8 +2343,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val = xhci_readl(xhci, &xhci->cap_regs->db_off); val &= DBOFF_MASK; - xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" - " from cap regs base addr\n", val); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Doorbell array is located at offset 0x%x" + " from cap regs base addr", val); xhci->dba = (void __iomem *) xhci->cap_regs + val; xhci_dbg_regs(xhci); xhci_print_run_regs(xhci); @@ -2402,7 +2356,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * Event ring setup: Allocate a normal ring, but also setup * the event ring segment table (ERST). Section 4.9.3. 
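/*
 * Illustrative sketch (not from the patch) of the coherent allocation
 * behind the device context base array set up above: dma_alloc_coherent()
 * returns both the CPU virtual address and the DMA address that gets
 * programmed into the controller register.  The 2048-byte size here
 * (256 slots * 8 bytes) is a placeholder.
 */
static void *example_alloc_dcbaa(struct device *dev, dma_addr_t *dma)
{
	void *virt = dma_alloc_coherent(dev, 2048, dma, GFP_KERNEL);

	if (virt)
		memset(virt, 0, 2048);	/* the controller expects a zeroed array */
	return virt;
}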
*/ - xhci_dbg(xhci, "// Allocating event ring\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring"); xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, flags); if (!xhci->event_ring) @@ -2415,13 +2369,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) GFP_KERNEL); if (!xhci->erst.entries) goto fail; - xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Allocated event ring segment table at 0x%llx", (unsigned long long)dma); memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); xhci->erst.num_entries = ERST_NUM_SEGS; xhci->erst.erst_dma_addr = dma; - xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx", xhci->erst.num_entries, xhci->erst.entries, (unsigned long long)xhci->erst.erst_dma_addr); @@ -2439,13 +2395,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val = xhci_readl(xhci, &xhci->ir_set->erst_size); val &= ERST_SIZE_MASK; val |= ERST_NUM_SEGS; - xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Write ERST size = %i to ir_set 0 (some bits preserved)", val); xhci_writel(xhci, val, &xhci->ir_set->erst_size); - xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Set ERST entries to point to event ring."); /* set the segment table base address */ - xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Set ERST base address for ir_set 0 = 0x%llx", (unsigned long long)xhci->erst.erst_dma_addr); val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); val_64 &= ERST_PTR_MASK; @@ -2454,7 +2413,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); - xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Wrote ERST address to ir_set 0."); xhci_print_ir_set(xhci, 0); /* diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index cc24e39b97d..c2d495057eb 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include "xhci.h" +#include "xhci-trace.h" /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 @@ -64,16 +65,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; - xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" - " endpoint cmd after reset endpoint\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "QUIRK: Fresco Logic xHC needs configure" + " endpoint cmd after reset endpoint"); } /* Fresco Logic confirms: all revisions of this chip do not * support MSI, even though some of them claim to in their PCI * capabilities. 
*/ xhci->quirks |= XHCI_BROKEN_MSI; - xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u " - "has broken MSI implementation\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "QUIRK: Fresco Logic revision %u " + "has broken MSI implementation", pdev->revision); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } @@ -93,7 +96,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { - xhci->quirks |= XHCI_SPURIOUS_SUCCESS; xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; xhci->quirks |= XHCI_SW_BW_CHECKING; @@ -111,7 +113,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "QUIRK: Resetting on resume"); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_VIA) @@ -250,13 +253,15 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) * writers. * * Unconditionally switch the ports back to xHCI after a system resume. - * We can't tell whether the EHCI or xHCI controller will be resumed - * first, so we have to do the port switchover in both drivers. Writing - * a '1' to the port switchover registers should have no effect if the - * port was already switched over. + * It should not matter whether the EHCI or xHCI controller is + * resumed first. It's enough to do the switchover in xHCI because + * USB core won't notice anything as the hub driver doesn't start + * running again until after all the devices (including both EHCI and + * xHCI host controllers) have been resumed. 
*/ - if (usb_is_intel_switchable_xhci(pdev)) - usb_enable_xhci_ports(pdev); + + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(pdev); retval = xhci_resume(xhci, hibernated); return retval; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 51e22bf8950..d9c169f470d 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -14,6 +14,8 @@ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/of.h> +#include <linux/dma-mapping.h> #include "xhci.h" @@ -24,7 +26,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) * here that the generic code does not try to make a pci_dev from our * dev struct in order to setup MSI */ - xhci->quirks |= XHCI_BROKEN_MSI; + xhci->quirks |= XHCI_PLAT; } /* called during probe() after chip reset completes */ @@ -104,6 +106,15 @@ static int xhci_plat_probe(struct platform_device *pdev) if (!res) return -ENODEV; + /* Initialize dma_mask and coherent_dma_mask to 32-bits */ + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + else + dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) return -ENOMEM; @@ -186,11 +197,46 @@ static int xhci_plat_remove(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM +static int xhci_plat_suspend(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + return xhci_suspend(xhci); +} + +static int xhci_plat_resume(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + return xhci_resume(xhci, 0); +} + +static const struct dev_pm_ops xhci_plat_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume) +}; +#define DEV_PM_OPS (&xhci_plat_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM */ + +#ifdef CONFIG_OF +static const struct of_device_id usb_xhci_of_match[] = { + { .compatible = "xhci-platform" }, + { }, +}; +MODULE_DEVICE_TABLE(of, usb_xhci_of_match); +#endif + static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, .driver = { .name = "xhci-hcd", + .pm = DEV_PM_OPS, + .of_match_table = of_match_ptr(usb_xhci_of_match), }, }; MODULE_ALIAS("platform:xhci-hcd"); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1e57eafa691..411da1fc7ae 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -67,6 +67,7 @@ #include <linux/scatterlist.h> #include <linux/slab.h> #include "xhci.h" +#include "xhci-trace.h" static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, @@ -434,7 +435,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, /* A ring has pending URBs if its TD list is not empty */ if (!(ep->ep_state & EP_HAS_STREAMS)) { - if (!(list_empty(&ep->ring->td_list))) + if (ep->ring && !(list_empty(&ep->ring->td_list))) xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); return; } @@ -555,7 +556,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, return; } state->new_cycle_state = 0; - xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Finding segment containing stopped TRB."); state->new_deq_seg = find_trb_seg(cur_td->start_seg, 
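/*
 * Illustrative sketch (not from the patch) of the conditional PM-ops
 * pattern xhci-plat adopts above: SET_SYSTEM_SLEEP_PM_OPS() populates the
 * system-sleep callbacks only when CONFIG_PM_SLEEP is set, and a single
 * macro lets the driver struct point at either the ops or NULL without
 * further #ifdefs.  "foo" names are placeholders; .probe/.remove are
 * omitted for brevity.
 */
#ifdef CONFIG_PM
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= FOO_PM_OPS,
	},
};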
dev->eps[ep_index].stopped_trb, &state->new_cycle_state); @@ -565,12 +567,14 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, } /* Dig out the cycle state saved by the xHC during the stop ep cmd */ - xhci_dbg(xhci, "Finding endpoint context\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Finding endpoint context"); ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); state->new_deq_ptr = cur_td->last_trb; - xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Finding segment containing last TRB in TD."); state->new_deq_seg = find_trb_seg(state->new_deq_seg, state->new_deq_ptr, &state->new_cycle_state); @@ -597,13 +601,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, if (ep_ring->first_seg == ep_ring->first_seg->next && state->new_deq_ptr < dev->eps[ep_index].stopped_trb) state->new_cycle_state ^= 0x1; - xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Cycle state = 0x%x", state->new_cycle_state); /* Don't update the ring cycle state for the producer (us). */ - xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "New dequeue segment = %p (virtual)", state->new_deq_seg); addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); - xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "New dequeue pointer = 0x%llx (DMA)", (unsigned long long) addr); } @@ -631,9 +638,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, if (flip_cycle) cur_trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); - xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); - xhci_dbg(xhci, "Address = %p (0x%llx dma); " - "in seg %p (0x%llx dma)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Cancel (unchain) link TRB"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Address = %p (0x%llx dma); " + "in seg %p (0x%llx dma)", cur_trb, (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), cur_seg, @@ -651,7 +660,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, cpu_to_le32(TRB_CYCLE); cur_trb->generic.field[3] |= cpu_to_le32( TRB_TYPE(TRB_TR_NOOP)); - xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "TRB to noop at offset 0x%llx", (unsigned long long) xhci_trb_virt_to_dma(cur_seg, cur_trb)); } @@ -672,8 +682,9 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; - xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " - "new deq ptr = %p (0x%llx dma), new cycle = %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " + "new deq ptr = %p (0x%llx dma), new cycle = %u", deq_state->new_deq_seg, (unsigned long long)deq_state->new_deq_seg->dma, deq_state->new_deq_ptr, @@ -793,7 +804,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, */ list_for_each(entry, &ep->cancelled_td_list) { cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); - xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Removing canceled TD starting at 0x%llx (dma).", (unsigned long long)xhci_trb_virt_to_dma( cur_td->start_seg, cur_td->first_trb)); ep_ring = 
xhci_urb_to_transfer_ring(xhci, cur_td->urb); @@ -913,14 +925,16 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) ep->stop_cmds_pending--; if (xhci->xhc_state & XHCI_STATE_DYING) { - xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " - "xHCI as DYING, exiting.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Stop EP timer ran, but another timer marked " + "xHCI as DYING, exiting."); spin_unlock_irqrestore(&xhci->lock, flags); return; } if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { - xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " - "exiting.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Stop EP timer ran, but no command pending, " + "exiting."); spin_unlock_irqrestore(&xhci->lock, flags); return; } @@ -962,8 +976,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) ring = temp_ep->ring; if (!ring) continue; - xhci_dbg(xhci, "Killing URBs for slot ID %u, " - "ep index %u\n", i, j); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Killing URBs for slot ID %u, " + "ep index %u", i, j); while (!list_empty(&ring->td_list)) { cur_td = list_first_entry(&ring->td_list, struct xhci_td, @@ -986,9 +1001,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) } } spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "Calling usb_hc_died()\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Calling usb_hc_died()"); usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); - xhci_dbg(xhci, "xHCI host controller is dead.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "xHCI host controller is dead."); } @@ -1092,7 +1109,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, ep_state &= EP_STATE_MASK; slot_state = le32_to_cpu(slot_ctx->dev_state); slot_state = GET_SLOT_STATE(slot_state); - xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Slot state = %u, EP state = %u", slot_state, ep_state); break; case COMP_EBADSLT: @@ -1112,7 +1130,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, * cancelling URBs, which might not be an error... */ } else { - xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Successful Set TR Deq Ptr cmd, deq = @%08llx", le64_to_cpu(ep_ctx->deq)); if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, dev->eps[ep_index].queued_deq_ptr) == @@ -1150,7 +1169,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, /* This command will only fail if the endpoint wasn't halted, * but we don't care. */ - xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Ignoring reset ep completion code of %u", GET_COMP_CODE(le32_to_cpu(event->status))); /* HW with the reset endpoint quirk needs to have a configure endpoint @@ -1158,7 +1178,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, * because the HW can't handle two commands being queued in a row. 
*/ if (xhci->quirks & XHCI_RESET_EP_QUIRK) { - xhci_dbg(xhci, "Queueing configure endpoint command\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Queueing configure endpoint command"); xhci_queue_configure_endpoint(xhci, xhci->devs[slot_id]->in_ctx->dma, slot_id, false); @@ -1377,6 +1398,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, return; } + trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic, + (struct xhci_generic_trb *) event); + if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) || (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) { /* If the return value is 0, we think the trb pointed by @@ -1444,8 +1468,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; if (!(ep_state & EP_HALTED)) goto bandwidth_change; - xhci_dbg(xhci, "Completed config ep cmd - " - "last ep index = %d, state = %d\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Completed config ep cmd - " + "last ep index = %d, state = %d", ep_index, ep_state); /* Clear internal halted state and restart ring(s) */ xhci->devs[slot_id]->eps[ep_index].ep_state &= @@ -1454,7 +1479,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, break; } bandwidth_change: - xhci_dbg(xhci, "Completed config ep cmd\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Completed config ep cmd"); xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); complete(&xhci->devs[slot_id]->cmd_completion); @@ -1497,7 +1523,8 @@ bandwidth_change: xhci->error_bitmask |= 1 << 6; break; } - xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "NEC firmware version %2x.%02x", NEC_FW_MAJOR(le32_to_cpu(event->status)), NEC_FW_MINOR(le32_to_cpu(event->status))); break; @@ -2877,8 +2904,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, return -ENOMEM; } - xhci_dbg(xhci, "ERROR no room on ep ring, " - "try ring expansion\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, + "ERROR no room on ep ring, try ring expansion"); num_trbs_needed = num_trbs - ep_ring->num_trbs_free; if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, mem_flags)) { @@ -3060,14 +3087,10 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, * to set the polling interval (once the API is added). */ if (xhci_interval != ep_interval) { - if (printk_ratelimit()) - dev_dbg(&urb->dev->dev, "Driver uses different interval" - " (%d microframe%s) than xHCI " - "(%d microframe%s)\n", - ep_interval, - ep_interval == 1 ? "" : "s", - xhci_interval, - xhci_interval == 1 ? "" : "s"); + dev_dbg_ratelimited(&urb->dev->dev, + "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", + ep_interval, ep_interval == 1 ? "" : "s", + xhci_interval, xhci_interval == 1 ? "" : "s"); urb->interval = xhci_interval; /* Convert back to frames for LS/FS devices */ if (urb->dev->speed == USB_SPEED_LOW || @@ -3849,14 +3872,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, * to set the polling interval (once the API is added). */ if (xhci_interval != ep_interval) { - if (printk_ratelimit()) - dev_dbg(&urb->dev->dev, "Driver uses different interval" - " (%d microframe%s) than xHCI " - "(%d microframe%s)\n", - ep_interval, - ep_interval == 1 ? "" : "s", - xhci_interval, - xhci_interval == 1 ? 
"" : "s"); + dev_dbg_ratelimited(&urb->dev->dev, + "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", + ep_interval, ep_interval == 1 ? "" : "s", + xhci_interval, xhci_interval == 1 ? "" : "s"); urb->interval = xhci_interval; /* Convert back to frames for LS/FS devices */ if (urb->dev->speed == USB_SPEED_LOW || diff --git a/drivers/usb/host/xhci-trace.c b/drivers/usb/host/xhci-trace.c new file mode 100644 index 00000000000..7cf30c83dcf --- /dev/null +++ b/drivers/usb/host/xhci-trace.c @@ -0,0 +1,15 @@ +/* + * xHCI host controller driver + * + * Copyright (C) 2013 Xenia Ragiadakou + * + * Author: Xenia Ragiadakou + * Email : burzalodowa@gmail.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define CREATE_TRACE_POINTS +#include "xhci-trace.h" diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h new file mode 100644 index 00000000000..20364cc8d2f --- /dev/null +++ b/drivers/usb/host/xhci-trace.h @@ -0,0 +1,151 @@ +/* + * xHCI host controller driver + * + * Copyright (C) 2013 Xenia Ragiadakou + * + * Author: Xenia Ragiadakou + * Email : burzalodowa@gmail.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xhci-hcd + +#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __XHCI_TRACE_H + +#include <linux/tracepoint.h> +#include "xhci.h" + +#define XHCI_MSG_MAX 500 + +DECLARE_EVENT_CLASS(xhci_log_msg, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf), + TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)), + TP_fast_assign( + vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va); + ), + TP_printk("%s", __get_str(msg)) +); + +DEFINE_EVENT(xhci_log_msg, xhci_dbg_address, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(xhci_log_msg, xhci_dbg_init, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DECLARE_EVENT_CLASS(xhci_log_ctx, + TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + unsigned int ep_num), + TP_ARGS(xhci, ctx, ep_num), + TP_STRUCT__entry( + __field(int, ctx_64) + __field(unsigned, ctx_type) + __field(dma_addr_t, ctx_dma) + __field(u8 *, ctx_va) + __field(unsigned, ctx_ep_num) + __field(int, slot_id) + __dynamic_array(u32, ctx_data, + ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) * + ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1)) + ), + TP_fast_assign( + struct usb_device *udev; + + udev = to_usb_device(xhci_to_hcd(xhci)->self.controller); + __entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params); + __entry->ctx_type = ctx->type; + __entry->ctx_dma = ctx->dma; + __entry->ctx_va = ctx->bytes; + __entry->slot_id = udev->slot_id; + __entry->ctx_ep_num = ep_num; + 
memcpy(__get_dynamic_array(ctx_data), ctx->bytes, + ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) * + ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1)); + ), + TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p", + __entry->ctx_64, __entry->ctx_type, + (unsigned long long) __entry->ctx_dma, __entry->ctx_va + ) +); + +DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx, + TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + unsigned int ep_num), + TP_ARGS(xhci, ctx, ep_num) +); + +DECLARE_EVENT_CLASS(xhci_log_event, + TP_PROTO(void *trb_va, struct xhci_generic_trb *ev), + TP_ARGS(trb_va, ev), + TP_STRUCT__entry( + __field(void *, va) + __field(u64, dma) + __field(u32, status) + __field(u32, flags) + __dynamic_array(__le32, trb, 4) + ), + TP_fast_assign( + __entry->va = trb_va; + __entry->dma = le64_to_cpu(((u64)ev->field[1]) << 32 | + ev->field[0]); + __entry->status = le32_to_cpu(ev->field[2]); + __entry->flags = le32_to_cpu(ev->field[3]); + memcpy(__get_dynamic_array(trb), trb_va, + sizeof(struct xhci_generic_trb)); + ), + TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x", + (unsigned long long) __entry->dma, __entry->va, + __entry->status, __entry->flags + ) +); + +DEFINE_EVENT(xhci_log_event, xhci_cmd_completion, + TP_PROTO(void *trb_va, struct xhci_generic_trb *ev), + TP_ARGS(trb_va, ev) +); + +#endif /* __XHCI_TRACE_H */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE xhci-trace + +#include <trace/define_trace.h> diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2c49f00260c..49b6edb84a7 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -27,8 +27,10 @@ #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/dmi.h> +#include <linux/dma-mapping.h> #include "xhci.h" +#include "xhci-trace.h" #define DRIVER_AUTHOR "Sarah Sharp" #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" @@ -100,7 +102,7 @@ void xhci_quiesce(struct xhci_hcd *xhci) int xhci_halt(struct xhci_hcd *xhci) { int ret; - xhci_dbg(xhci, "// Halt the HC\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); xhci_quiesce(xhci); ret = xhci_handshake(xhci, &xhci->op_regs->status, @@ -124,7 +126,7 @@ static int xhci_start(struct xhci_hcd *xhci) temp = xhci_readl(xhci, &xhci->op_regs->command); temp |= (CMD_RUN); - xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", temp); xhci_writel(xhci, temp, &xhci->op_regs->command); @@ -162,7 +164,7 @@ int xhci_reset(struct xhci_hcd *xhci) return 0; } - xhci_dbg(xhci, "// Reset the HC\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); command = xhci_readl(xhci, &xhci->op_regs->command); command |= CMD_RESET; xhci_writel(xhci, command, &xhci->op_regs->command); @@ -172,7 +174,8 @@ int xhci_reset(struct xhci_hcd *xhci) if (ret) return ret; - xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Wait for controller to be ready for doorbell rings"); /* * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. 
@@ -214,14 +217,16 @@ static int xhci_setup_msi(struct xhci_hcd *xhci) ret = pci_enable_msi(pdev); if (ret) { - xhci_dbg(xhci, "failed to allocate MSI entry\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "failed to allocate MSI entry"); return ret; } ret = request_irq(pdev->irq, xhci_msi_irq, 0, "xhci_hcd", xhci_to_hcd(xhci)); if (ret) { - xhci_dbg(xhci, "disable MSI interrupt\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "disable MSI interrupt"); pci_disable_msi(pdev); } @@ -284,7 +289,8 @@ static int xhci_setup_msix(struct xhci_hcd *xhci) ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count); if (ret) { - xhci_dbg(xhci, "Failed to enable MSI-X\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Failed to enable MSI-X"); goto free_entries; } @@ -300,7 +306,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci) return ret; disable_msix: - xhci_dbg(xhci, "disable MSI-X interrupt\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); xhci_free_irq(xhci); pci_disable_msix(pdev); free_entries: @@ -329,7 +335,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci) return; } -static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) +static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) { int i; @@ -342,9 +348,14 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) static int xhci_try_enable_msi(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + struct pci_dev *pdev; int ret; + /* The xhci platform device has set up IRQs through usb_add_hcd. */ + if (xhci->quirks & XHCI_PLAT) + return 0; + + pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); /* * Some Fresco Logic host controllers advertise MSI, but fail to * generate interrupts. Don't even try to enable MSI. @@ -417,9 +428,11 @@ static void compliance_mode_recovery(unsigned long arg) * Compliance Mode Detected. 
Letting USB Core * handle the Warm Reset */ - xhci_dbg(xhci, "Compliance mode detected->port %d\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Compliance mode detected->port %d", i + 1); - xhci_dbg(xhci, "Attempting compliance mode recovery\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Attempting compliance mode recovery"); hcd = xhci->shared_hcd; if (hcd->state == HC_STATE_SUSPENDED) @@ -457,7 +470,8 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) set_timer_slack(&xhci->comp_mode_recovery_timer, msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); add_timer(&xhci->comp_mode_recovery_timer); - xhci_dbg(xhci, "Compliance mode recovery timer initialized\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Compliance mode recovery timer initialized"); } /* @@ -505,16 +519,18 @@ int xhci_init(struct usb_hcd *hcd) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int retval = 0; - xhci_dbg(xhci, "xhci_init\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); spin_lock_init(&xhci->lock); if (xhci->hci_version == 0x95 && link_quirk) { - xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "QUIRK: Not clearing Link TRB chain bits."); xhci->quirks |= XHCI_LINK_TRB_QUIRK; } else { - xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xHCI doesn't need link TRB QUIRK"); } retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg(xhci, "Finished xhci_init\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); /* Initializing Compliance Mode Recovery Data If Needed */ if (xhci_compliance_mode_recovery_timer_quirk_check()) { @@ -528,57 +544,6 @@ int xhci_init(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING -static void xhci_event_ring_work(unsigned long arg) -{ - unsigned long flags; - int temp; - u64 temp_64; - struct xhci_hcd *xhci = (struct xhci_hcd *) arg; - int i, j; - - xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies); - - spin_lock_irqsave(&xhci->lock, flags); - temp = xhci_readl(xhci, &xhci->op_regs->status); - xhci_dbg(xhci, "op reg status = 0x%x\n", temp); - if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || - (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_dbg(xhci, "HW died, polling stopped.\n"); - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } - - temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); - xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp); - xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask); - xhci->error_bitmask = 0; - xhci_dbg(xhci, "Event ring:\n"); - xhci_debug_segment(xhci, xhci->event_ring->deq_seg); - xhci_dbg_ring_ptrs(xhci, xhci->event_ring); - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - temp_64 &= ~ERST_PTR_MASK; - xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); - xhci_dbg(xhci, "Command ring:\n"); - xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); - xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); - xhci_dbg_cmd_ptrs(xhci); - for (i = 0; i < MAX_HC_SLOTS; ++i) { - if (!xhci->devs[i]) - continue; - for (j = 0; j < 31; ++j) { - xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]); - } - } - spin_unlock_irqrestore(&xhci->lock, flags); - - if (!xhci->zombie) - mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ); - else - xhci_dbg(xhci, "Quit polling the event ring.\n"); -} -#endif - static int xhci_run_finished(struct xhci_hcd *xhci) { if 
(xhci_start(xhci)) { @@ -591,7 +556,8 @@ static int xhci_run_finished(struct xhci_hcd *xhci) if (xhci->quirks & XHCI_NEC_HOST) xhci_ring_cmd_db(xhci); - xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Finished xhci_run for USB3 roothub"); return 0; } @@ -622,23 +588,12 @@ int xhci_run(struct usb_hcd *hcd) if (!usb_hcd_is_primary_hcd(hcd)) return xhci_run_finished(xhci); - xhci_dbg(xhci, "xhci_run\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); ret = xhci_try_enable_msi(hcd); if (ret) return ret; -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING - init_timer(&xhci->event_ring_timer); - xhci->event_ring_timer.data = (unsigned long) xhci; - xhci->event_ring_timer.function = xhci_event_ring_work; - /* Poll the event ring */ - xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ; - xhci->zombie = 0; - xhci_dbg(xhci, "Setting event ring polling timer\n"); - add_timer(&xhci->event_ring_timer); -#endif - xhci_dbg(xhci, "Command ring memory map follows:\n"); xhci_debug_ring(xhci, xhci->cmd_ring); xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); @@ -651,9 +606,11 @@ int xhci_run(struct usb_hcd *hcd) xhci_dbg_ring_ptrs(xhci, xhci->event_ring); temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp_64 &= ~ERST_PTR_MASK; - xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "ERST deq = 64'h%0lx", (long unsigned int) temp_64); - xhci_dbg(xhci, "// Set the interrupt modulation register\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Set the interrupt modulation register"); temp = xhci_readl(xhci, &xhci->ir_set->irq_control); temp &= ~ER_IRQ_INTERVAL_MASK; temp |= (u32) 160; @@ -662,12 +619,13 @@ int xhci_run(struct usb_hcd *hcd) /* Set the HCD state before we enable the irqs */ temp = xhci_readl(xhci, &xhci->op_regs->command); temp |= (CMD_EIE); - xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", - temp); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Enable interrupts, cmd = 0x%x.", temp); xhci_writel(xhci, temp, &xhci->op_regs->command); temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); - xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); xhci_writel(xhci, ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); @@ -677,7 +635,8 @@ int xhci_run(struct usb_hcd *hcd) xhci_queue_vendor_command(xhci, 0, 0, 0, TRB_TYPE(TRB_NEC_GET_FW)); - xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Finished xhci_run for USB2 roothub"); return 0; } @@ -725,24 +684,20 @@ void xhci_stop(struct usb_hcd *hcd) xhci_cleanup_msix(xhci); -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING - /* Tell the event ring poll function not to reschedule */ - xhci->zombie = 1; - del_timer_sync(&xhci->event_ring_timer); -#endif - /* Deleting Compliance Mode Recovery Timer */ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { del_timer_sync(&xhci->comp_mode_recovery_timer); - xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "%s: compliance mode recovery timer deleted", __func__); } if (xhci->quirks & XHCI_AMD_PLL_FIX) usb_amd_dev_put(); - xhci_dbg(xhci, "// Disabling event ring interrupts\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Disabling event ring 
interrupts"); temp = xhci_readl(xhci, &xhci->op_regs->status); xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); @@ -750,10 +705,11 @@ void xhci_stop(struct usb_hcd *hcd) &xhci->ir_set->irq_pending); xhci_print_ir_set(xhci, 0); - xhci_dbg(xhci, "cleaning up memory\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); xhci_mem_cleanup(xhci); - xhci_dbg(xhci, "xhci_stop completed - status = %x\n", - xhci_readl(xhci, &xhci->op_regs->status)); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xhci_stop completed - status = %x", + xhci_readl(xhci, &xhci->op_regs->status)); } /* @@ -778,8 +734,9 @@ void xhci_shutdown(struct usb_hcd *hcd) xhci_cleanup_msix(xhci); - xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n", - xhci_readl(xhci, &xhci->op_regs->status)); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "xhci_shutdown completed - status = %x", + xhci_readl(xhci, &xhci->op_regs->status)); } #ifdef CONFIG_PM @@ -820,7 +777,8 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) xhci->cmd_ring->dequeue) & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; - xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Setting command ring address to 0x%llx", (long unsigned long) val_64); xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); } @@ -933,7 +891,8 @@ int xhci_suspend(struct xhci_hcd *xhci) if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { del_timer_sync(&xhci->comp_mode_recovery_timer); - xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "%s: compliance mode recovery timer deleted", __func__); } @@ -998,7 +957,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { del_timer_sync(&xhci->comp_mode_recovery_timer); - xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Compliance Mode Recovery Timer deleted!"); } /* Let the USB core know _both_ roothubs lost power. 
*/ @@ -1011,12 +971,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING - /* Tell the event ring poll function not to reschedule */ - xhci->zombie = 1; - del_timer_sync(&xhci->event_ring_timer); -#endif - xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = xhci_readl(xhci, &xhci->op_regs->status); xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); @@ -1170,35 +1124,33 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_virt_device *virt_dev; if (!hcd || (check_ep && !ep) || !udev) { - printk(KERN_DEBUG "xHCI %s called with invalid args\n", - func); + pr_debug("xHCI %s called with invalid args\n", func); return -EINVAL; } if (!udev->parent) { - printk(KERN_DEBUG "xHCI %s called for root hub\n", - func); + pr_debug("xHCI %s called for root hub\n", func); return 0; } xhci = hcd_to_xhci(hcd); - if (xhci->xhc_state & XHCI_STATE_HALTED) - return -ENODEV; - if (check_virt_dev) { if (!udev->slot_id || !xhci->devs[udev->slot_id]) { - printk(KERN_DEBUG "xHCI %s called with unaddressed " - "device\n", func); + xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", + func); return -EINVAL; } virt_dev = xhci->devs[udev->slot_id]; if (virt_dev->udev != udev) { - printk(KERN_DEBUG "xHCI %s called with udev and " + xhci_dbg(xhci, "xHCI %s called with udev and " "virt_dev does not match\n", func); return -EINVAL; } } + if (xhci->xhc_state & XHCI_STATE_HALTED) + return -ENODEV; + return 1; } @@ -1228,12 +1180,16 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); if (hw_max_packet_size != max_packet_size) { - xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); - xhci_dbg(xhci, "Max packet size in usb_device = %d\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Max Packet Size for ep 0 changed."); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Max packet size in usb_device = %d", max_packet_size); - xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Max packet size in xHCI HW = %d", hw_max_packet_size); - xhci_dbg(xhci, "Issuing evaluate context command.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Issuing evaluate context command."); /* Set up the input context flags for the command */ /* FIXME: This won't work if a non-default control endpoint @@ -1498,7 +1454,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) goto done; temp = xhci_readl(xhci, &xhci->op_regs->status); if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_dbg(xhci, "HW died, freeing TD.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "HW died, freeing TD."); urb_priv = urb->hcpriv; for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { td = urb_priv->td[i]; @@ -1516,8 +1473,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } if ((xhci->xhc_state & XHCI_STATE_DYING) || (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " - "non-responsive xHCI host.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Ep 0x%x: URB %p to be canceled on " + "non-responsive xHCI host.", urb->ep->desc.bEndpointAddress, urb); /* Let the stop endpoint command watchdog timer (which set this * state) finish cleaning up the 
endpoint TD lists. We must @@ -1538,8 +1496,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) urb_priv = urb->hcpriv; i = urb_priv->td_cnt; if (i < urb_priv->length) - xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, " - "starting at offset 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Cancel URB %p, dev %s, ep 0x%x, " + "starting at offset 0x%llx", urb, urb->dev->devpath, urb->ep->desc.bEndpointAddress, (unsigned long long) xhci_trb_virt_to_dma( @@ -1851,7 +1810,8 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, ret = -ENODEV; break; case COMP_SUCCESS: - dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Successful Endpoint Configure command"); ret = 0; break; default: @@ -1897,7 +1857,8 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci, ret = -EINVAL; break; case COMP_SUCCESS: - dev_dbg(&udev->dev, "Successful evaluate context command\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Successful evaluate context command"); ret = 0; break; default: @@ -1963,14 +1924,16 @@ static int xhci_reserve_host_resources(struct xhci_hcd *xhci, added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { - xhci_dbg(xhci, "Not enough ep ctxs: " - "%u active, need to add %u, limit is %u.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Not enough ep ctxs: " + "%u active, need to add %u, limit is %u.", xhci->num_active_eps, added_eps, xhci->limit_active_eps); return -ENOMEM; } xhci->num_active_eps += added_eps; - xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Adding %u ep ctxs, %u now active.", added_eps, xhci->num_active_eps); return 0; } @@ -1988,7 +1951,8 @@ static void xhci_free_host_resources(struct xhci_hcd *xhci, num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); xhci->num_active_eps -= num_failed_eps; - xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Removing %u failed ep ctxs, %u now active.", num_failed_eps, xhci->num_active_eps); } @@ -2007,7 +1971,8 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); xhci->num_active_eps -= num_dropped_eps; if (num_dropped_eps) - xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Removing %u dropped ep ctxs, %u now active.", num_dropped_eps, xhci->num_active_eps); } @@ -2168,18 +2133,21 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci, * that the HS bus has enough bandwidth if we are activing a new TT. 
*/ if (virt_dev->tt_info) { - xhci_dbg(xhci, "Recalculating BW for rootport %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Recalculating BW for rootport %u", virt_dev->real_port); if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { xhci_warn(xhci, "Not enough bandwidth on HS bus for " "newly activated TT.\n"); return -ENOMEM; } - xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Recalculating BW for TT slot %u port %u", virt_dev->tt_info->slot_id, virt_dev->tt_info->ttport); } else { - xhci_dbg(xhci, "Recalculating BW for rootport %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Recalculating BW for rootport %u", virt_dev->real_port); } @@ -2287,8 +2255,9 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci, xhci->rh_bw[port_index].num_active_tts; } - xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " - "Available: %u " "percent\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Final bandwidth: %u, Limit: %u, Reserved: %u, " + "Available: %u " "percent", bw_used, max_bandwidth, bw_reserved, (max_bandwidth - bw_used - bw_reserved) * 100 / max_bandwidth); @@ -2658,7 +2627,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) xhci_free_host_resources(xhci, ctrl_ctx); spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "FIXME allocate a new ring segment"); return -ENOMEM; } xhci_ring_cmd_db(xhci); @@ -2871,7 +2841,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, struct xhci_dequeue_state deq_state; struct xhci_virt_ep *ep; - xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Cleaning up stalled endpoint ring"); ep = &xhci->devs[udev->slot_id]->eps[ep_index]; /* We need to move the HW's dequeue pointer past this TD, * or it will attempt to resend it on the next doorbell ring. @@ -2884,7 +2855,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, * issue a configure endpoint command later. */ if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { - xhci_dbg(xhci, "Queueing new dequeue state\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Queueing new dequeue state"); xhci_queue_new_dequeue_state(xhci, udev->slot_id, ep_index, ep->stopped_stream, &deq_state); } else { @@ -2893,8 +2865,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, * XXX: No idea how this hardware will react when stream rings * are enabled. 
*/ - xhci_dbg(xhci, "Setting up input context for " - "configure endpoint command\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Setting up input context for " + "configure endpoint command"); xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, ep_index, &deq_state); } @@ -2926,16 +2899,19 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, ep_index = xhci_get_endpoint_index(&ep->desc); virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; if (!virt_ep->stopped_td) { - xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", - ep->desc.bEndpointAddress); + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Endpoint 0x%x not halted, refusing to reset.", + ep->desc.bEndpointAddress); return; } if (usb_endpoint_xfer_control(&ep->desc)) { - xhci_dbg(xhci, "Control endpoint stall already handled.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Control endpoint stall already handled."); return; } - xhci_dbg(xhci, "Queueing reset endpoint command\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Queueing reset endpoint command"); spin_lock_irqsave(&xhci->lock, flags); ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); /* @@ -3075,8 +3051,8 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, /* Are streams already being freed for the endpoint? */ if (ep_state & EP_GETTING_NO_STREAMS) { xhci_warn(xhci, "WARN Can't disable streams for " - "endpoint 0x%x\n, " - "streams are being disabled already.", + "endpoint 0x%x, " + "streams are being disabled already\n", eps[i]->desc.bEndpointAddress); return 0; } @@ -3084,8 +3060,8 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, if (!(ep_state & EP_HAS_STREAMS) && !(ep_state & EP_GETTING_STREAMS)) { xhci_warn(xhci, "WARN Can't disable streams for " - "endpoint 0x%x\n, " - "streams are already disabled!", + "endpoint 0x%x, " + "streams are already disabled!\n", eps[i]->desc.bEndpointAddress); xhci_warn(xhci, "WARN xhci_free_streams() called " "with non-streams endpoint\n"); @@ -3373,8 +3349,9 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, } xhci->num_active_eps -= num_dropped_eps; if (num_dropped_eps) - xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " - "%u now active.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Dropped %u ep ctxs, flags = 0x%x, " + "%u now active.", num_dropped_eps, drop_flags, xhci->num_active_eps); } @@ -3508,10 +3485,10 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) switch (ret) { case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ case COMP_CTX_STATE: /* 0.96 completion code for same thing */ - xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", + xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", slot_id, xhci_get_slot_state(xhci, virt_dev->out_ctx)); - xhci_info(xhci, "Not freeing device rings.\n"); + xhci_dbg(xhci, "Not freeing device rings.\n"); /* Don't treat this as an error. May change my mind later. */ ret = 0; goto command_cleanup; @@ -3584,6 +3561,16 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) u32 state; int i, ret; +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * We called pm_runtime_get_noresume when the device was attached. + * Decrement the counter here to allow controller to runtime suspend + * if no devices remain. 
+ */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_put_noidle(hcd->self.controller); +#endif + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); /* If the host is halted due to driver unload, we still need to free the * device. @@ -3636,13 +3623,15 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) { if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { - xhci_dbg(xhci, "Not enough ep ctxs: " - "%u active, need to add 1, limit is %u.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Not enough ep ctxs: " + "%u active, need to add 1, limit is %u.", xhci->num_active_eps, xhci->limit_active_eps); return -ENOMEM; } xhci->num_active_eps += 1; - xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Adding 1 ep ctx, %u now active.", xhci->num_active_eps); return 0; } @@ -3707,6 +3696,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) goto disable_slot; } udev->slot_id = xhci->slot_id; + +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * If resetting upon resume, we can't put the controller into runtime + * suspend if there is a device attached. + */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_get_noresume(hcd->self.controller); +#endif + /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? */ return 1; @@ -3742,7 +3741,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) union xhci_trb *cmd_trb; if (!udev->slot_id) { - xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Bad Slot ID %d", udev->slot_id); return -EINVAL; } @@ -3781,6 +3781,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); + trace_xhci_address_ctx(xhci, virt_dev->in_ctx, + slot_ctx->dev_info >> 27); spin_lock_irqsave(&xhci->lock, flags); cmd_trb = xhci->cmd_ring->dequeue; @@ -3788,7 +3790,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) udev->slot_id); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "FIXME: allocate a command ring segment"); return ret; } xhci_ring_cmd_db(xhci); @@ -3828,13 +3831,15 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) ret = -ENODEV; break; case COMP_SUCCESS: - xhci_dbg(xhci, "Successful Address Device command\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Successful Address Device command"); break; default: xhci_err(xhci, "ERROR: unexpected command completion " "code 0x%x.\n", virt_dev->cmd_status); xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); + trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); ret = -EINVAL; break; } @@ -3842,16 +3847,21 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) return ret; } temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); - xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); - xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", - udev->slot_id, - &xhci->dcbaa->dev_context_ptrs[udev->slot_id], - (unsigned long long) - le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); - xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Op regs DCBAA ptr = 
%#016llx", temp_64); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Slot ID %d dcbaa entry @%p = %#016llx", + udev->slot_id, + &xhci->dcbaa->dev_context_ptrs[udev->slot_id], + (unsigned long long) + le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Output Context DMA address = %#08llx", (unsigned long long)virt_dev->out_ctx->dma); xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); + trace_xhci_address_ctx(xhci, virt_dev->in_ctx, + slot_ctx->dev_info >> 27); xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); /* @@ -3859,6 +3869,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) * address given back to us by the HC. */ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + trace_xhci_address_ctx(xhci, virt_dev->out_ctx, + slot_ctx->dev_info >> 27); /* Use kernel assigned address for devices; store xHC assigned * address locally. */ virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) @@ -3867,7 +3879,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) ctrl_ctx->add_flags = 0; ctrl_ctx->drop_flags = 0; - xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); + xhci_dbg_trace(xhci, trace_xhci_dbg_address, + "Internal device address = %d", virt_dev->address); return 0; } @@ -3898,7 +3911,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) * Issue an Evaluate Context command to change the Maximum Exit Latency in the * slot context. If that succeeds, store the new MEL in the xhci_virt_device. */ -static int xhci_change_max_exit_latency(struct xhci_hcd *xhci, +static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, struct usb_device *udev, u16 max_exit_latency) { struct xhci_virt_device *virt_dev; @@ -3933,7 +3946,8 @@ static int xhci_change_max_exit_latency(struct xhci_hcd *xhci, slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); - xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, + "Set up evaluate context for LPM MEL change."); xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, command->in_ctx, 0); @@ -4353,7 +4367,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, state_name, sel); else dev_dbg(&udev->dev, "Device-initiated %s disabled " - "due to long PEL %llu\n ms", + "due to long PEL %llu ms\n", state_name, pel); return USB3_LPM_DISABLED; } @@ -4837,10 +4851,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) struct xhci_hcd *xhci; struct device *dev = hcd->self.controller; int retval; - u32 temp; /* Accept arbitrarily long scatter-gather lists */ hcd->self.sg_tablesize = ~0; + + /* support to build packet from discontinuous buffers */ + hcd->self.no_sg_constraint = 1; + /* XHCI controllers don't stop the ep queue on short packets :| */ hcd->self.no_stop_on_short = 1; @@ -4865,14 +4882,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. 
*/ - xhci = hcd_to_xhci(hcd); - temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); - if (HCC_64BIT_ADDR(temp)) { - xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); - dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); - } else { - dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); - } return 0; } @@ -4892,6 +4901,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) get_quirks(dev, xhci); + /* xHCI controllers which follow the xHCI 1.0 spec give a spurious + * success event after a short transfer. This quirk will ignore such + * spurious events. + */ + if (xhci->hci_version > 0x96) + xhci->quirks |= XHCI_SPURIOUS_SUCCESS; + /* Make sure the HC is halted. */ retval = xhci_halt(xhci); if (retval) @@ -4904,12 +4920,12 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) goto error; xhci_dbg(xhci, "Reset complete\n"); - temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); - if (HCC_64BIT_ADDR(temp)) { + /* Set dma_mask and coherent_dma_mask to 64-bits, + * if xHC supports 64-bit addressing */ + if (HCC_64BIT_ADDR(xhci->hcc_params) && + !dma_set_mask(dev, DMA_BIT_MASK(64))) { xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); - dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); - } else { - dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); } xhci_dbg(xhci, "Calling HCD init\n"); @@ -4934,12 +4950,12 @@ static int __init xhci_hcd_init(void) retval = xhci_register_pci(); if (retval < 0) { - printk(KERN_DEBUG "Problem registering PCI driver."); + pr_debug("Problem registering PCI driver.\n"); return retval; } retval = xhci_register_plat(); if (retval < 0) { - printk(KERN_DEBUG "Problem registering platform driver."); + pr_debug("Problem registering platform driver.\n"); goto unreg_pci; } /* diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index c338741a675..46aa1489414 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1490,11 +1490,6 @@ struct xhci_hcd { struct dma_pool *small_streams_pool; struct dma_pool *medium_streams_pool; -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING - /* Poll the rings - for debugging */ - struct timer_list event_ring_timer; - int zombie; -#endif /* Host controller watchdog timer structures */ unsigned int xhc_state; @@ -1542,6 +1537,7 @@ struct xhci_hcd { #define XHCI_SPURIOUS_REBOOT (1 << 13) #define XHCI_COMP_MODE_QUIRK (1 << 14) #define XHCI_AVOID_BEI (1 << 15) +#define XHCI_PLAT (1 << 16) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1579,16 +1575,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) return xhci->main_hcd; } -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING -#define XHCI_DEBUG 1 -#else -#define XHCI_DEBUG 0 -#endif - #define xhci_dbg(xhci, fmt, args...) \ - do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0) -#define xhci_info(xhci, fmt, args...) \ - do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0) + dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_err(xhci, fmt, args...) \ dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_warn(xhci, fmt, args...)
\ @@ -1660,6 +1648,8 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, void xhci_dbg_ep_rings(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_virt_ep *ep); +void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), + const char *fmt, ...); /* xHCI memory management */ void xhci_mem_cleanup(struct xhci_hcd *xhci);
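
The xhci.h hunk above only declares xhci_dbg_trace(); its definition is added elsewhere in this series and is not among the hunks shown here. A minimal sketch of what such a wrapper can look like, assuming the standard %pV/va_format printk mechanism and the per-topic tracepoint classes declared in xhci-trace.h (illustrative only, not the exact upstream code):

	#include <linux/kernel.h>
	#include "xhci.h"
	#include "xhci-trace.h"

	/*
	 * Forward one formatted debug message both to dev_dbg() (via the
	 * %pV va_format helper) and to the tracepoint the caller passes in,
	 * e.g. trace_xhci_dbg_init or trace_xhci_dbg_cancel_urb.
	 */
	void xhci_dbg_trace(struct xhci_hcd *xhci,
			    void (*trace)(struct va_format *),
			    const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		xhci_dbg(xhci, "%pV\n", &vaf);	/* normal dynamic-debug path */
		trace(&vaf);			/* per-topic tracepoint */
		va_end(args);
	}

With the tracepoints compiled in, an individual message class can then be enabled at runtime through the trace events directory (assuming debugfs is mounted at /sys/kernel/debug), for example:

	echo 1 > /sys/kernel/debug/tracing/events/xhci-hcd/xhci_dbg_init/enable
	cat /sys/kernel/debug/tracing/trace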