Diffstat (limited to 'drivers/usb/host')
78 files changed, 9519 insertions, 938 deletions
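A cleanup that recurs in several hunks below (ehci-atmel.c, ehci-au1xxx.c, ehci-orion.c) replaces open-coded MMIO window arithmetic, res->end - res->start + 1, with the resource_size() helper from <linux/ioport.h>. The following is only a minimal sketch of that idiom; the helper function and its foo_ naming are illustrative and not taken from any driver in this diff.

/* Hedged sketch: use resource_size() rather than open-coded length math
 * when mapping an HCD's register window.  Mirrors the conversions in the
 * hunks below; the function name is hypothetical.
 */
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/usb/hcd.h>

static int foo_ehci_map_registers(struct platform_device *pdev,
				  struct usb_hcd *hcd)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);	/* was res->end - res->start + 1 */

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, pdev->name))
		return -EBUSY;

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
		return -ENOMEM;
	}
	return 0;
}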
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 9b43b226817..f865be2276d 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -90,14 +90,25 @@ config USB_EHCI_TT_NEWSCHED config USB_EHCI_BIG_ENDIAN_MMIO bool - depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX) + depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX) default y config USB_EHCI_BIG_ENDIAN_DESC bool - depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX) + depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX) default y +config XPS_USB_HCD_XILINX + bool "Use Xilinx usb host EHCI controller core" + depends on USB_EHCI_HCD && (PPC32 || MICROBLAZE) + select USB_EHCI_BIG_ENDIAN_DESC + select USB_EHCI_BIG_ENDIAN_MMIO + ---help--- + Xilinx xps USB host controller core is EHCI compilant and has + transaction translator built-in. It can be configured to either + support both high speed and full speed devices, or high speed + devices only. + config USB_EHCI_FSL bool "Support for Freescale on-chip EHCI USB controller" depends on USB_EHCI_HCD && FSL_SOC @@ -105,6 +116,13 @@ config USB_EHCI_FSL ---help--- Variation of ARC USB block used in some Freescale chips. +config USB_EHCI_MXC + bool "Support for Freescale on-chip EHCI USB controller" + depends on USB_EHCI_HCD && ARCH_MXC + select USB_EHCI_ROOT_HUB_TT + ---help--- + Variation of ARC USB block used in some Freescale chips. + config USB_EHCI_HCD_PPC_OF bool "EHCI support for PPC USB controller on OF platform bus" depends on USB_EHCI_HCD && PPC_OF @@ -189,6 +207,21 @@ config USB_OHCI_HCD To compile this driver as a module, choose M here: the module will be called ohci-hcd. +config USB_OHCI_HCD_OMAP1 + bool "OHCI support for OMAP1/2 chips" + depends on USB_OHCI_HCD && (ARCH_OMAP1 || ARCH_OMAP2) + default y + ---help--- + Enables support for the OHCI controller on OMAP1/2 chips. + +config USB_OHCI_HCD_OMAP3 + bool "OHCI support for OMAP3 and later chips" + depends on USB_OHCI_HCD && (ARCH_OMAP3 || ARCH_OMAP4) + default y + ---help--- + Enables support for the on-chip OHCI controller on + OMAP3 and later chips. + config USB_OHCI_HCD_PPC_SOC bool "OHCI support for on-chip PPC USB controller" depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx) @@ -381,3 +414,14 @@ config USB_HWA_HCD To compile this driver a module, choose M here: the module will be called "hwa-hc". + +config USB_IMX21_HCD + tristate "iMX21 HCD support" + depends on USB && ARM && MACH_MX21 + help + This driver enables support for the on-chip USB host in the + iMX21 processor. + + To compile this driver as a module, choose M here: the + module will be called "imx21-hcd". 
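The new Kconfig entries above take effect through the bus-glue selection in ehci-hcd.c, whose hunks appear further down in this diff. For orientation, the shape of that mechanism is sketched here; this is an abridged excerpt, not the complete list of glue drivers.

/* ehci-hcd.c compiles the matching bus glue directly into the core
 * driver: each enabled option includes one glue file and names the
 * driver object to register.  Abridged from the ehci-hcd.c hunks
 * later in this diff.
 */
#ifdef CONFIG_USB_EHCI_MXC
#include "ehci-mxc.c"
#define PLATFORM_DRIVER		ehci_mxc_driver
#endif

#ifdef CONFIG_XPS_USB_HCD_XILINX
#include "ehci-xilinx-of.c"
#define XILINX_OF_PLATFORM_DRIVER	ehci_hcd_xilinx_of_driver
#endif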
+ diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index f58b2494c44..b6315aa47f7 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -12,7 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \ ifeq ($(CONFIG_FHCI_DEBUG),y) fhci-objs += fhci-dbg.o endif -xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o +xhci-hcd-objs := xhci.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o obj-$(CONFIG_USB_WHCI_HCD) += whci/ @@ -25,10 +25,12 @@ obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o obj-$(CONFIG_USB_FHCI_HCD) += fhci.o -obj-$(CONFIG_USB_XHCI_HCD) += xhci.o +obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o +obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o + diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 87c1b7c34c0..51bd0edf544 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c @@ -149,7 +149,7 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev) goto fail_request_resource; } hcd->rsrc_start = res->start; - hcd->rsrc_len = res->end - res->start + 1; + hcd->rsrc_len = resource_size(res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) { diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index ed77be76d6b..faa61748db7 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c @@ -69,6 +69,15 @@ static void au1xxx_stop_ehc(void) au_sync(); } +static int au1xxx_ehci_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int ret = ehci_init(hcd); + + ehci->need_io_watchdog = 0; + return ret; +} + static const struct hc_driver ehci_au1xxx_hc_driver = { .description = hcd_name, .product_desc = "Au1xxx EHCI", @@ -86,7 +95,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = { * FIXME -- ehci_init() doesn't do enough here. * See ehci-ppc-soc for a complete implementation. */ - .reset = ehci_init, + .reset = au1xxx_ehci_setup, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, @@ -121,6 +130,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct ehci_hcd *ehci; + struct resource *res; int ret; if (usb_disabled()) @@ -144,8 +154,9 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev) if (!hcd) return -ENOMEM; - hcd->rsrc_start = pdev->resource[0].start; - hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { pr_debug("request_mem_region failed"); @@ -213,26 +224,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev) msleep(10); /* Root hub was already suspended. Disable irq emission and - * mark HW unaccessible, bail out if RH has been resumed. Use - * the spinlock to properly synchronize with possible pending - * RH suspend or resume activity. - * - * This is still racy as hcd->state is manipulated outside of - * any locks =P But that will be a different fix. + * mark HW unaccessible. 
The PM and USB cores make sure that + * the root hub is either suspended or stopped. */ spin_lock_irqsave(&ehci->lock, flags); - if (hcd->state != HC_STATE_SUSPENDED) { - rc = -EINVAL; - goto bail; - } + ehci_prepare_ports_for_controller_suspend(ehci); ehci_writel(ehci, 0, &ehci->regs->intr_enable); (void)ehci_readl(ehci, &ehci->regs->intr_enable); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); au1xxx_stop_ehc(); - -bail: spin_unlock_irqrestore(&ehci->lock, flags); // could save FLADJ in case of Vaux power loss @@ -262,6 +264,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev) if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) { int mask = INTR_MASK; + ehci_prepare_ports_for_controller_resume(ehci); if (!hcd->self.root_hub->do_remote_wakeup) mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); @@ -297,7 +300,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ehci_pmops = { +static const struct dev_pm_ops au1xxx_ehci_pmops = { .suspend = ehci_hcd_au1xxx_drv_suspend, .resume = ehci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 991174937db..5cd967d2893 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2005 MontaVista Software + * Copyright 2005-2009 MontaVista Software, Inc. + * Copyright 2008 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -17,17 +18,20 @@ * * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided * by Hunter Wu. + * Power Management support by Dave Liu <daveliu@freescale.com>, + * Jerry Huang <Chang-Ming.Huang@freescale.com> and + * Anton Vorontsov <avorontsov@ru.mvista.com>. */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/pm.h> #include <linux/platform_device.h> #include <linux/fsl_devices.h> #include "ehci-fsl.h" -/* FIXME: Power Management is un-ported so temporarily disable it */ -#undef CONFIG_PM - - /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ @@ -40,8 +44,8 @@ * Allocates basic resources for this USB host controller. * */ -int usb_hcd_fsl_probe(const struct hc_driver *driver, - struct platform_device *pdev) +static int usb_hcd_fsl_probe(const struct hc_driver *driver, + struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata; struct usb_hcd *hcd; @@ -147,7 +151,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver, * Reverses the effect of usb_hcd_fsl_probe(). * */ -void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev) +static void usb_hcd_fsl_remove(struct usb_hcd *hcd, + struct platform_device *pdev) { usb_remove_hcd(hcd); iounmap(hcd->regs); @@ -284,10 +289,83 @@ static int ehci_fsl_setup(struct usb_hcd *hcd) return retval; } +struct ehci_fsl { + struct ehci_hcd ehci; + +#ifdef CONFIG_PM + /* Saved USB PHY settings, need to restore after deep sleep. 
*/ + u32 usb_ctrl; +#endif +}; + +#ifdef CONFIG_PM + +static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + return container_of(ehci, struct ehci_fsl, ehci); +} + +static int ehci_fsl_drv_suspend(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + void __iomem *non_ehci = hcd->regs; + + ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd)); + if (!fsl_deep_sleep()) + return 0; + + ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL); + return 0; +} + +static int ehci_fsl_drv_resume(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + void __iomem *non_ehci = hcd->regs; + + ehci_prepare_ports_for_controller_resume(ehci); + if (!fsl_deep_sleep()) + return 0; + + usb_root_hub_lost_power(hcd->self.root_hub); + + /* Restore USB PHY settings and enable the controller. */ + out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl); + + ehci_reset(ehci); + ehci_fsl_reinit(ehci); + + return 0; +} + +static int ehci_fsl_drv_restore(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + + usb_root_hub_lost_power(hcd->self.root_hub); + return 0; +} + +static struct dev_pm_ops ehci_fsl_pm_ops = { + .suspend = ehci_fsl_drv_suspend, + .resume = ehci_fsl_drv_resume, + .restore = ehci_fsl_drv_restore, +}; + +#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops) +#else +#define EHCI_FSL_PM_OPS NULL +#endif /* CONFIG_PM */ + static const struct hc_driver ehci_fsl_hc_driver = { .description = hcd_name, .product_desc = "Freescale On-Chip EHCI Host Controller", - .hcd_priv_size = sizeof(struct ehci_hcd), + .hcd_priv_size = sizeof(struct ehci_fsl), /* * generic hardware linkage @@ -354,6 +432,7 @@ static struct platform_driver ehci_fsl_driver = { .remove = ehci_fsl_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { - .name = "fsl-ehci", + .name = "fsl-ehci", + .pm = EHCI_FSL_PM_OPS, }, }; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 9835e071394..a3ef2a9d9dc 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -23,19 +23,19 @@ #include <linux/delay.h> #include <linux/ioport.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> +#include <linux/ktime.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> - -#include "../core/hcd.h" +#include <linux/slab.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -209,7 +209,7 @@ static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr, if (error) { ehci_halt(ehci); ehci_to_hcd(ehci)->state = HC_STATE_HALT; - ehci_err(ehci, "force halt; handhake %p %08x %08x -> %d\n", + ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n", ptr, mask, done, error); } @@ -542,13 +542,14 @@ static int ehci_init(struct usb_hcd *hcd) */ ehci->periodic_size = DEFAULT_I_TDPS; INIT_LIST_HEAD(&ehci->cached_itd_list); + INIT_LIST_HEAD(&ehci->cached_sitd_list); if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) return retval; /* controllers may cache some of the periodic schedule ... 
*/ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); if (HCC_ISOC_CACHE(hcc_params)) // full frame cache - ehci->i_thresh = 8; + ehci->i_thresh = 2 + 8; else // N microframes cached ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); @@ -604,6 +605,8 @@ static int ehci_init(struct usb_hcd *hcd) } ehci->command = temp; + /* Accept arbitrarily long scatter-gather lists */ + hcd->self.sg_tablesize = ~0; return 0; } @@ -676,6 +679,7 @@ static int ehci_run (struct usb_hcd *hcd) ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ msleep(5); up_write(&ehci_cf_port_reset_rwsem); + ehci->last_periodic_enable = ktime_get_real(); temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); ehci_info (ehci, @@ -783,9 +787,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) /* start 20 msec resume signaling from this port, * and make khubd collect PORT_STAT_C_SUSPEND to - * stop that signaling. + * stop that signaling. Use 5 ms extra for safety, + * like usb_port_resume() does. */ - ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); + ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); mod_timer(&hcd->rh_timer, ehci->reset_done[i]); } @@ -990,7 +995,7 @@ rescan: /* endpoints can be iso streams. for now, we don't * accelerate iso completions ... so spin a while. */ - if (qh->hw->hw_info1 == 0) { + if (qh->hw == NULL) { ehci_vdbg (ehci, "iso delay\n"); goto idle_timeout; } @@ -1103,11 +1108,21 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ehci_fsl_driver #endif +#ifdef CONFIG_USB_EHCI_MXC +#include "ehci-mxc.c" +#define PLATFORM_DRIVER ehci_mxc_driver +#endif + #ifdef CONFIG_SOC_AU1200 #include "ehci-au1xxx.c" #define PLATFORM_DRIVER ehci_hcd_au1xxx_driver #endif +#ifdef CONFIG_ARCH_OMAP3 +#include "ehci-omap.c" +#define PLATFORM_DRIVER ehci_hcd_omap_driver +#endif + #ifdef CONFIG_PPC_PS3 #include "ehci-ps3.c" #define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver @@ -1118,6 +1133,11 @@ MODULE_LICENSE ("GPL"); #define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver #endif +#ifdef CONFIG_XPS_USB_HCD_XILINX +#include "ehci-xilinx-of.c" +#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver +#endif + #ifdef CONFIG_PLAT_ORION #include "ehci-orion.c" #define PLATFORM_DRIVER ehci_orion_driver @@ -1139,7 +1159,8 @@ MODULE_LICENSE ("GPL"); #endif #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ - !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) + !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \ + !defined(XILINX_OF_PLATFORM_DRIVER) #error "missing bus glue for ehci-hcd" #endif @@ -1193,10 +1214,20 @@ static int __init ehci_hcd_init(void) if (retval < 0) goto clean3; #endif + +#ifdef XILINX_OF_PLATFORM_DRIVER + retval = of_register_platform_driver(&XILINX_OF_PLATFORM_DRIVER); + if (retval < 0) + goto clean4; +#endif return retval; +#ifdef XILINX_OF_PLATFORM_DRIVER + /* of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER); */ +clean4: +#endif #ifdef OF_PLATFORM_DRIVER - /* of_unregister_platform_driver(&OF_PLATFORM_DRIVER); */ + of_unregister_platform_driver(&OF_PLATFORM_DRIVER); clean3: #endif #ifdef PS3_SYSTEM_BUS_DRIVER @@ -1223,6 +1254,9 @@ module_init(ehci_hcd_init); static void __exit ehci_hcd_cleanup(void) { +#ifdef XILINX_OF_PLATFORM_DRIVER + of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER); +#endif #ifdef OF_PLATFORM_DRIVER of_unregister_platform_driver(&OF_PLATFORM_DRIVER); #endif diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 
1b6f1c0e5ce..e7d3d8def28 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) ehci->owned_ports = 0; } +static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, + bool suspending) +{ + int port; + u32 temp; + + /* If remote wakeup is enabled for the root hub but disabled + * for the controller, we must adjust all the port wakeup flags + * when the controller is suspended or resumed. In all other + * cases they don't need to be changed. + */ + if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || + device_may_wakeup(ehci_to_hcd(ehci)->self.controller)) + return; + + /* clear phy low-power mode before changing wakeup flags */ + if (ehci->has_hostpc) { + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *hostpc_reg; + + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + + HOSTPC0 + 4 * port); + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); + } + msleep(5); + } + + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *reg = &ehci->regs->port_status[port]; + u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; + u32 t2 = t1 & ~PORT_WAKE_BITS; + + /* If we are suspending the controller, clear the flags. + * If we are resuming the controller, set the wakeup flags. + */ + if (!suspending) { + if (t1 & PORT_CONNECT) + t2 |= PORT_WKOC_E | PORT_WKDISC_E; + else + t2 |= PORT_WKOC_E | PORT_WKCONN_E; + } + ehci_vdbg(ehci, "port %d, %08x -> %08x\n", + port + 1, t1, t2); + ehci_writel(ehci, t2, reg); + } + + /* enter phy low-power mode again */ + if (ehci->has_hostpc) { + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *hostpc_reg; + + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + + HOSTPC0 + 4 * port); + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg); + } + } +} + static int ehci_bus_suspend (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); int port; int mask; - u32 __iomem *hostpc_reg = NULL; + int changed; ehci_dbg(ehci, "suspend root hub\n"); @@ -120,9 +183,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) del_timer_sync(&ehci->watchdog); del_timer_sync(&ehci->iaa_watchdog); - port = HCS_N_PORTS (ehci->hcs_params); spin_lock_irq (&ehci->lock); + /* Once the controller is stopped, port resumes that are already + * in progress won't complete. Hence if remote wakeup is enabled + * for the root hub and any ports are in the middle of a resume or + * remote wakeup, we must fail the suspend. 
+ */ + if (hcd->self.root_hub->do_remote_wakeup) { + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + if (ehci->reset_done[port] != 0) { + spin_unlock_irq(&ehci->lock); + ehci_dbg(ehci, "suspend failed because " + "port %d is resuming\n", + port + 1); + return -EBUSY; + } + } + } + /* stop schedules, clean any completed work */ if (HC_IS_RUNNING(hcd->state)) { ehci_quiesce (ehci); @@ -138,14 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) */ ehci->bus_suspended = 0; ehci->owned_ports = 0; + changed = 0; + port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *reg = &ehci->regs->port_status [port]; u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; - u32 t2 = t1; + u32 t2 = t1 & ~PORT_WAKE_BITS; - if (ehci->has_hostpc) - hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs - + HOSTPC0 + 4 * (port & 0xff)); /* keep track of which ports we suspend */ if (t1 & PORT_OWNER) set_bit(port, &ehci->owned_ports); @@ -154,38 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) set_bit(port, &ehci->bus_suspended); } - /* enable remote wakeup on all ports */ + /* enable remote wakeup on all ports, if told to do so */ if (hcd->self.root_hub->do_remote_wakeup) { /* only enable appropriate wake bits, otherwise the * hardware can not go phy low power mode. If a race * condition happens here(connection change during bits * set), the port change detection will finally fix it. */ - if (t1 & PORT_CONNECT) { + if (t1 & PORT_CONNECT) t2 |= PORT_WKOC_E | PORT_WKDISC_E; - t2 &= ~PORT_WKCONN_E; - } else { + else t2 |= PORT_WKOC_E | PORT_WKCONN_E; - t2 &= ~PORT_WKDISC_E; - } - } else - t2 &= ~PORT_WAKE_BITS; + } if (t1 != t2) { ehci_vdbg (ehci, "port %d, %08x -> %08x\n", port + 1, t1, t2); ehci_writel(ehci, t2, reg); - if (hostpc_reg) { - u32 t3; + changed = 1; + } + } - msleep(5);/* 5ms for HCD enter low pwr mode */ - t3 = ehci_readl(ehci, hostpc_reg); - ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); - t3 = ehci_readl(ehci, hostpc_reg); - ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", + if (changed && ehci->has_hostpc) { + spin_unlock_irq(&ehci->lock); + msleep(5); /* 5 ms for HCD to enter low-power mode */ + spin_lock_irq(&ehci->lock); + + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *hostpc_reg; + u32 t3; + + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + + HOSTPC0 + 4 * port); + t3 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); + t3 = ehci_readl(ehci, hostpc_reg); + ehci_dbg(ehci, "Port %d phy low-power mode %s\n", port, (t3 & HOSTPC_PHCD) ? 
"succeeded" : "failed"); - } } } @@ -236,7 +322,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd) } if (unlikely(ehci->debug)) { - if (ehci->debug && !dbgp_reset_prep()) + if (!dbgp_reset_prep()) ehci->debug = NULL; else dbgp_external_startup(); @@ -271,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd) msleep(8); spin_lock_irq(&ehci->lock); + /* clear phy low-power mode before resume */ + if (ehci->bus_suspended && ehci->has_hostpc) { + i = HCS_N_PORTS(ehci->hcs_params); + while (i--) { + if (test_bit(i, &ehci->bus_suspended)) { + u32 __iomem *hostpc_reg; + + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + + HOSTPC0 + 4 * i); + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp & ~HOSTPC_PHCD, + hostpc_reg); + } + } + spin_unlock_irq(&ehci->lock); + msleep(5); + spin_lock_irq(&ehci->lock); + } + /* manually resume the ports we suspended during bus_suspend() */ i = HCS_N_PORTS (ehci->hcs_params); while (i--) { @@ -639,7 +744,7 @@ static int ehci_hub_control ( * Even if OWNER is set, so the port is owned by the * companion controller, khubd needs to be able to clear * the port-change status bits (especially - * USB_PORT_FEAT_C_CONNECTION). + * USB_PORT_STAT_C_CONNECTION). */ switch (wValue) { @@ -655,16 +760,25 @@ static int ehci_hub_control ( goto error; if (ehci->no_selective_suspend) break; - if (temp & PORT_SUSPEND) { - if ((temp & PORT_PE) == 0) - goto error; - /* resume signaling for 20 msec */ - temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); - ehci_writel(ehci, temp | PORT_RESUME, - status_reg); - ehci->reset_done [wIndex] = jiffies - + msecs_to_jiffies (20); + if (!(temp & PORT_SUSPEND)) + break; + if ((temp & PORT_PE) == 0) + goto error; + + /* clear phy low-power mode before resume */ + if (hostpc_reg) { + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, + hostpc_reg); + spin_unlock_irqrestore(&ehci->lock, flags); + msleep(5);/* wait to leave low-power mode */ + spin_lock_irqsave(&ehci->lock, flags); } + /* resume signaling for 20 msec */ + temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); + ehci_writel(ehci, temp | PORT_RESUME, status_reg); + ehci->reset_done[wIndex] = jiffies + + msecs_to_jiffies(20); break; case USB_PORT_FEAT_C_SUSPEND: clear_bit(wIndex, &ehci->port_c_suspend); @@ -709,12 +823,12 @@ static int ehci_hub_control ( // wPortChange bits if (temp & PORT_CSC) - status |= 1 << USB_PORT_FEAT_C_CONNECTION; + status |= USB_PORT_STAT_C_CONNECTION << 16; if (temp & PORT_PEC) - status |= 1 << USB_PORT_FEAT_C_ENABLE; + status |= USB_PORT_STAT_C_ENABLE << 16; if ((temp & PORT_OCC) && !ignore_oc){ - status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; + status |= USB_PORT_STAT_C_OVERCURRENT << 16; /* * Hubs should disable port power on over-current. @@ -771,7 +885,7 @@ static int ehci_hub_control ( if ((temp & PORT_RESET) && time_after_eq(jiffies, ehci->reset_done[wIndex])) { - status |= 1 << USB_PORT_FEAT_C_RESET; + status |= USB_PORT_STAT_C_RESET << 16; ehci->reset_done [wIndex] = 0; /* force reset to complete */ @@ -781,7 +895,7 @@ static int ehci_hub_control ( * this bit; seems too long to spin routinely... 
*/ retval = handshake(ehci, status_reg, - PORT_RESET, 0, 750); + PORT_RESET, 0, 1000); if (retval != 0) { ehci_err (ehci, "port %d reset error %d\n", wIndex + 1, retval); @@ -813,7 +927,7 @@ static int ehci_hub_control ( */ if (temp & PORT_CONNECT) { - status |= 1 << USB_PORT_FEAT_CONNECTION; + status |= USB_PORT_STAT_CONNECTION; // status may be from integrated TT if (ehci->has_hostpc) { temp1 = ehci_readl(ehci, hostpc_reg); @@ -822,11 +936,11 @@ static int ehci_hub_control ( status |= ehci_port_speed(ehci, temp); } if (temp & PORT_PE) - status |= 1 << USB_PORT_FEAT_ENABLE; + status |= USB_PORT_STAT_ENABLE; /* maybe the port was unsuspended without our knowledge */ if (temp & (PORT_SUSPEND|PORT_RESUME)) { - status |= 1 << USB_PORT_FEAT_SUSPEND; + status |= USB_PORT_STAT_SUSPEND; } else if (test_bit(wIndex, &ehci->suspended_ports)) { clear_bit(wIndex, &ehci->suspended_ports); ehci->reset_done[wIndex] = 0; @@ -835,13 +949,13 @@ static int ehci_hub_control ( } if (temp & PORT_OC) - status |= 1 << USB_PORT_FEAT_OVER_CURRENT; + status |= USB_PORT_STAT_OVERCURRENT; if (temp & PORT_RESET) - status |= 1 << USB_PORT_FEAT_RESET; + status |= USB_PORT_STAT_RESET; if (temp & PORT_POWER) - status |= 1 << USB_PORT_FEAT_POWER; + status |= USB_PORT_STAT_POWER; if (test_bit(wIndex, &ehci->port_c_suspend)) - status |= 1 << USB_PORT_FEAT_C_SUSPEND; + status |= USB_PORT_STAT_C_SUSPEND << 16; #ifndef VERBOSE_DEBUG if (status & ~0xffff) /* only if wPortChange is interesting */ @@ -886,17 +1000,18 @@ static int ehci_hub_control ( if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) goto error; - ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); + /* After above check the port must be connected. * Set appropriate bit thus could put phy into low power * mode if we have hostpc feature */ + temp &= ~PORT_WKCONN_E; + temp |= PORT_WKDISC_E | PORT_WKOC_E; + ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); if (hostpc_reg) { - temp &= ~PORT_WKCONN_E; - temp |= (PORT_WKDISC_E | PORT_WKOC_E); - ehci_writel(ehci, temp | PORT_SUSPEND, - status_reg); + spin_unlock_irqrestore(&ehci->lock, flags); msleep(5);/* 5ms for HCD enter low pwr mode */ + spin_lock_irqsave(&ehci->lock, flags); temp1 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp1 | HOSTPC_PHCD, hostpc_reg); diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index aeda96e0af6..1f3f01eacaf 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh) static void ehci_mem_cleanup (struct ehci_hcd *ehci) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); if (ehci->async) qh_put (ehci->async); ehci->async = NULL; diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c new file mode 100644 index 00000000000..a8ad8ac120a --- /dev/null +++ b/drivers/usb/host/ehci-mxc.c @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix + * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/usb/otg.h> +#include <linux/slab.h> + +#include <mach/mxc_ehci.h> + +#define ULPI_VIEWPORT_OFFSET 0x170 +#define PORTSC_OFFSET 0x184 +#define USBMODE_OFFSET 0x1a8 +#define USBMODE_CM_HOST 3 + +struct ehci_mxc_priv { + struct clk *usbclk, *ahbclk; + struct usb_hcd *hcd; +}; + +/* called during probe() after chip reset completes */ +static int ehci_mxc_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int retval; + + /* EHCI registers start at offset 0x100 */ + ehci->caps = hcd->regs + 0x100; + ehci->regs = hcd->regs + 0x100 + + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); + dbg_hcs_params(ehci, "reset"); + dbg_hcc_params(ehci, "reset"); + + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + + retval = ehci_halt(ehci); + if (retval) + return retval; + + /* data structure init */ + retval = ehci_init(hcd); + if (retval) + return retval; + + hcd->has_tt = 1; + + ehci->sbrn = 0x20; + + ehci_reset(ehci); + + ehci_port_power(ehci, 0); + return 0; +} + +static const struct hc_driver ehci_mxc_hc_driver = { + .description = hcd_name, + .product_desc = "Freescale On-Chip EHCI Host Controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_USB2 | HCD_MEMORY, + + /* + * basic lifecycle operations + */ + .reset = ehci_mxc_setup, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + .relinquish_port = ehci_relinquish_port, + .port_handed_over = ehci_port_handed_over, +}; + +static int ehci_mxc_drv_probe(struct platform_device *pdev) +{ + struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; + struct usb_hcd *hcd; + struct resource *res; + int irq, ret, temp; + struct ehci_mxc_priv *priv; + struct device *dev = &pdev->dev; + + dev_info(&pdev->dev, "initializing i.MX USB Controller\n"); + + if (!pdata) { + dev_err(dev, "No platform data given, bailing out.\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + + hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); + if (!hcd) + return -ENOMEM; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_alloc; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Found HC with no register addr. 
Check setup!\n"); + ret = -ENODEV; + goto err_get_resource; + } + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + dev_dbg(dev, "controller already in use\n"); + ret = -EBUSY; + goto err_request_mem; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(dev, "error mapping memory\n"); + ret = -EFAULT; + goto err_ioremap; + } + + /* call platform specific init function */ + if (pdata->init) { + ret = pdata->init(pdev); + if (ret) { + dev_err(dev, "platform init failed\n"); + goto err_init; + } + /* platforms need some time to settle changed IO settings */ + mdelay(10); + } + + /* enable clocks */ + priv->usbclk = clk_get(dev, "usb"); + if (IS_ERR(priv->usbclk)) { + ret = PTR_ERR(priv->usbclk); + goto err_clk; + } + clk_enable(priv->usbclk); + + if (!cpu_is_mx35() && !cpu_is_mx25()) { + priv->ahbclk = clk_get(dev, "usb_ahb"); + if (IS_ERR(priv->ahbclk)) { + ret = PTR_ERR(priv->ahbclk); + goto err_clk_ahb; + } + clk_enable(priv->ahbclk); + } + + /* set USBMODE to host mode */ + temp = readl(hcd->regs + USBMODE_OFFSET); + writel(temp | USBMODE_CM_HOST, hcd->regs + USBMODE_OFFSET); + + /* set up the PORTSCx register */ + writel(pdata->portsc, hcd->regs + PORTSC_OFFSET); + mdelay(10); + + /* setup specific usb hw */ + ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); + if (ret < 0) + goto err_init; + + /* Initialize the transceiver */ + if (pdata->otg) { + pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; + ret = otg_init(pdata->otg); + if (ret) { + dev_err(dev, "unable to init transceiver, probably missing\n"); + ret = -ENODEV; + goto err_add; + } + ret = otg_set_vbus(pdata->otg, 1); + if (ret) { + dev_err(dev, "unable to enable vbus on transceiver\n"); + goto err_add; + } + } + + priv->hcd = hcd; + platform_set_drvdata(pdev, priv); + + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); + if (ret) + goto err_add; + + return 0; + +err_add: + if (pdata && pdata->exit) + pdata->exit(pdev); +err_init: + if (priv->ahbclk) { + clk_disable(priv->ahbclk); + clk_put(priv->ahbclk); + } +err_clk_ahb: + clk_disable(priv->usbclk); + clk_put(priv->usbclk); +err_clk: + iounmap(hcd->regs); +err_ioremap: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +err_request_mem: +err_get_resource: + kfree(priv); +err_alloc: + usb_put_hcd(hcd); + return ret; +} + +static int __exit ehci_mxc_drv_remove(struct platform_device *pdev) +{ + struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; + struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); + struct usb_hcd *hcd = priv->hcd; + + if (pdata && pdata->exit) + pdata->exit(pdev); + + if (pdata->otg) + otg_shutdown(pdata->otg); + + usb_remove_hcd(hcd); + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + usb_put_hcd(hcd); + platform_set_drvdata(pdev, NULL); + + clk_disable(priv->usbclk); + clk_put(priv->usbclk); + if (priv->ahbclk) { + clk_disable(priv->ahbclk); + clk_put(priv->ahbclk); + } + + kfree(priv); + + return 0; +} + +static void ehci_mxc_drv_shutdown(struct platform_device *pdev) +{ + struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); + struct usb_hcd *hcd = priv->hcd; + + if (hcd->driver->shutdown) + hcd->driver->shutdown(hcd); +} + +MODULE_ALIAS("platform:mxc-ehci"); + +static struct platform_driver ehci_mxc_driver = { + .probe = ehci_mxc_drv_probe, + .remove = __exit_p(ehci_mxc_drv_remove), + .shutdown = ehci_mxc_drv_shutdown, + .driver = { + .name = "mxc-ehci", + }, 
+}; diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c new file mode 100644 index 00000000000..5450e628157 --- /dev/null +++ b/drivers/usb/host/ehci-omap.c @@ -0,0 +1,811 @@ +/* + * ehci-omap.c - driver for USBHOST on OMAP 34xx processor + * + * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller + * Tested on OMAP3430 ES2.0 SDP + * + * Copyright (C) 2007-2008 Texas Instruments, Inc. + * Author: Vikram Pandita <vikram.pandita@ti.com> + * + * Copyright (C) 2009 Nokia Corporation + * Contact: Felipe Balbi <felipe.balbi@nokia.com> + * + * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * TODO (last updated Feb 12, 2010): + * - add kernel-doc + * - enable AUTOIDLE + * - add suspend/resume + * - move workarounds to board-files + */ + +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/gpio.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <plat/usb.h> + +/* + * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES + * Use ehci_omap_readl()/ehci_omap_writel() functions + */ + +/* TLL Register Set */ +#define OMAP_USBTLL_REVISION (0x00) +#define OMAP_USBTLL_SYSCONFIG (0x10) +#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8) +#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3) +#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2) +#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1) +#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0) + +#define OMAP_USBTLL_SYSSTATUS (0x14) +#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0) + +#define OMAP_USBTLL_IRQSTATUS (0x18) +#define OMAP_USBTLL_IRQENABLE (0x1C) + +#define OMAP_TLL_SHARED_CONF (0x30) +#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6) +#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5) +#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2) +#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1) +#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0) + +#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num) +#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11) +#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10) +#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9) +#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8) +#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0) + +#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num) +#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num) +#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num) +#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num) +#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num) +#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num) +#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num) +#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num) +#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num) + +#define OMAP_TLL_CHANNEL_COUNT 3 +#define 
OMAP_TLL_CHANNEL_1_EN_MASK (1 << 1) +#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 2) +#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 4) + +/* UHH Register Set */ +#define OMAP_UHH_REVISION (0x00) +#define OMAP_UHH_SYSCONFIG (0x10) +#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12) +#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8) +#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3) +#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2) +#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1) +#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0) + +#define OMAP_UHH_SYSSTATUS (0x14) +#define OMAP_UHH_HOSTCONFIG (0x40) +#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0) +#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0) +#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11) +#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12) +#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2) +#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3) +#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4) +#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5) +#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8) +#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9) +#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10) + +#define OMAP_UHH_DEBUG_CSR (0x44) + +/* EHCI Register Set */ +#define EHCI_INSNREG04 (0xA0) +#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5) +#define EHCI_INSNREG05_ULPI (0xA4) +#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31 +#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24 +#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22 +#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16 +#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 +#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 + +/*-------------------------------------------------------------------------*/ + +static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val) +{ + __raw_writel(val, base + reg); +} + +static inline u32 ehci_omap_readl(void __iomem *base, u32 reg) +{ + return __raw_readl(base + reg); +} + +static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val) +{ + __raw_writeb(val, base + reg); +} + +static inline u8 ehci_omap_readb(void __iomem *base, u8 reg) +{ + return __raw_readb(base + reg); +} + +/*-------------------------------------------------------------------------*/ + +struct ehci_hcd_omap { + struct ehci_hcd *ehci; + struct device *dev; + + struct clk *usbhost_ick; + struct clk *usbhost2_120m_fck; + struct clk *usbhost1_48m_fck; + struct clk *usbtll_fck; + struct clk *usbtll_ick; + + /* FIXME the following two workarounds are + * board specific not silicon-specific so these + * should be moved to board-file instead. + * + * Maybe someone from TI will know better which + * board is affected and needs the workarounds + * to be applied + */ + + /* gpio for resetting phy */ + int reset_gpio_port[OMAP3_HS_USB_PORTS]; + + /* phy reset workaround */ + int phy_reset; + + /* desired phy_mode: TLL, PHY */ + enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS]; + + void __iomem *uhh_base; + void __iomem *tll_base; + void __iomem *ehci_base; + + /* Regulators for USB PHYs. + * Each PHY can have a separate regulator. 
+ */ + struct regulator *regulator[OMAP3_HS_USB_PORTS]; +}; + +/*-------------------------------------------------------------------------*/ + +static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask) +{ + unsigned reg; + int i; + + /* Program the 3 TLL channels upfront */ + for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) { + reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i)); + + /* Disable AutoIdle, BitStuffing and use SDR Mode */ + reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE + | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF + | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); + ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg); + } + + /* Program Common TLL register */ + reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF); + reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON + | OMAP_TLL_SHARED_CONF_USB_DIVRATION + | OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN); + reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN; + + ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg); + + /* Enable channels now */ + for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) { + reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i)); + + /* Enable only the reg that is needed */ + if (!(tll_channel_mask & 1<<i)) + continue; + + reg |= OMAP_TLL_CHANNEL_CONF_CHANEN; + ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg); + + ehci_omap_writeb(omap->tll_base, + OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe); + dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n", + i+1, ehci_omap_readb(omap->tll_base, + OMAP_TLL_ULPI_SCRATCH_REGISTER(i))); + } +} + +/*-------------------------------------------------------------------------*/ + +/* omap_start_ehc + * - Start the TI USBHOST controller + */ +static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + u8 tll_ch_mask = 0; + unsigned reg = 0; + int ret = 0; + + dev_dbg(omap->dev, "starting TI EHCI USB Controller\n"); + + /* Enable Clocks for USBHOST */ + omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick"); + if (IS_ERR(omap->usbhost_ick)) { + ret = PTR_ERR(omap->usbhost_ick); + goto err_host_ick; + } + clk_enable(omap->usbhost_ick); + + omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck"); + if (IS_ERR(omap->usbhost2_120m_fck)) { + ret = PTR_ERR(omap->usbhost2_120m_fck); + goto err_host_120m_fck; + } + clk_enable(omap->usbhost2_120m_fck); + + omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck"); + if (IS_ERR(omap->usbhost1_48m_fck)) { + ret = PTR_ERR(omap->usbhost1_48m_fck); + goto err_host_48m_fck; + } + clk_enable(omap->usbhost1_48m_fck); + + if (omap->phy_reset) { + /* Refer: ISSUE1 */ + if (gpio_is_valid(omap->reset_gpio_port[0])) { + gpio_request(omap->reset_gpio_port[0], + "USB1 PHY reset"); + gpio_direction_output(omap->reset_gpio_port[0], 0); + } + + if (gpio_is_valid(omap->reset_gpio_port[1])) { + gpio_request(omap->reset_gpio_port[1], + "USB2 PHY reset"); + gpio_direction_output(omap->reset_gpio_port[1], 0); + } + + /* Hold the PHY in RESET for enough time till DIR is high */ + udelay(10); + } + + /* Configure TLL for 60Mhz clk for ULPI */ + omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck"); + if (IS_ERR(omap->usbtll_fck)) { + ret = PTR_ERR(omap->usbtll_fck); + goto err_tll_fck; + } + clk_enable(omap->usbtll_fck); + + omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick"); + if (IS_ERR(omap->usbtll_ick)) { + ret = PTR_ERR(omap->usbtll_ick); + goto err_tll_ick; + } + clk_enable(omap->usbtll_ick); + + /* perform TLL soft reset, and 
wait until reset is complete */ + ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, + OMAP_USBTLL_SYSCONFIG_SOFTRESET); + + /* Wait for TLL reset to complete */ + while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS) + & OMAP_USBTLL_SYSSTATUS_RESETDONE)) { + cpu_relax(); + + if (time_after(jiffies, timeout)) { + dev_dbg(omap->dev, "operation timed out\n"); + ret = -EINVAL; + goto err_sys_status; + } + } + + dev_dbg(omap->dev, "TLL RESET DONE\n"); + + /* (1<<3) = no idle mode only for initial debugging */ + ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, + OMAP_USBTLL_SYSCONFIG_ENAWAKEUP | + OMAP_USBTLL_SYSCONFIG_SIDLEMODE | + OMAP_USBTLL_SYSCONFIG_CACTIVITY); + + + /* Put UHH in NoIdle/NoStandby mode */ + reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG); + reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP + | OMAP_UHH_SYSCONFIG_SIDLEMODE + | OMAP_UHH_SYSCONFIG_CACTIVITY + | OMAP_UHH_SYSCONFIG_MIDLEMODE); + reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE; + + ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg); + + reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG); + + /* setup ULPI bypass and burst configurations */ + reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN + | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN + | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN); + reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN; + + if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN) + reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS; + if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN) + reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS; + if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN) + reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; + + /* Bypass the TLL module for PHY mode operation */ + if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) { + dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n"); + if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) || + (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) || + (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + else + reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + } else { + dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n"); + if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; + else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; + + if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; + else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; + + if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; + else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL) + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; + + } + ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg); + dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg); + + + /* + * An undocumented "feature" in the OMAP3 EHCI controller, + * causes suspended ports to be taken out of suspend when + * the USBCMD.Run/Stop bit is cleared (for example when + * we do ehci_bus_suspend). + * This breaks suspend-resume if the root-hub is allowed + * to suspend. Writing 1 to this undocumented register bit + * disables this feature and restores normal behavior. 
+ */ + ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04, + EHCI_INSNREG04_DISABLE_UNSUSPEND); + + if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) || + (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) || + (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) { + + if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) + tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK; + if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) + tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK; + if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL) + tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK; + + /* Enable UTMI mode for required TLL channels */ + omap_usb_utmi_init(omap, tll_ch_mask); + } + + if (omap->phy_reset) { + /* Refer ISSUE1: + * Hold the PHY in RESET for enough time till + * PHY is settled and ready + */ + udelay(10); + + if (gpio_is_valid(omap->reset_gpio_port[0])) + gpio_set_value(omap->reset_gpio_port[0], 1); + + if (gpio_is_valid(omap->reset_gpio_port[1])) + gpio_set_value(omap->reset_gpio_port[1], 1); + } + + return 0; + +err_sys_status: + clk_disable(omap->usbtll_ick); + clk_put(omap->usbtll_ick); + +err_tll_ick: + clk_disable(omap->usbtll_fck); + clk_put(omap->usbtll_fck); + +err_tll_fck: + clk_disable(omap->usbhost1_48m_fck); + clk_put(omap->usbhost1_48m_fck); + + if (omap->phy_reset) { + if (gpio_is_valid(omap->reset_gpio_port[0])) + gpio_free(omap->reset_gpio_port[0]); + + if (gpio_is_valid(omap->reset_gpio_port[1])) + gpio_free(omap->reset_gpio_port[1]); + } + +err_host_48m_fck: + clk_disable(omap->usbhost2_120m_fck); + clk_put(omap->usbhost2_120m_fck); + +err_host_120m_fck: + clk_disable(omap->usbhost_ick); + clk_put(omap->usbhost_ick); + +err_host_ick: + return ret; +} + +static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n"); + + /* Reset OMAP modules for insmod/rmmod to work */ + ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, + OMAP_UHH_SYSCONFIG_SOFTRESET); + while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & (1 << 0))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & (1 << 1))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & (1 << 2))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1)); + + while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS) + & (1 << 0))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + if (omap->usbtll_fck != NULL) { + clk_disable(omap->usbtll_fck); + clk_put(omap->usbtll_fck); + omap->usbtll_fck = NULL; + } + + if (omap->usbhost_ick != NULL) { + clk_disable(omap->usbhost_ick); + clk_put(omap->usbhost_ick); + omap->usbhost_ick = NULL; + } + + if (omap->usbhost1_48m_fck != NULL) { + clk_disable(omap->usbhost1_48m_fck); + clk_put(omap->usbhost1_48m_fck); + omap->usbhost1_48m_fck = NULL; + } + + if (omap->usbhost2_120m_fck != NULL) { + clk_disable(omap->usbhost2_120m_fck); + clk_put(omap->usbhost2_120m_fck); + omap->usbhost2_120m_fck = NULL; + } + + if (omap->usbtll_ick != NULL) { + clk_disable(omap->usbtll_ick); + clk_put(omap->usbtll_ick); + omap->usbtll_ick = NULL; + } + + if (omap->phy_reset) 
{ + if (gpio_is_valid(omap->reset_gpio_port[0])) + gpio_free(omap->reset_gpio_port[0]); + + if (gpio_is_valid(omap->reset_gpio_port[1])) + gpio_free(omap->reset_gpio_port[1]); + } + + dev_dbg(omap->dev, "Clock to USB host has been disabled\n"); +} + +/*-------------------------------------------------------------------------*/ + +static const struct hc_driver ehci_omap_hc_driver; + +/* configure so an HC device and id are always provided */ +/* always called with process context; sleeping is OK */ + +/** + * ehci_hcd_omap_probe - initialize TI-based HCDs + * + * Allocates basic resources for this USB host controller, and + * then invokes the start() method for the HCD associated with it + * through the hotplug entry's driver_data. + */ +static int ehci_hcd_omap_probe(struct platform_device *pdev) +{ + struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data; + struct ehci_hcd_omap *omap; + struct resource *res; + struct usb_hcd *hcd; + + int irq = platform_get_irq(pdev, 0); + int ret = -ENODEV; + int i; + char supply[7]; + + if (!pdata) { + dev_dbg(&pdev->dev, "missing platform_data\n"); + goto err_pdata; + } + + if (usb_disabled()) + goto err_disabled; + + omap = kzalloc(sizeof(*omap), GFP_KERNEL); + if (!omap) { + ret = -ENOMEM; + goto err_disabled; + } + + hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev, + dev_name(&pdev->dev)); + if (!hcd) { + dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret); + ret = -ENOMEM; + goto err_create_hcd; + } + + platform_set_drvdata(pdev, omap); + omap->dev = &pdev->dev; + omap->phy_reset = pdata->phy_reset; + omap->reset_gpio_port[0] = pdata->reset_gpio_port[0]; + omap->reset_gpio_port[1] = pdata->reset_gpio_port[1]; + omap->reset_gpio_port[2] = pdata->reset_gpio_port[2]; + omap->port_mode[0] = pdata->port_mode[0]; + omap->port_mode[1] = pdata->port_mode[1]; + omap->port_mode[2] = pdata->port_mode[2]; + omap->ehci = hcd_to_ehci(hcd); + omap->ehci->sbrn = 0x20; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(&pdev->dev, "EHCI ioremap failed\n"); + ret = -ENOMEM; + goto err_ioremap; + } + + /* we know this is the memory we want, no need to ioremap again */ + omap->ehci->caps = hcd->regs; + omap->ehci_base = hcd->regs; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + omap->uhh_base = ioremap(res->start, resource_size(res)); + if (!omap->uhh_base) { + dev_err(&pdev->dev, "UHH ioremap failed\n"); + ret = -ENOMEM; + goto err_uhh_ioremap; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + omap->tll_base = ioremap(res->start, resource_size(res)); + if (!omap->tll_base) { + dev_err(&pdev->dev, "TLL ioremap failed\n"); + ret = -ENOMEM; + goto err_tll_ioremap; + } + + /* get ehci regulator and enable */ + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) { + omap->regulator[i] = NULL; + continue; + } + snprintf(supply, sizeof(supply), "hsusb%d", i); + omap->regulator[i] = regulator_get(omap->dev, supply); + if (IS_ERR(omap->regulator[i])) { + omap->regulator[i] = NULL; + dev_dbg(&pdev->dev, + "failed to get ehci port%d regulator\n", i); + } else { + regulator_enable(omap->regulator[i]); + } + } + + ret = omap_start_ehc(omap, hcd); + if (ret) { + dev_dbg(&pdev->dev, "failed to start ehci\n"); + goto err_start; + } + + omap->ehci->regs = hcd->regs + + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase)); + + 
dbg_hcs_params(omap->ehci, "reset"); + dbg_hcc_params(omap->ehci, "reset"); + + /* cache this readonly data; minimize chip reads */ + omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params); + + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); + if (ret) { + dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); + goto err_add_hcd; + } + + /* root ports should always stay powered */ + ehci_port_power(omap->ehci, 1); + + return 0; + +err_add_hcd: + omap_stop_ehc(omap, hcd); + +err_start: + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (omap->regulator[i]) { + regulator_disable(omap->regulator[i]); + regulator_put(omap->regulator[i]); + } + } + iounmap(omap->tll_base); + +err_tll_ioremap: + iounmap(omap->uhh_base); + +err_uhh_ioremap: + iounmap(hcd->regs); + +err_ioremap: + usb_put_hcd(hcd); + +err_create_hcd: + kfree(omap); +err_disabled: +err_pdata: + return ret; +} + +/* may be called without controller electrically present */ +/* may be called with controller, bus, and devices active */ + +/** + * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs + * @pdev: USB Host Controller being removed + * + * Reverses the effect of usb_ehci_hcd_omap_probe(), first invoking + * the HCD's stop() method. It is always called from a thread + * context, normally "rmmod", "apmd", or something similar. + */ +static int ehci_hcd_omap_remove(struct platform_device *pdev) +{ + struct ehci_hcd_omap *omap = platform_get_drvdata(pdev); + struct usb_hcd *hcd = ehci_to_hcd(omap->ehci); + int i; + + usb_remove_hcd(hcd); + omap_stop_ehc(omap, hcd); + iounmap(hcd->regs); + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (omap->regulator[i]) { + regulator_disable(omap->regulator[i]); + regulator_put(omap->regulator[i]); + } + } + iounmap(omap->tll_base); + iounmap(omap->uhh_base); + usb_put_hcd(hcd); + kfree(omap); + + return 0; +} + +static void ehci_hcd_omap_shutdown(struct platform_device *pdev) +{ + struct ehci_hcd_omap *omap = platform_get_drvdata(pdev); + struct usb_hcd *hcd = ehci_to_hcd(omap->ehci); + + if (hcd->driver->shutdown) + hcd->driver->shutdown(hcd); +} + +static struct platform_driver ehci_hcd_omap_driver = { + .probe = ehci_hcd_omap_probe, + .remove = ehci_hcd_omap_remove, + .shutdown = ehci_hcd_omap_shutdown, + /*.suspend = ehci_hcd_omap_suspend, */ + /*.resume = ehci_hcd_omap_resume, */ + .driver = { + .name = "ehci-omap", + } +}; + +/*-------------------------------------------------------------------------*/ + +static const struct hc_driver ehci_omap_hc_driver = { + .description = hcd_name, + .product_desc = "OMAP-EHCI Host Controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_MEMORY | HCD_USB2, + + /* + * basic lifecycle operations + */ + .reset = ehci_init, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + .endpoint_reset = ehci_endpoint_reset, + + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, +}; + +MODULE_ALIAS("platform:omap-ehci"); +MODULE_AUTHOR("Texas Instruments, Inc."); +MODULE_AUTHOR("Felipe Balbi 
<felipe.balbi@nokia.com>"); + diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 1d283e1b2b8..0f87dc72820 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -222,14 +222,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev) goto err1; } - if (!request_mem_region(res->start, res->end - res->start + 1, + if (!request_mem_region(res->start, resource_size(res), ehci_orion_hc_driver.description)) { dev_dbg(&pdev->dev, "controller already in use\n"); err = -EBUSY; goto err1; } - regs = ioremap(res->start, res->end - res->start + 1); + regs = ioremap(res->start, resource_size(res)); if (regs == NULL) { dev_dbg(&pdev->dev, "error mapping memory\n"); err = -EFAULT; @@ -244,7 +244,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev) } hcd->rsrc_start = res->start; - hcd->rsrc_len = res->end - res->start + 1; + hcd->rsrc_len = resource_size(res); hcd->regs = regs; ehci = hcd_to_ehci(hcd); @@ -287,7 +287,7 @@ err4: err3: iounmap(regs); err2: - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); err1: dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), err); diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 378861b9d79..d43d176161a 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -109,8 +109,15 @@ static int ehci_pci_setup(struct usb_hcd *hcd) return retval; switch (pdev->vendor) { + case PCI_VENDOR_ID_NEC: + ehci->need_io_watchdog = 0; + break; case PCI_VENDOR_ID_INTEL: ehci->need_io_watchdog = 0; + if (pdev->device == 0x27cc) { + ehci->broken_periodic = 1; + ehci_info(ehci, "using broken periodic workaround\n"); + } break; case PCI_VENDOR_ID_TDI: if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { @@ -280,23 +287,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd) msleep(10); /* Root hub was already suspended. Disable irq emission and - * mark HW unaccessible, bail out if RH has been resumed. Use - * the spinlock to properly synchronize with possible pending - * RH suspend or resume activity. - * - * This is still racy as hcd->state is manipulated outside of - * any locks =P But that will be a different fix. + * mark HW unaccessible. The PM and USB cores make sure that + * the root hub is either suspended or stopped. 
*/ spin_lock_irqsave (&ehci->lock, flags); - if (hcd->state != HC_STATE_SUSPENDED) { - rc = -EINVAL; - goto bail; - } + ehci_prepare_ports_for_controller_suspend(ehci); ehci_writel(ehci, 0, &ehci->regs->intr_enable); (void)ehci_readl(ehci, &ehci->regs->intr_enable); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); - bail: spin_unlock_irqrestore (&ehci->lock, flags); // could save FLADJ in case of Vaux power loss @@ -326,6 +325,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) !hibernated) { int mask = INTR_MASK; + ehci_prepare_ports_for_controller_resume(ehci); if (!hcd->self.root_hub->do_remote_wakeup) mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 36f96da129f..5aec92866ab 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -108,7 +108,7 @@ ppc44x_enable_bmt(struct device_node *dn) static int __devinit ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dn = op->node; + struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ehci_hcd *ehci = NULL; struct resource res; @@ -134,21 +134,21 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) hcd->rsrc_len = res.end - res.start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); + printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { - printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); + printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { - printk(KERN_ERR __FILE__ ": ioremap failed\n"); + printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } @@ -161,9 +161,9 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) ehci->ohci_hcctrl_reg = ioremap(res.start + OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN); else - pr_debug(__FILE__ ": no ohci offset in fdt\n"); + pr_debug("%s: no ohci offset in fdt\n", __FILE__); if (!ehci->ohci_hcctrl_reg) { - pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n"); + pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__); } else { ehci->has_amcc_usb23 = 1; } @@ -241,7 +241,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op) else release_mem_region(res.start, 0x4); else - pr_debug(__FILE__ ": no ohci offset in fdt\n"); + pr_debug("%s: no ohci offset in fdt\n", __FILE__); of_node_put(np); } @@ -264,7 +264,7 @@ static int ehci_hcd_ppc_of_shutdown(struct of_device *op) } -static struct of_device_id ehci_hcd_ppc_of_match[] = { +static const struct of_device_id ehci_hcd_ppc_of_match[] = { { .compatible = "usb-ehci", }, @@ -274,13 +274,12 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match); static struct of_platform_driver ehci_hcd_ppc_of_driver = { - .name = "ppc-of-ehci", - .match_table = ehci_hcd_ppc_of_match, .probe = ehci_hcd_ppc_of_probe, .remove = ehci_hcd_ppc_of_remove, .shutdown = ehci_hcd_ppc_of_shutdown, - .driver = { - .name = "ppc-of-ehci", - .owner = THIS_MODULE, + .driver = { + .name = "ppc-of-ehci", + .owner = THIS_MODULE, + .of_match_table = ehci_hcd_ppc_of_match, }, }; diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 00ad9ce392e..11a79c4f4a9 100644 --- a/drivers/usb/host/ehci-q.c +++ 
b/drivers/usb/host/ehci-q.c @@ -487,8 +487,20 @@ halt: * we must clear the TT buffer (11.17.5). */ if (unlikely(last_status != -EINPROGRESS && - last_status != -EREMOTEIO)) - ehci_clear_tt_buffer(ehci, qh, urb, token); + last_status != -EREMOTEIO)) { + /* The TT's in some hubs malfunction when they + * receive this request following a STALL (they + * stop sending isochronous packets). Since a + * STALL can't leave the TT buffer in a busy + * state (if you believe Figures 11-48 - 11-51 + * in the USB 2.0 spec), we won't clear the TT + * buffer in this case. Strictly speaking this + * is a violation of the spec. + */ + if (last_status != -EPIPE) + ehci_clear_tt_buffer(ehci, qh, urb, + token); + } } /* if we're removing something not at the queue head, @@ -604,9 +616,11 @@ qh_urb_transaction ( ) { struct ehci_qtd *qtd, *qtd_prev; dma_addr_t buf; - int len, maxpacket; + int len, this_sg_len, maxpacket; int is_input; u32 token; + int i; + struct scatterlist *sg; /* * URBs map to sequences of QTDs: one logical transaction @@ -647,7 +661,20 @@ qh_urb_transaction ( /* * data transfer stage: buffer setup */ - buf = urb->transfer_dma; + i = urb->num_sgs; + if (len > 0 && i > 0) { + sg = urb->sg; + buf = sg_dma_address(sg); + + /* urb->transfer_buffer_length may be smaller than the + * size of the scatterlist (or vice versa) + */ + this_sg_len = min_t(int, sg_dma_len(sg), len); + } else { + sg = NULL; + buf = urb->transfer_dma; + this_sg_len = len; + } if (is_input) token |= (1 /* "in" */ << 8); @@ -663,7 +690,9 @@ qh_urb_transaction ( for (;;) { int this_qtd_len; - this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket); + this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token, + maxpacket); + this_sg_len -= this_qtd_len; len -= this_qtd_len; buf += this_qtd_len; @@ -679,8 +708,13 @@ qh_urb_transaction ( if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) token ^= QTD_TOGGLE; - if (likely (len <= 0)) - break; + if (likely(this_sg_len <= 0)) { + if (--i <= 0 || len <= 0) + break; + sg = sg_next(sg); + buf = sg_dma_address(sg); + this_sg_len = min_t(int, sg_dma_len(sg), len); + } qtd_prev = qtd; qtd = ehci_qtd_alloc (ehci, flags); @@ -815,9 +849,10 @@ qh_make ( * But interval 1 scheduling is simpler, and * includes high bandwidth. 
*/ - dbg ("intr period %d uframes, NYET!", - urb->interval); - goto done; + urb->interval = 1; + } else if (qh->period > ehci->periodic_size) { + qh->period = ehci->periodic_size; + urb->interval = qh->period << 3; } } else { int think_time; @@ -840,6 +875,10 @@ qh_make ( usb_calc_bus_time (urb->dev->speed, is_input, 0, max_packet (maxp))); qh->period = urb->interval; + if (qh->period > ehci->periodic_size) { + qh->period = ehci->periodic_size; + urb->interval = qh->period; + } } } diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 3ea05936851..805ec633a65 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -475,6 +475,8 @@ static int enable_periodic (struct ehci_hcd *ehci) /* make sure ehci_work scans these */ ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index) % (ehci->periodic_size << 3); + if (unlikely(ehci->broken_periodic)) + ehci->last_periodic_enable = ktime_get_real(); return 0; } @@ -486,6 +488,16 @@ static int disable_periodic (struct ehci_hcd *ehci) if (--ehci->periodic_sched) return 0; + if (unlikely(ehci->broken_periodic)) { + /* delay experimentally determined */ + ktime_t safe = ktime_add_us(ehci->last_periodic_enable, 1000); + ktime_t now = ktime_get_real(); + s64 delay = ktime_us_delta(safe, now); + + if (unlikely(delay > 0)) + udelay(delay); + } + /* did setting PSE not take effect yet? * takes effect only at frame boundaries... */ @@ -498,6 +510,8 @@ static int disable_periodic (struct ehci_hcd *ehci) ehci_writel(ehci, cmd, &ehci->regs->command); /* posted write ... */ + free_cached_lists(ehci); + ehci->next_uframe = -1; return 0; } @@ -1109,8 +1123,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) urb->interval); } - /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */ - } else if (unlikely (stream->hw_info1 != 0)) { + /* if dev->ep [epnum] is a QH, hw is set */ + } else if (unlikely (stream->hw != NULL)) { ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", urb->dev->devpath, epnum, usb_pipein(urb->pipe) ? "in" : "out"); @@ -1373,7 +1387,7 @@ sitd_slot_ok ( * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! */ -#define SCHEDULE_SLOP 10 /* frames */ +#define SCHEDULE_SLOP 80 /* microframes */ static int iso_stream_schedule ( @@ -1382,12 +1396,13 @@ iso_stream_schedule ( struct ehci_iso_stream *stream ) { - u32 now, start, max, period; + u32 now, next, start, period; int status; unsigned mod = ehci->periodic_size << 3; struct ehci_iso_sched *sched = urb->hcpriv; + struct pci_dev *pdev; - if (sched->span > (mod - 8 * SCHEDULE_SLOP)) { + if (sched->span > (mod - SCHEDULE_SLOP)) { ehci_dbg (ehci, "iso request %p too long\n", urb); status = -EFBIG; goto fail; @@ -1400,10 +1415,11 @@ iso_stream_schedule ( goto fail; } - now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; + period = urb->interval; + if (!stream->highspeed) + period <<= 3; - /* when's the last uframe this urb could start? */ - max = now + mod; + now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; /* Typical case: reuse current schedule, stream is still active. * Hopefully there are no gaps from the host falling behind @@ -1411,20 +1427,33 @@ iso_stream_schedule ( * slot in the schedule, implicitly assuming URB_ISO_ASAP. */ if (likely (!list_empty (&stream->td_list))) { + pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller); start = stream->next_uframe; - if (start < now) - start += mod; + + /* For high speed devices, allow scheduling within the + * isochronous scheduling threshold. 
For full speed devices, + * don't. (Work around for Intel ICH9 bug.) + */ + if (!stream->highspeed && + pdev->vendor == PCI_VENDOR_ID_INTEL) + next = now + ehci->i_thresh; + else + next = now; /* Fell behind (by up to twice the slop amount)? */ - if (start >= max - 2 * 8 * SCHEDULE_SLOP) - start += stream->interval * DIV_ROUND_UP( - max - start, stream->interval) - mod; + if (((start - next) & (mod - 1)) >= + mod - 2 * SCHEDULE_SLOP) + start += period * DIV_ROUND_UP( + (next - start) & (mod - 1), + period); /* Tried to schedule too far into the future? */ - if (unlikely((start + sched->span) >= max)) { + if (unlikely(((start - now) & (mod - 1)) + sched->span + >= mod - 2 * SCHEDULE_SLOP)) { status = -EFBIG; goto fail; } + stream->next_uframe = start; goto ready; } @@ -1434,16 +1463,12 @@ iso_stream_schedule ( * can also help high bandwidth if the dma and irq loads don't * jump until after the queue is primed. */ - start = SCHEDULE_SLOP * 8 + (now & ~0x07); + start = SCHEDULE_SLOP + (now & ~0x07); start %= mod; stream->next_uframe = start; /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ - period = urb->interval; - if (!stream->highspeed) - period <<= 3; - /* find a uframe slot with enough bandwidth */ for (; start < (stream->next_uframe + period); start++) { int enough_space; @@ -1469,7 +1494,7 @@ iso_stream_schedule ( /* no room in the schedule */ ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n", list_empty (&stream->td_list) ? "" : "re", - urb, now, max); + urb, now, now + mod); status = -ENOSPC; fail: @@ -1540,13 +1565,27 @@ itd_patch( static inline void itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) { - /* always prepend ITD/SITD ... only QH tree is order-sensitive */ - itd->itd_next = ehci->pshadow [frame]; - itd->hw_next = ehci->periodic [frame]; - ehci->pshadow [frame].itd = itd; + union ehci_shadow *prev = &ehci->pshadow[frame]; + __hc32 *hw_p = &ehci->periodic[frame]; + union ehci_shadow here = *prev; + __hc32 type = 0; + + /* skip any iso nodes which might belong to previous microframes */ + while (here.ptr) { + type = Q_NEXT_TYPE(ehci, *hw_p); + if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(ehci, prev, type); + hw_p = shadow_next_periodic(ehci, &here, type); + here = *prev; + } + + itd->itd_next = here; + itd->hw_next = *hw_p; + prev->itd = itd; itd->frame = frame; wmb (); - ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); + *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); } /* fit urb's itds into the selected schedule slot; activate as needed */ @@ -2100,13 +2139,27 @@ sitd_complete ( (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); } iso_stream_put (ehci, stream); - /* OK to recycle this SITD now that its completion callback ran. */ + done: sitd->urb = NULL; - sitd->stream = NULL; - list_move(&sitd->sitd_list, &stream->free_list); - iso_stream_put(ehci, stream); - + if (ehci->clock_frame != sitd->frame) { + /* OK to recycle this SITD now. */ + sitd->stream = NULL; + list_move(&sitd->sitd_list, &stream->free_list); + iso_stream_put(ehci, stream); + } else { + /* HW might remember this SITD, so we can't recycle it yet. + * Move it to a safe place until a new frame starts. + */ + list_move(&sitd->sitd_list, &ehci->cached_sitd_list); + if (stream->refcount == 2) { + /* If iso_stream_put() were called here, stream + * would be freed. Instead, just prevent reuse. 
+ */ + stream->ep->hcpriv = NULL; + stream->ep = NULL; + } + } return retval; } @@ -2172,9 +2225,10 @@ done: /*-------------------------------------------------------------------------*/ -static void free_cached_itd_list(struct ehci_hcd *ehci) +static void free_cached_lists(struct ehci_hcd *ehci) { struct ehci_itd *itd, *n; + struct ehci_sitd *sitd, *sn; list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { struct ehci_iso_stream *stream = itd->stream; @@ -2182,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci) list_move(&itd->itd_list, &stream->free_list); iso_stream_put(ehci, stream); } + + list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { + struct ehci_iso_stream *stream = sitd->stream; + sitd->stream = NULL; + list_move(&sitd->sitd_list, &stream->free_list); + iso_stream_put(ehci, stream); + } } /*-------------------------------------------------------------------------*/ @@ -2208,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci) clock_frame = -1; } if (ehci->clock_frame != clock_frame) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); ehci->clock_frame = clock_frame; } clock %= mod; @@ -2299,9 +2360,13 @@ restart: * No need to check for activity unless the * frame is current. */ - if (frame == clock_frame && live && - (q.sitd->hw_results & - SITD_ACTIVE(ehci))) { + if (((frame == clock_frame) || + (((frame + 1) % ehci->periodic_size) + == clock_frame)) + && live + && (q.sitd->hw_results & + SITD_ACTIVE(ehci))) { + incomplete = true; q_p = &q.sitd->sitd_next; hw_p = &q.sitd->hw_next; @@ -2371,7 +2436,7 @@ restart: clock = now; clock_frame = clock >> 3; if (ehci->clock_frame != clock_frame) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); ehci->clock_frame = clock_frame; } } else { diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c new file mode 100644 index 00000000000..4899f451add --- /dev/null +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -0,0 +1,299 @@ +/* + * EHCI HCD (Host Controller Driver) for USB. + * + * Bus Glue for Xilinx EHCI core on the of_platform bus + * + * Copyright (c) 2009 Xilinx, Inc. + * + * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com> + * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de> + * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/signal.h> + +#include <linux/of.h> +#include <linux/of_platform.h> + +/** + * ehci_xilinx_of_setup - Initialize the device for ehci_reset() + * @hcd: Pointer to the usb_hcd device to which the host controller bound + * + * called during probe() after chip reset completes. 
+ */
+static int ehci_xilinx_of_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+
+	retval = ehci_halt(ehci);
+	if (retval)
+		return retval;
+
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	ehci->sbrn = 0x20;
+
+	return ehci_reset(ehci);
+}
+
+/**
+ * ehci_xilinx_port_handed_over - hand the port out if failed to enable it
+ * @hcd: Pointer to the usb_hcd device to which the host controller bound
+ * @portnum: Port number to which the device is attached.
+ *
+ * This function is used as a place to tell the user that the Xilinx USB host
+ * controller does not support LS devices. And in an HS only configuration, it
+ * does not support FS devices either. It is hoped that this can help a
+ * confused user.
+ *
+ * There are cases when the host controller fails to enable the port due to,
+ * for example, insufficient power that can be supplied to the device from
+ * the USB bus. In those cases, the messages printed here are not helpful.
+ */
+static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
+{
+	dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
+	if (hcd->has_tt) {
+		dev_warn(hcd->self.controller,
+			"Maybe you have connected a low speed device?\n");
+
+		dev_warn(hcd->self.controller,
+			"We do not support low speed devices\n");
+	} else {
+		dev_warn(hcd->self.controller,
+			"Maybe your device is not a high speed device?\n");
+		dev_warn(hcd->self.controller,
+			"The USB host controller does not support full speed "
+			"nor low speed devices\n");
+		dev_warn(hcd->self.controller,
+			"You can reconfigure the host controller to have "
+			"full speed support\n");
+	}
+
+	return 0;
+}
+
+
+static const struct hc_driver ehci_xilinx_of_hc_driver = {
+	.description = hcd_name,
+	.product_desc = "OF EHCI",
+	.hcd_priv_size = sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq = ehci_irq,
+	.flags = HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset = ehci_xilinx_of_setup,
+	.start = ehci_run,
+	.stop = ehci_stop,
+	.shutdown = ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue = ehci_urb_enqueue,
+	.urb_dequeue = ehci_urb_dequeue,
+	.endpoint_disable = ehci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number = ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data = ehci_hub_status_data,
+	.hub_control = ehci_hub_control,
+#ifdef CONFIG_PM
+	.bus_suspend = ehci_bus_suspend,
+	.bus_resume = ehci_bus_resume,
+#endif
+	.relinquish_port = NULL,
+	.port_handed_over = ehci_xilinx_port_handed_over,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+/**
+ * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
+ * @op: pointer to the of_device to which the host controller bound
+ * @match: pointer to of_device_id structure, not used
+ *
+ * This function requests resources and sets up appropriate properties for the
+ * host controller. Because the Xilinx USB host controller can be configured
+ * as HS only or HS/FS only, it checks the configuration in the device tree
+ * entry, and sets an appropriate value for hcd->has_tt.
+ */ +static int __devinit +ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match) +{ + struct device_node *dn = op->dev.of_node; + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + struct resource res; + int irq; + int rv; + int *value; + + if (usb_disabled()) + return -ENODEV; + + dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n"); + + rv = of_address_to_resource(dn, 0, &res); + if (rv) + return rv; + + hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev, + "XILINX-OF USB"); + if (!hcd) + return -ENOMEM; + + hcd->rsrc_start = res.start; + hcd->rsrc_len = res.end - res.start + 1; + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); + rv = -EBUSY; + goto err_rmr; + } + + irq = irq_of_parse_and_map(dn, 0); + if (irq == NO_IRQ) { + printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); + rv = -EBUSY; + goto err_irq; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + printk(KERN_ERR "%s: ioremap failed\n", __FILE__); + rv = -ENOMEM; + goto err_ioremap; + } + + ehci = hcd_to_ehci(hcd); + + /* This core always has big-endian register interface and uses + * big-endian memory descriptors. + */ + ehci->big_endian_mmio = 1; + ehci->big_endian_desc = 1; + + /* Check whether the FS support option is selected in the hardware. + */ + value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL); + if (value && (*value == 1)) { + ehci_dbg(ehci, "USB host controller supports FS devices\n"); + hcd->has_tt = 1; + } else { + ehci_dbg(ehci, + "USB host controller is HS only\n"); + hcd->has_tt = 0; + } + + /* Debug registers are at the first 0x100 region + */ + ehci->caps = hcd->regs + 0x100; + ehci->regs = hcd->regs + 0x100 + + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); + + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + + rv = usb_add_hcd(hcd, irq, 0); + if (rv == 0) + return 0; + + iounmap(hcd->regs); + +err_ioremap: +err_irq: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +err_rmr: + usb_put_hcd(hcd); + + return rv; +} + +/** + * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources + * @op: pointer to of_device structure that is to be removed + * + * Remove the hcd structure, and release resources that has been requested + * during probe. + */ +static int ehci_hcd_xilinx_of_remove(struct of_device *op) +{ + struct usb_hcd *hcd = dev_get_drvdata(&op->dev); + dev_set_drvdata(&op->dev, NULL); + + dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n"); + + usb_remove_hcd(hcd); + + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + + usb_put_hcd(hcd); + + return 0; +} + +/** + * ehci_hcd_xilinx_of_shutdown - shutdown the hcd + * @op: pointer to of_device structure that is to be removed + * + * Properly shutdown the hcd, call driver's shutdown routine. 
+ */ +static int ehci_hcd_xilinx_of_shutdown(struct of_device *op) +{ + struct usb_hcd *hcd = dev_get_drvdata(&op->dev); + + if (hcd->driver->shutdown) + hcd->driver->shutdown(hcd); + + return 0; +} + + +static const struct of_device_id ehci_hcd_xilinx_of_match[] = { + {.compatible = "xlnx,xps-usb-host-1.00.a",}, + {}, +}; +MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); + +static struct of_platform_driver ehci_hcd_xilinx_of_driver = { + .probe = ehci_hcd_xilinx_of_probe, + .remove = ehci_hcd_xilinx_of_remove, + .shutdown = ehci_hcd_xilinx_of_shutdown, + .driver = { + .name = "xilinx-of-ehci", + .owner = THIS_MODULE, + .of_match_table = ehci_hcd_xilinx_of_match, + }, +}; diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 064e76821ff..650a687f285 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */ int next_uframe; /* scan periodic, start here */ unsigned periodic_sched; /* periodic activity count */ - /* list of itds completed while clock_frame was still active */ + /* list of itds & sitds completed while clock_frame was still active */ struct list_head cached_itd_list; + struct list_head cached_sitd_list; unsigned clock_frame; /* per root hub port */ @@ -118,6 +119,7 @@ struct ehci_hcd { /* one per controller */ unsigned stamp; unsigned random_frame; unsigned long next_statechange; + ktime_t last_periodic_enable; u32 command; /* SILICON QUIRKS */ @@ -127,6 +129,7 @@ struct ehci_hcd { /* one per controller */ unsigned big_endian_desc:1; unsigned has_amcc_usb23:1; unsigned need_io_watchdog:1; + unsigned broken_periodic:1; /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) @@ -193,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action) clear_bit (action, &ehci->actions); } -static void free_cached_itd_list(struct ehci_hcd *ehci); +static void free_cached_lists(struct ehci_hcd *ehci); /*-------------------------------------------------------------------------*/ @@ -392,9 +395,8 @@ struct ehci_iso_sched { * acts like a qh would, if EHCI had them for ISO. 
*/ struct ehci_iso_stream { - /* first two fields match QH, but info1 == 0 */ - __hc32 hw_next; - __hc32 hw_info1; + /* first field matches ehci_hq, but is NULL */ + struct ehci_qh_hw *hw; u32 refcount; u8 bEndpointAddress; @@ -534,6 +536,16 @@ struct ehci_fstn { /*-------------------------------------------------------------------------*/ +/* Prepare the PORTSC wakeup flags during controller suspend/resume */ + +#define ehci_prepare_ports_for_controller_suspend(ehci) \ + ehci_adjust_port_wakeup_flags(ehci, true); + +#define ehci_prepare_ports_for_controller_resume(ehci) \ + ehci_adjust_port_wakeup_flags(ehci, false); + +/*-------------------------------------------------------------------------*/ + #ifdef CONFIG_USB_EHCI_ROOT_HUB_TT /* @@ -554,20 +566,20 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) case 0: return 0; case 1: - return (1<<USB_PORT_FEAT_LOWSPEED); + return USB_PORT_STAT_LOW_SPEED; case 2: default: - return (1<<USB_PORT_FEAT_HIGHSPEED); + return USB_PORT_STAT_HIGH_SPEED; } } - return (1<<USB_PORT_FEAT_HIGHSPEED); + return USB_PORT_STAT_HIGH_SPEED; } #else #define ehci_is_TDI(e) (0) -#define ehci_port_speed(ehci, portsc) (1<<USB_PORT_FEAT_HIGHSPEED) +#define ehci_port_speed(ehci, portsc) USB_PORT_STAT_HIGH_SPEED #endif /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c index e799f86dab1..6fe55004911 100644 --- a/drivers/usb/host/fhci-dbg.c +++ b/drivers/usb/host/fhci-dbg.c @@ -20,7 +20,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/usb.h> -#include "../core/hcd.h" +#include <linux/usb/hcd.h> #include "fhci.h" void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er) diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 0951818ef93..c7c8392a88b 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -25,11 +25,12 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> +#include <linux/slab.h> #include <asm/qe.h> #include <asm/fsl_gtm.h> -#include "../core/hcd.h" #include "fhci.h" void fhci_start_sof_timer(struct fhci_hcd *fhci) @@ -242,9 +243,10 @@ err: static void fhci_usb_free(void *lld) { struct fhci_usb *usb = lld; - struct fhci_hcd *fhci = usb->fhci; + struct fhci_hcd *fhci; if (usb) { + fhci = usb->fhci; fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF); fhci_ep0_free(usb); kfree(usb->actual_frame); @@ -432,7 +434,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return -ENOMEM; /* allocate the private part of the URB */ - urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags); + urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags); if (!urb_priv->tds) { kfree(urb_priv); return -ENOMEM; @@ -563,7 +565,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev, const struct of_device_id *ofid) { struct device *dev = &ofdev->dev; - struct device_node *node = ofdev->node; + struct device_node *node = dev->of_node; struct usb_hcd *hcd; struct fhci_hcd *fhci; struct resource usb_regs; @@ -668,7 +670,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev, } for (j = 0; j < NUM_PINS; j++) { - fhci->pins[j] = qe_pin_request(ofdev->node, j); + fhci->pins[j] = qe_pin_request(node, j); if (IS_ERR(fhci->pins[j])) { ret = PTR_ERR(fhci->pins[j]); dev_err(dev, "can't get pin %d: %d\n", j, ret); @@ -804,15 +806,18 @@ static int __devexit 
of_fhci_remove(struct of_device *ofdev) return fhci_remove(&ofdev->dev); } -static struct of_device_id of_fhci_match[] = { +static const struct of_device_id of_fhci_match[] = { { .compatible = "fsl,mpc8323-qe-usb", }, {}, }; MODULE_DEVICE_TABLE(of, of_fhci_match); static struct of_platform_driver of_fhci_driver = { - .name = "fsl,usb-fhci", - .match_table = of_fhci_match, + .driver = { + .name = "fsl,usb-fhci", + .owner = THIS_MODULE, + .of_match_table = of_fhci_match, + }, .probe = of_fhci_probe, .remove = __devexit_p(of_fhci_remove), }; diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c index 0cfaedc3e12..348fe62e94f 100644 --- a/drivers/usb/host/fhci-hub.c +++ b/drivers/usb/host/fhci-hub.c @@ -22,9 +22,9 @@ #include <linux/errno.h> #include <linux/io.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/gpio.h> #include <asm/qe.h> -#include "../core/hcd.h" #include "fhci.h" /* virtual root hub specific descriptor */ diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c index 2c0736c9971..b0b88f57a5a 100644 --- a/drivers/usb/host/fhci-mem.c +++ b/drivers/usb/host/fhci-mem.c @@ -18,9 +18,10 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/delay.h> +#include <linux/slab.h> #include <linux/list.h> #include <linux/usb.h> -#include "../core/hcd.h" +#include <linux/usb/hcd.h> #include "fhci.h" static void init_td(struct td *td) diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c index b0a1446ba29..03be7494a47 100644 --- a/drivers/usb/host/fhci-q.c +++ b/drivers/usb/host/fhci-q.c @@ -19,9 +19,10 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/errno.h> +#include <linux/slab.h> #include <linux/list.h> #include <linux/usb.h> -#include "../core/hcd.h" +#include <linux/usb/hcd.h> #include "fhci.h" /* maps the hardware error code to the USB error code */ diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c index 62a226b6167..a42ef380e91 100644 --- a/drivers/usb/host/fhci-sched.c +++ b/drivers/usb/host/fhci-sched.c @@ -24,9 +24,9 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <asm/qe.h> #include <asm/fsl_gtm.h> -#include "../core/hcd.h" #include "fhci.h" static void recycle_frame(struct fhci_usb *usb, struct packet *pkt) @@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt) pkt->info = 0; pkt->priv_data = NULL; - cq_put(usb->ep0->empty_frame_Q, pkt); + cq_put(&usb->ep0->empty_frame_Q, pkt); } /* confirm submitted packet */ @@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt) if ((td->data + td->actual_len) && trans_len) memcpy(td->data + td->actual_len, pkt->data, trans_len); - cq_put(usb->ep0->dummy_packets_Q, pkt->data); + cq_put(&usb->ep0->dummy_packets_Q, pkt->data); } recycle_frame(usb, pkt); @@ -125,7 +125,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt) /* * Flush all transmitted packets from BDs * This routine is called when disabling the USB port to flush all - * transmissions that are allready scheduled in the BDs + * transmissions that are already scheduled in the BDs */ void fhci_flush_all_transmissions(struct fhci_usb *usb) { @@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) } /* update frame object fields before transmitting */ - pkt = cq_get(usb->ep0->empty_frame_Q); + pkt = cq_get(&usb->ep0->empty_frame_Q); if (!pkt) { fhci_dbg(usb->fhci, "there is 
no empty frame\n"); return -1; @@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) pkt->info = 0; if (data == NULL) { - data = cq_get(usb->ep0->dummy_packets_Q); + data = cq_get(&usb->ep0->dummy_packets_Q); BUG_ON(!data); pkt->info = PKT_DUMMY_PACKET; } @@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) list_del_init(&td->frame_lh); td->status = USB_TD_OK; if (pkt->info & PKT_DUMMY_PACKET) - cq_put(usb->ep0->dummy_packets_Q, pkt->data); + cq_put(&usb->ep0->dummy_packets_Q, pkt->data); recycle_frame(usb, pkt); usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD); fhci_err(usb->fhci, "host transaction failed\n"); @@ -627,7 +627,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd) /* - * Process normal completions(error or sucess) and clean the schedule. + * Process normal completions(error or success) and clean the schedule. * * This is the main path for handing urbs back to drivers. The only other patth * is process_del_list(),which unlinks URBs by scanning EDs,instead of scanning diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index b4033229031..7be548ca218 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c @@ -18,10 +18,11 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> +#include <linux/slab.h> #include <linux/list.h> #include <linux/io.h> #include <linux/usb.h> -#include "../core/hcd.h" +#include <linux/usb/hcd.h> #include "fhci.h" #define DUMMY_BD_BUFFER 0xdeadbeef @@ -105,34 +106,34 @@ void fhci_ep0_free(struct fhci_usb *usb) if (ep->td_base) cpm_muram_free(cpm_muram_offset(ep->td_base)); - if (ep->conf_frame_Q) { - size = cq_howmany(ep->conf_frame_Q); + if (kfifo_initialized(&ep->conf_frame_Q)) { + size = cq_howmany(&ep->conf_frame_Q); for (; size; size--) { - struct packet *pkt = cq_get(ep->conf_frame_Q); + struct packet *pkt = cq_get(&ep->conf_frame_Q); kfree(pkt); } - cq_delete(ep->conf_frame_Q); + cq_delete(&ep->conf_frame_Q); } - if (ep->empty_frame_Q) { - size = cq_howmany(ep->empty_frame_Q); + if (kfifo_initialized(&ep->empty_frame_Q)) { + size = cq_howmany(&ep->empty_frame_Q); for (; size; size--) { - struct packet *pkt = cq_get(ep->empty_frame_Q); + struct packet *pkt = cq_get(&ep->empty_frame_Q); kfree(pkt); } - cq_delete(ep->empty_frame_Q); + cq_delete(&ep->empty_frame_Q); } - if (ep->dummy_packets_Q) { - size = cq_howmany(ep->dummy_packets_Q); + if (kfifo_initialized(&ep->dummy_packets_Q)) { + size = cq_howmany(&ep->dummy_packets_Q); for (; size; size--) { - u8 *buff = cq_get(ep->dummy_packets_Q); + u8 *buff = cq_get(&ep->dummy_packets_Q); kfree(buff); } - cq_delete(ep->dummy_packets_Q); + cq_delete(&ep->dummy_packets_Q); } kfree(ep); @@ -175,10 +176,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem, ep->td_base = cpm_muram_addr(ep_offset); /* zero all queue pointers */ - ep->conf_frame_Q = cq_new(ring_len + 2); - ep->empty_frame_Q = cq_new(ring_len + 2); - ep->dummy_packets_Q = cq_new(ring_len + 2); - if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) { + if (cq_new(&ep->conf_frame_Q, ring_len + 2) || + cq_new(&ep->empty_frame_Q, ring_len + 2) || + cq_new(&ep->dummy_packets_Q, ring_len + 2)) { err_for = "frame_queues"; goto err; } @@ -199,8 +199,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem, err_for = "buffer"; goto err; } - cq_put(ep->empty_frame_Q, pkt); - cq_put(ep->dummy_packets_Q, buff); + cq_put(&ep->empty_frame_Q, pkt); + 
cq_put(&ep->dummy_packets_Q, buff); } /* we put the endpoint parameter RAM right behind the TD ring */ @@ -319,7 +319,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb) if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W)) continue; - pkt = cq_get(ep->conf_frame_Q); + pkt = cq_get(&ep->conf_frame_Q); if (!pkt) fhci_err(usb->fhci, "no frame to confirm\n"); @@ -460,9 +460,9 @@ u32 fhci_host_transaction(struct fhci_usb *usb, out_be16(&td->length, pkt->len); /* put the frame to the confirmation queue */ - cq_put(ep->conf_frame_Q, pkt); + cq_put(&ep->conf_frame_Q, pkt); - if (cq_howmany(ep->conf_frame_Q) == 1) + if (cq_howmany(&ep->conf_frame_Q) == 1) out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); return 0; diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h index 7116284ed21..71c3caaea4c 100644 --- a/drivers/usb/host/fhci.h +++ b/drivers/usb/host/fhci.h @@ -20,13 +20,14 @@ #include <linux/kernel.h> #include <linux/types.h> +#include <linux/bug.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/kfifo.h> #include <linux/io.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <asm/qe.h> -#include "../core/hcd.h" #define USB_CLOCK 48000000 @@ -423,9 +424,9 @@ struct endpoint { struct usb_td __iomem *td_base; /* first TD in the ring */ struct usb_td __iomem *conf_td; /* next TD for confirm after transac */ struct usb_td __iomem *empty_td;/* next TD for new transaction req. */ - struct kfifo *empty_frame_Q; /* Empty frames list to use */ - struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */ - struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overun */ + struct kfifo empty_frame_Q; /* Empty frames list to use */ + struct kfifo conf_frame_Q; /* frames passed to TDs,waiting for tx */ + struct kfifo dummy_packets_Q;/* dummy packets for the CRC overun */ bool already_pushed_dummy_bd; }; @@ -493,9 +494,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci) } /* fifo of pointers */ -static inline struct kfifo *cq_new(int size) +static inline int cq_new(struct kfifo *fifo, int size) { - return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL); + return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL); } static inline void cq_delete(struct kfifo *kfifo) @@ -505,19 +506,23 @@ static inline void cq_delete(struct kfifo *kfifo) static inline unsigned int cq_howmany(struct kfifo *kfifo) { - return __kfifo_len(kfifo) / sizeof(void *); + return kfifo_len(kfifo) / sizeof(void *); } static inline int cq_put(struct kfifo *kfifo, void *p) { - return __kfifo_put(kfifo, (void *)&p, sizeof(p)); + return kfifo_in(kfifo, (void *)&p, sizeof(p)); } static inline void *cq_get(struct kfifo *kfifo) { - void *p = NULL; + unsigned int sz; + void *p; + + sz = kfifo_out(kfifo, (void *)&p, sizeof(p)); + if (sz != sizeof(p)) + return NULL; - __kfifo_get(kfifo, (void *)&p, sizeof(p)); return p; } diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 88b03214622..35742f8c7cd 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -55,6 +55,7 @@ */ #include <linux/kernel.h> #include <linux/init.h> +#include <linux/slab.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/wait.h> diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c new file mode 100644 index 00000000000..512f647448c --- /dev/null +++ b/drivers/usb/host/imx21-dbg.c @@ -0,0 +1,527 @@ +/* + * Copyright (c) 2009 by Martin Fuzzey + * + * This program is free software; you can 
redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of imx21-hcd.c */ + +#ifndef DEBUG + +static inline void create_debug_files(struct imx21 *imx21) { } +static inline void remove_debug_files(struct imx21 *imx21) { } +static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {} +static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb, + int status) {} +static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {} +static inline void debug_urb_queued_for_etd(struct imx21 *imx21, + struct urb *urb) {} +static inline void debug_urb_queued_for_dmem(struct imx21 *imx21, + struct urb *urb) {} +static inline void debug_etd_allocated(struct imx21 *imx21) {} +static inline void debug_etd_freed(struct imx21 *imx21) {} +static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {} +static inline void debug_dmem_freed(struct imx21 *imx21, int size) {} +static inline void debug_isoc_submitted(struct imx21 *imx21, + int frame, struct td *td) {} +static inline void debug_isoc_completed(struct imx21 *imx21, + int frame, struct td *td, int cc, int len) {} + +#else + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +static const char *dir_labels[] = { + "TD 0", + "OUT", + "IN", + "TD 1" +}; + +static const char *speed_labels[] = { + "Full", + "Low" +}; + +static const char *format_labels[] = { + "Control", + "ISO", + "Bulk", + "Interrupt" +}; + +static inline struct debug_stats *stats_for_urb(struct imx21 *imx21, + struct urb *urb) +{ + return usb_pipeisoc(urb->pipe) ? 
+ &imx21->isoc_stats : &imx21->nonisoc_stats; +} + +static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->submitted++; +} + +static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st) +{ + if (st) + stats_for_urb(imx21, urb)->completed_failed++; + else + stats_for_urb(imx21, urb)->completed_ok++; +} + +static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->unlinked++; +} + +static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->queue_etd++; +} + +static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->queue_dmem++; +} + +static inline void debug_etd_allocated(struct imx21 *imx21) +{ + imx21->etd_usage.maximum = max( + ++(imx21->etd_usage.value), + imx21->etd_usage.maximum); +} + +static inline void debug_etd_freed(struct imx21 *imx21) +{ + imx21->etd_usage.value--; +} + +static inline void debug_dmem_allocated(struct imx21 *imx21, int size) +{ + imx21->dmem_usage.value += size; + imx21->dmem_usage.maximum = max( + imx21->dmem_usage.value, + imx21->dmem_usage.maximum); +} + +static inline void debug_dmem_freed(struct imx21 *imx21, int size) +{ + imx21->dmem_usage.value -= size; +} + + +static void debug_isoc_submitted(struct imx21 *imx21, + int frame, struct td *td) +{ + struct debug_isoc_trace *trace = &imx21->isoc_trace[ + imx21->isoc_trace_index++]; + + imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace); + trace->schedule_frame = td->frame; + trace->submit_frame = frame; + trace->request_len = td->len; + trace->td = td; +} + +static inline void debug_isoc_completed(struct imx21 *imx21, + int frame, struct td *td, int cc, int len) +{ + struct debug_isoc_trace *trace, *trace_failed; + int i; + int found = 0; + + trace = imx21->isoc_trace; + for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) { + if (trace->td == td) { + trace->done_frame = frame; + trace->done_len = len; + trace->cc = cc; + trace->td = NULL; + found = 1; + break; + } + } + + if (found && cc) { + trace_failed = &imx21->isoc_trace_failed[ + imx21->isoc_trace_index_failed++]; + + imx21->isoc_trace_index_failed %= ARRAY_SIZE( + imx21->isoc_trace_failed); + *trace_failed = *trace; + } +} + + +static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize) +{ + if (ep) + snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)", + ep->desc.bEndpointAddress, + usb_endpoint_type(&ep->desc), + ep); + else + snprintf(buf, bufsize, "none"); + return buf; +} + +static char *format_etd_dword0(u32 value, char *buf, int bufsize) +{ + snprintf(buf, bufsize, + "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d", + value & 0x7F, + (value >> DW0_ENDPNT) & 0x0F, + dir_labels[(value >> DW0_DIRECT) & 0x03], + speed_labels[(value >> DW0_SPEED) & 0x01], + format_labels[(value >> DW0_FORMAT) & 0x03], + (value >> DW0_HALTED) & 0x01); + return buf; +} + +static int debug_status_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + int etds_allocated = 0; + int etds_sw_busy = 0; + int etds_hw_busy = 0; + int dmem_blocks = 0; + int queued_for_etd = 0; + int queued_for_dmem = 0; + unsigned int dmem_bytes = 0; + int i; + struct etd_priv *etd; + u32 etd_enable_mask; + unsigned long flags; + struct imx21_dmem_area *dmem; + struct ep_priv *ep_priv; + + spin_lock_irqsave(&imx21->lock, flags); + + etd_enable_mask = readl(imx21->regs + USBH_ETDENSET); + for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; 
i++, etd++) { + if (etd->alloc) + etds_allocated++; + if (etd->urb) + etds_sw_busy++; + if (etd_enable_mask & (1<<i)) + etds_hw_busy++; + } + + list_for_each_entry(dmem, &imx21->dmem_list, list) { + dmem_bytes += dmem->size; + dmem_blocks++; + } + + list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue) + queued_for_etd++; + + list_for_each_entry(etd, &imx21->queue_for_dmem, queue) + queued_for_dmem++; + + spin_unlock_irqrestore(&imx21->lock, flags); + + seq_printf(s, + "Frame: %d\n" + "ETDs allocated: %d/%d (max=%d)\n" + "ETDs in use sw: %d\n" + "ETDs in use hw: %d\n" + "DMEM alocated: %d/%d (max=%d)\n" + "DMEM blocks: %d\n" + "Queued waiting for ETD: %d\n" + "Queued waiting for DMEM: %d\n", + readl(imx21->regs + USBH_FRMNUB) & 0xFFFF, + etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum, + etds_sw_busy, + etds_hw_busy, + dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum, + dmem_blocks, + queued_for_etd, + queued_for_dmem); + + return 0; +} + +static int debug_dmem_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + struct imx21_dmem_area *dmem; + unsigned long flags; + char ep_text[40]; + + spin_lock_irqsave(&imx21->lock, flags); + + list_for_each_entry(dmem, &imx21->dmem_list, list) + seq_printf(s, + "%04X: size=0x%X " + "ep=%s\n", + dmem->offset, dmem->size, + format_ep(dmem->ep, ep_text, sizeof(ep_text))); + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static int debug_etd_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + struct etd_priv *etd; + char buf[60]; + u32 dword; + int i, j; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) { + int state = -1; + struct urb_priv *urb_priv; + if (etd->urb) { + urb_priv = etd->urb->hcpriv; + if (urb_priv) + state = urb_priv->state; + } + + seq_printf(s, + "etd_num: %d\n" + "ep: %s\n" + "alloc: %d\n" + "len: %d\n" + "busy sw: %d\n" + "busy hw: %d\n" + "urb state: %d\n" + "current urb: %p\n", + + i, + format_ep(etd->ep, buf, sizeof(buf)), + etd->alloc, + etd->len, + etd->urb != NULL, + (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0, + state, + etd->urb); + + for (j = 0; j < 4; j++) { + dword = etd_readl(imx21, i, j); + switch (j) { + case 0: + format_etd_dword0(dword, buf, sizeof(buf)); + break; + case 2: + snprintf(buf, sizeof(buf), + "cc=0X%02X", dword >> DW2_COMPCODE); + break; + default: + *buf = 0; + break; + } + seq_printf(s, + "dword %d: submitted=%08X cur=%08X [%s]\n", + j, + etd->submitted_dwords[j], + dword, + buf); + } + seq_printf(s, "\n"); + } + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static void debug_statistics_show_one(struct seq_file *s, + const char *name, struct debug_stats *stats) +{ + seq_printf(s, "%s:\n" + "submitted URBs: %lu\n" + "completed OK: %lu\n" + "completed failed: %lu\n" + "unlinked: %lu\n" + "queued for ETD: %lu\n" + "queued for DMEM: %lu\n\n", + name, + stats->submitted, + stats->completed_ok, + stats->completed_failed, + stats->unlinked, + stats->queue_etd, + stats->queue_dmem); +} + +static int debug_statistics_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats); + debug_statistics_show_one(s, "isoc", &imx21->isoc_stats); + seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks); + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static void 
debug_isoc_show_one(struct seq_file *s, + const char *name, int index, struct debug_isoc_trace *trace) +{ + seq_printf(s, "%s %d:\n" + "cc=0X%02X\n" + "scheduled frame %d (%d)\n" + "submittted frame %d (%d)\n" + "completed frame %d (%d)\n" + "requested length=%d\n" + "completed length=%d\n\n", + name, index, + trace->cc, + trace->schedule_frame, trace->schedule_frame & 0xFFFF, + trace->submit_frame, trace->submit_frame & 0xFFFF, + trace->done_frame, trace->done_frame & 0xFFFF, + trace->request_len, + trace->done_len); +} + +static int debug_isoc_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + struct debug_isoc_trace *trace; + unsigned long flags; + int i; + + spin_lock_irqsave(&imx21->lock, flags); + + trace = imx21->isoc_trace_failed; + for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++) + debug_isoc_show_one(s, "isoc failed", i, trace); + + trace = imx21->isoc_trace; + for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) + debug_isoc_show_one(s, "isoc", i, trace); + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static int debug_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_status_show, inode->i_private); +} + +static int debug_dmem_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_dmem_show, inode->i_private); +} + +static int debug_etd_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_etd_show, inode->i_private); +} + +static int debug_statistics_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_statistics_show, inode->i_private); +} + +static int debug_isoc_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_isoc_show, inode->i_private); +} + +static const struct file_operations debug_status_fops = { + .open = debug_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_dmem_fops = { + .open = debug_dmem_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_etd_fops = { + .open = debug_etd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_statistics_fops = { + .open = debug_statistics_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_isoc_fops = { + .open = debug_isoc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void create_debug_files(struct imx21 *imx21) +{ + imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL); + if (!imx21->debug_root) + goto failed_create_rootdir; + + if (!debugfs_create_file("status", S_IRUGO, + imx21->debug_root, imx21, &debug_status_fops)) + goto failed_create; + + if (!debugfs_create_file("dmem", S_IRUGO, + imx21->debug_root, imx21, &debug_dmem_fops)) + goto failed_create; + + if (!debugfs_create_file("etd", S_IRUGO, + imx21->debug_root, imx21, &debug_etd_fops)) + goto failed_create; + + if (!debugfs_create_file("statistics", S_IRUGO, + imx21->debug_root, imx21, &debug_statistics_fops)) + goto failed_create; + + if (!debugfs_create_file("isoc", S_IRUGO, + imx21->debug_root, imx21, &debug_isoc_fops)) + goto failed_create; + + return; + +failed_create: + debugfs_remove_recursive(imx21->debug_root); + +failed_create_rootdir: + imx21->debug_root = NULL; +} + + +static void 
remove_debug_files(struct imx21 *imx21)
+{
+	if (imx21->debug_root) {
+		debugfs_remove_recursive(imx21->debug_root);
+		imx21->debug_root = NULL;
+	}
+}
+
+#endif
+
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 00000000000..ca0e98d8e1f
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1790 @@
+/*
+ * USB Host Controller Driver for IMX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+ /*
+ * The i.MX21 USB hardware contains
+ * * 32 transfer descriptors (called ETDs)
+ * * 4KB of Data memory
+ *
+ * The data memory is shared between the host and function controllers
+ * (but this driver only supports the host controller)
+ *
+ * So setting up a transfer involves:
+ * * Allocating an ETD
+ * * Filling in the ETD with appropriate information
+ * * Allocating data memory (and putting the offset in the ETD)
+ * * Activating the ETD
+ * * Getting an interrupt when done.
+ *
+ * An ETD is assigned to each active endpoint.
+ *
+ * Low resource (ETD and Data memory) situations are handled differently for
+ * isochronous and non-isochronous transactions:
+ *
+ * Non-ISOC transfers are queued if either ETDs or Data memory are unavailable
+ *
+ * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
+ * They allocate both ETDs and Data memory during URB submission
+ * (and fail if unavailable).
+ */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/usb.h> +#include <linux/usb/hcd.h> + +#include "imx21-hcd.h" + +#ifdef DEBUG +#define DEBUG_LOG_FRAME(imx21, etd, event) \ + (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB) +#else +#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0) +#endif + +static const char hcd_name[] = "imx21-hcd"; + +static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd) +{ + return (struct imx21 *)hcd->hcd_priv; +} + + +/* =========================================== */ +/* Hardware access helpers */ +/* =========================================== */ + +static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + writel(readl(reg) | mask, reg); +} + +static inline void clear_register_bits(struct imx21 *imx21, + u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + writel(readl(reg) & ~mask, reg); +} + +static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + + if (readl(reg) & mask) + writel(mask, reg); +} + +static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + + if (!(readl(reg) & mask)) + writel(mask, reg); +} + +static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value) +{ + writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword)); +} + +static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword) +{ + return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword)); +} + +static inline int wrap_frame(int counter) +{ + return counter & 0xFFFF; +} + +static inline int frame_after(int frame, int after) +{ + /* handle wrapping like jiffies time_afer */ + return (s16)((s16)after - (s16)frame) < 0; +} + +static int imx21_hc_get_frame(struct usb_hcd *hcd) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + + return wrap_frame(readl(imx21->regs + USBH_FRMNUB)); +} + + +#include "imx21-dbg.c" + +/* =========================================== */ +/* ETD management */ +/* =========================================== */ + +static int alloc_etd(struct imx21 *imx21) +{ + int i; + struct etd_priv *etd = imx21->etd; + + for (i = 0; i < USB_NUM_ETD; i++, etd++) { + if (etd->alloc == 0) { + memset(etd, 0, sizeof(imx21->etd[0])); + etd->alloc = 1; + debug_etd_allocated(imx21); + return i; + } + } + return -1; +} + +static void disactivate_etd(struct imx21 *imx21, int num) +{ + int etd_mask = (1 << num); + struct etd_priv *etd = &imx21->etd[num]; + + writel(etd_mask, imx21->regs + USBH_ETDENCLR); + clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask); + writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR); + clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask); + + etd->active_count = 0; + + DEBUG_LOG_FRAME(imx21, etd, disactivated); +} + +static void reset_etd(struct imx21 *imx21, int num) +{ + struct etd_priv *etd = imx21->etd + num; + int i; + + disactivate_etd(imx21, num); + + for (i = 0; i < 4; i++) + etd_writel(imx21, num, i, 0); + etd->urb = NULL; + etd->ep = NULL; + etd->td = NULL;; +} + +static void free_etd(struct imx21 *imx21, int num) +{ + if (num < 0) + return; + + if (num >= USB_NUM_ETD) { + dev_err(imx21->dev, "BAD etd=%d!\n", num); + return; + } + if (imx21->etd[num].alloc == 0) { + dev_err(imx21->dev, "ETD %d already free!\n", num); + return; + } + + debug_etd_freed(imx21); 
+ reset_etd(imx21, num); + memset(&imx21->etd[num], 0, sizeof(imx21->etd[0])); +} + + +static void setup_etd_dword0(struct imx21 *imx21, + int etd_num, struct urb *urb, u8 dir, u16 maxpacket) +{ + etd_writel(imx21, etd_num, 0, + ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS | + ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) | + ((u32) dir << DW0_DIRECT) | + ((u32) ((urb->dev->speed == USB_SPEED_LOW) ? + 1 : 0) << DW0_SPEED) | + ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) | + ((u32) maxpacket << DW0_MAXPKTSIZ)); +} + +static void activate_etd(struct imx21 *imx21, + int etd_num, dma_addr_t dma, u8 dir) +{ + u32 etd_mask = 1 << etd_num; + struct etd_priv *etd = &imx21->etd[etd_num]; + + clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask); + set_register_bits(imx21, USBH_ETDDONEEN, etd_mask); + clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); + + if (dma) { + set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask); + clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask); + clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask); + writel(dma, imx21->regs + USB_ETDSMSA(etd_num)); + set_register_bits(imx21, USB_ETDDMAEN, etd_mask); + } else { + if (dir != TD_DIR_IN) { + /* need to set for ZLP */ + set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); + } + } + + DEBUG_LOG_FRAME(imx21, etd, activated); + +#ifdef DEBUG + if (!etd->active_count) { + int i; + etd->activated_frame = readl(imx21->regs + USBH_FRMNUB); + etd->disactivated_frame = -1; + etd->last_int_frame = -1; + etd->last_req_frame = -1; + + for (i = 0; i < 4; i++) + etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i); + } +#endif + + etd->active_count = 1; + writel(etd_mask, imx21->regs + USBH_ETDENSET); +} + +/* =========================================== */ +/* Data memory management */ +/* =========================================== */ + +static int alloc_dmem(struct imx21 *imx21, unsigned int size, + struct usb_host_endpoint *ep) +{ + unsigned int offset = 0; + struct imx21_dmem_area *area; + struct imx21_dmem_area *tmp; + + size += (~size + 1) & 0x3; /* Round to 4 byte multiple */ + + if (size > DMEM_SIZE) { + dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n", + size, DMEM_SIZE); + return -EINVAL; + } + + list_for_each_entry(tmp, &imx21->dmem_list, list) { + if ((size + offset) < offset) + goto fail; + if ((size + offset) <= tmp->offset) + break; + offset = tmp->size + tmp->offset; + if ((offset + size) > DMEM_SIZE) + goto fail; + } + + area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC); + if (area == NULL) + return -ENOMEM; + + area->ep = ep; + area->offset = offset; + area->size = size; + list_add_tail(&area->list, &tmp->list); + debug_dmem_allocated(imx21, size); + return offset; + +fail: + return -ENOMEM; +} + +/* Memory now available for a queued ETD - activate it */ +static void activate_queued_etd(struct imx21 *imx21, + struct etd_priv *etd, u32 dmem_offset) +{ + struct urb_priv *urb_priv = etd->urb->hcpriv; + int etd_num = etd - &imx21->etd[0]; + u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD; + u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03; + + dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n", + etd_num); + etd_writel(imx21, etd_num, 1, + ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset); + + urb_priv->active = 1; + activate_etd(imx21, etd_num, etd->dma_handle, dir); +} + +static void free_dmem(struct imx21 *imx21, int offset) +{ + struct 
imx21_dmem_area *area;
+	struct etd_priv *etd, *tmp;
+	int found = 0;
+
+	list_for_each_entry(area, &imx21->dmem_list, list) {
+		if (area->offset == offset) {
+			debug_dmem_freed(imx21, area->size);
+			list_del(&area->list);
+			kfree(area);
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		dev_err(imx21->dev,
+			"Trying to free unallocated DMEM %d\n", offset);
+		return;
+	}
+
+	/* Try again to allocate memory for anything we've queued */
+	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
+		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
+		if (offset >= 0) {
+			list_del(&etd->queue);
+			activate_queued_etd(imx21, etd, (u32)offset);
+		}
+	}
+}
+
+static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+	struct imx21_dmem_area *area, *tmp;
+
+	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
+		if (area->ep == ep) {
+			dev_err(imx21->dev,
+				"Active DMEM %d for disabled ep=%p\n",
+				area->offset, ep);
+			list_del(&area->list);
+			kfree(area);
+		}
+	}
+}
+
+
+/* =========================================== */
+/* Endpoint handling */
+/* =========================================== */
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
+static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+	int etd_num;
+	int i;
+
+	for (i = 0; i < NUM_ISO_ETDS; i++) {
+		etd_num = ep_priv->etd[i];
+		if (etd_num < 0)
+			continue;
+
+		ep_priv->etd[i] = -1;
+		if (list_empty(&imx21->queue_for_etd)) {
+			free_etd(imx21, etd_num);
+			continue;
+		}
+
+		dev_dbg(imx21->dev,
+			"assigning idle etd %d for queued request\n", etd_num);
+		ep_priv = list_first_entry(&imx21->queue_for_etd,
+			struct ep_priv, queue);
+		list_del(&ep_priv->queue);
+		reset_etd(imx21, etd_num);
+		ep_priv->waiting_etd = 0;
+		ep_priv->etd[i] = etd_num;
+
+		if (list_empty(&ep_priv->ep->urb_list)) {
+			dev_err(imx21->dev, "No urb for queued ep!\n");
+			continue;
+		}
+		schedule_nonisoc_etd(imx21, list_first_entry(
+			&ep_priv->ep->urb_list, struct urb, urb_list));
+	}
+}
+
+static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
+__releases(imx21->lock)
+__acquires(imx21->lock)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	struct ep_priv *ep_priv = urb->ep->hcpriv;
+	struct urb_priv *urb_priv = urb->hcpriv;
+
+	debug_urb_completed(imx21, urb, status);
+	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
+
+	kfree(urb_priv->isoc_td);
+	kfree(urb->hcpriv);
+	urb->hcpriv = NULL;
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	spin_unlock(&imx21->lock);
+	usb_hcd_giveback_urb(hcd, urb, status);
+	spin_lock(&imx21->lock);
+	if (list_empty(&ep_priv->ep->urb_list))
+		ep_idle(imx21, ep_priv);
+}
+
+/* =========================================== */
+/* ISOC Handling ...
*/ +/* =========================================== */ + +static void schedule_isoc_etds(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct ep_priv *ep_priv = ep->hcpriv; + struct etd_priv *etd; + struct urb_priv *urb_priv; + struct td *td; + int etd_num; + int i; + int cur_frame; + u8 dir; + + for (i = 0; i < NUM_ISO_ETDS; i++) { +too_late: + if (list_empty(&ep_priv->td_list)) + break; + + etd_num = ep_priv->etd[i]; + if (etd_num < 0) + break; + + etd = &imx21->etd[etd_num]; + if (etd->urb) + continue; + + td = list_entry(ep_priv->td_list.next, struct td, list); + list_del(&td->list); + urb_priv = td->urb->hcpriv; + + cur_frame = imx21_hc_get_frame(hcd); + if (frame_after(cur_frame, td->frame)) { + dev_dbg(imx21->dev, "isoc too late frame %d > %d\n", + cur_frame, td->frame); + urb_priv->isoc_status = -EXDEV; + td->urb->iso_frame_desc[ + td->isoc_index].actual_length = 0; + td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV; + if (--urb_priv->isoc_remaining == 0) + urb_done(hcd, td->urb, urb_priv->isoc_status); + goto too_late; + } + + urb_priv->active = 1; + etd->td = td; + etd->ep = td->ep; + etd->urb = td->urb; + etd->len = td->len; + + debug_isoc_submitted(imx21, cur_frame, td); + + dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN; + setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size); + etd_writel(imx21, etd_num, 1, etd->dmem_offset); + etd_writel(imx21, etd_num, 2, + (TD_NOTACCESSED << DW2_COMPCODE) | + ((td->frame & 0xFFFF) << DW2_STARTFRM)); + etd_writel(imx21, etd_num, 3, + (TD_NOTACCESSED << DW3_COMPCODE0) | + (td->len << DW3_PKTLEN0)); + + activate_etd(imx21, etd_num, td->data, dir); + } +} + +static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + int etd_mask = 1 << etd_num; + struct urb_priv *urb_priv = urb->hcpriv; + struct etd_priv *etd = imx21->etd + etd_num; + struct td *td = etd->td; + struct usb_host_endpoint *ep = etd->ep; + int isoc_index = td->isoc_index; + unsigned int pipe = urb->pipe; + int dir_in = usb_pipein(pipe); + int cc; + int bytes_xfrd; + + disactivate_etd(imx21, etd_num); + + cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf; + bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff; + + /* Input doesn't always fill the buffer, don't generate an error + * when this happens. 
+ */ + if (dir_in && (cc == TD_DATAUNDERRUN)) + cc = TD_CC_NOERROR; + + if (cc == TD_NOTACCESSED) + bytes_xfrd = 0; + + debug_isoc_completed(imx21, + imx21_hc_get_frame(hcd), td, cc, bytes_xfrd); + if (cc) { + urb_priv->isoc_status = -EXDEV; + dev_dbg(imx21->dev, + "bad iso cc=0x%X frame=%d sched frame=%d " + "cnt=%d len=%d urb=%p etd=%d index=%d\n", + cc, imx21_hc_get_frame(hcd), td->frame, + bytes_xfrd, td->len, urb, etd_num, isoc_index); + } + + if (dir_in) + clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + + urb->actual_length += bytes_xfrd; + urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd; + urb->iso_frame_desc[isoc_index].status = cc_to_error[cc]; + + etd->td = NULL; + etd->urb = NULL; + etd->ep = NULL; + + if (--urb_priv->isoc_remaining == 0) + urb_done(hcd, urb, urb_priv->isoc_status); + + schedule_isoc_etds(hcd, ep); +} + +static struct ep_priv *alloc_isoc_ep( + struct imx21 *imx21, struct usb_host_endpoint *ep) +{ + struct ep_priv *ep_priv; + int i; + + ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); + if (ep_priv == NULL) + return NULL; + + /* Allocate the ETDs */ + for (i = 0; i < NUM_ISO_ETDS; i++) { + ep_priv->etd[i] = alloc_etd(imx21); + if (ep_priv->etd[i] < 0) { + int j; + dev_err(imx21->dev, "isoc: Couldn't allocate etd\n"); + for (j = 0; j < i; j++) + free_etd(imx21, ep_priv->etd[j]); + goto alloc_etd_failed; + } + imx21->etd[ep_priv->etd[i]].ep = ep; + } + + INIT_LIST_HEAD(&ep_priv->td_list); + ep_priv->ep = ep; + ep->hcpriv = ep_priv; + return ep_priv; + +alloc_etd_failed: + kfree(ep_priv); + return NULL; +} + +static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, + struct usb_host_endpoint *ep, + struct urb *urb, gfp_t mem_flags) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct urb_priv *urb_priv; + unsigned long flags; + struct ep_priv *ep_priv; + struct td *td = NULL; + int i; + int ret; + int cur_frame; + u16 maxpacket; + + urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags); + if (urb_priv == NULL) + return -ENOMEM; + + urb_priv->isoc_td = kzalloc( + sizeof(struct td) * urb->number_of_packets, mem_flags); + if (urb_priv->isoc_td == NULL) { + ret = -ENOMEM; + goto alloc_td_failed; + } + + spin_lock_irqsave(&imx21->lock, flags); + + if (ep->hcpriv == NULL) { + ep_priv = alloc_isoc_ep(imx21, ep); + if (ep_priv == NULL) { + ret = -ENOMEM; + goto alloc_ep_failed; + } + } else { + ep_priv = ep->hcpriv; + } + + ret = usb_hcd_link_urb_to_ep(hcd, urb); + if (ret) + goto link_failed; + + urb->status = -EINPROGRESS; + urb->actual_length = 0; + urb->error_count = 0; + urb->hcpriv = urb_priv; + urb_priv->ep = ep; + + /* allocate data memory for largest packets if not already done */ + maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); + for (i = 0; i < NUM_ISO_ETDS; i++) { + struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]]; + + if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) { + /* not sure if this can really occur.... 
*/ + dev_err(imx21->dev, "increasing isoc buffer %d->%d\n", + etd->dmem_size, maxpacket); + ret = -EMSGSIZE; + goto alloc_dmem_failed; + } + + if (etd->dmem_size == 0) { + etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep); + if (etd->dmem_offset < 0) { + dev_dbg(imx21->dev, "failed alloc isoc dmem\n"); + ret = -EAGAIN; + goto alloc_dmem_failed; + } + etd->dmem_size = maxpacket; + } + } + + /* calculate frame */ + cur_frame = imx21_hc_get_frame(hcd); + if (urb->transfer_flags & URB_ISO_ASAP) { + if (list_empty(&ep_priv->td_list)) + urb->start_frame = cur_frame + 5; + else + urb->start_frame = list_entry( + ep_priv->td_list.prev, + struct td, list)->frame + urb->interval; + } + urb->start_frame = wrap_frame(urb->start_frame); + if (frame_after(cur_frame, urb->start_frame)) { + dev_dbg(imx21->dev, + "enqueue: adjusting iso start %d (cur=%d) asap=%d\n", + urb->start_frame, cur_frame, + (urb->transfer_flags & URB_ISO_ASAP) != 0); + urb->start_frame = wrap_frame(cur_frame + 1); + } + + /* set up transfers */ + td = urb_priv->isoc_td; + for (i = 0; i < urb->number_of_packets; i++, td++) { + td->ep = ep; + td->urb = urb; + td->len = urb->iso_frame_desc[i].length; + td->isoc_index = i; + td->frame = wrap_frame(urb->start_frame + urb->interval * i); + td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset; + list_add_tail(&td->list, &ep_priv->td_list); + } + + urb_priv->isoc_remaining = urb->number_of_packets; + dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n", + urb->number_of_packets, urb->start_frame, td->frame); + + debug_urb_submitted(imx21, urb); + schedule_isoc_etds(hcd, ep); + + spin_unlock_irqrestore(&imx21->lock, flags); + return 0; + +alloc_dmem_failed: + usb_hcd_unlink_urb_from_ep(hcd, urb); + +link_failed: +alloc_ep_failed: + spin_unlock_irqrestore(&imx21->lock, flags); + kfree(urb_priv->isoc_td); + +alloc_td_failed: + kfree(urb_priv); + return ret; +} + +static void dequeue_isoc_urb(struct imx21 *imx21, + struct urb *urb, struct ep_priv *ep_priv) +{ + struct urb_priv *urb_priv = urb->hcpriv; + struct td *td, *tmp; + int i; + + if (urb_priv->active) { + for (i = 0; i < NUM_ISO_ETDS; i++) { + int etd_num = ep_priv->etd[i]; + if (etd_num != -1 && imx21->etd[etd_num].urb == urb) { + struct etd_priv *etd = imx21->etd + etd_num; + + reset_etd(imx21, etd_num); + if (etd->dmem_size) + free_dmem(imx21, etd->dmem_offset); + etd->dmem_size = 0; + } + } + } + + list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) { + if (td->urb == urb) { + dev_vdbg(imx21->dev, "removing td %p\n", td); + list_del(&td->list); + } + } +} + +/* =========================================== */ +/* NON ISOC Handling ... 
*/ +/* =========================================== */ + +static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb) +{ + unsigned int pipe = urb->pipe; + struct urb_priv *urb_priv = urb->hcpriv; + struct ep_priv *ep_priv = urb_priv->ep->hcpriv; + int state = urb_priv->state; + int etd_num = ep_priv->etd[0]; + struct etd_priv *etd; + int dmem_offset; + u32 count; + u16 etd_buf_size; + u16 maxpacket; + u8 dir; + u8 bufround; + u8 datatoggle; + u8 interval = 0; + u8 relpolpos = 0; + + if (etd_num < 0) { + dev_err(imx21->dev, "No valid ETD\n"); + return; + } + if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num)) + dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num); + + etd = &imx21->etd[etd_num]; + maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe)); + if (!maxpacket) + maxpacket = 8; + + if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) { + if (state == US_CTRL_SETUP) { + dir = TD_DIR_SETUP; + etd->dma_handle = urb->setup_dma; + bufround = 0; + count = 8; + datatoggle = TD_TOGGLE_DATA0; + } else { /* US_CTRL_ACK */ + dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT; + etd->dma_handle = urb->transfer_dma; + bufround = 0; + count = 0; + datatoggle = TD_TOGGLE_DATA1; + } + } else { + dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN; + bufround = (dir == TD_DIR_IN) ? 1 : 0; + etd->dma_handle = urb->transfer_dma; + if (usb_pipebulk(pipe) && (state == US_BULK0)) + count = 0; + else + count = urb->transfer_buffer_length; + + if (usb_pipecontrol(pipe)) { + datatoggle = TD_TOGGLE_DATA1; + } else { + if (usb_gettoggle( + urb->dev, + usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe))) + datatoggle = TD_TOGGLE_DATA1; + else + datatoggle = TD_TOGGLE_DATA0; + } + } + + etd->urb = urb; + etd->ep = urb_priv->ep; + etd->len = count; + + if (usb_pipeint(pipe)) { + interval = urb->interval; + relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff; + } + + /* Write ETD to device memory */ + setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket); + + etd_writel(imx21, etd_num, 2, + (u32) interval << DW2_POLINTERV | + ((u32) relpolpos << DW2_RELPOLPOS) | + ((u32) dir << DW2_DIRPID) | + ((u32) bufround << DW2_BUFROUND) | + ((u32) datatoggle << DW2_DATATOG) | + ((u32) TD_NOTACCESSED << DW2_COMPCODE)); + + /* DMA will always transfer buffer size even if TOBYCNT in DWORD3 + is smaller. Make sure we don't overrun the buffer! + */ + if (count && count < maxpacket) + etd_buf_size = count; + else + etd_buf_size = maxpacket; + + etd_writel(imx21, etd_num, 3, + ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count); + + if (!count) + etd->dma_handle = 0; + + /* allocate x and y buffer space at once */ + etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket; + dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep); + if (dmem_offset < 0) { + /* Setup everything we can in HW and update when we get DMEM */ + etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16); + + dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num); + debug_urb_queued_for_dmem(imx21, urb); + list_add_tail(&etd->queue, &imx21->queue_for_dmem); + return; + } + + etd_writel(imx21, etd_num, 1, + (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) | + (u32) dmem_offset); + + urb_priv->active = 1; + + /* enable the ETD to kick off transfer */ + dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n", + etd_num, count, dir != TD_DIR_IN ? 
"out" : "in"); + activate_etd(imx21, etd_num, etd->dma_handle, dir); + +} + +static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct etd_priv *etd = &imx21->etd[etd_num]; + u32 etd_mask = 1 << etd_num; + struct urb_priv *urb_priv = urb->hcpriv; + int dir; + u16 xbufaddr; + int cc; + u32 bytes_xfrd; + int etd_done; + + disactivate_etd(imx21, etd_num); + + dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3; + xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff; + cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf; + bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff); + + /* save toggle carry */ + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe), + (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1); + + if (dir == TD_DIR_IN) { + clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); + } + free_dmem(imx21, xbufaddr); + + urb->error_count = 0; + if (!(urb->transfer_flags & URB_SHORT_NOT_OK) + && (cc == TD_DATAUNDERRUN)) + cc = TD_CC_NOERROR; + + if (cc != 0) + dev_vdbg(imx21->dev, "cc is 0x%x\n", cc); + + etd_done = (cc_to_error[cc] != 0); /* stop if error */ + + switch (usb_pipetype(urb->pipe)) { + case PIPE_CONTROL: + switch (urb_priv->state) { + case US_CTRL_SETUP: + if (urb->transfer_buffer_length > 0) + urb_priv->state = US_CTRL_DATA; + else + urb_priv->state = US_CTRL_ACK; + break; + case US_CTRL_DATA: + urb->actual_length += bytes_xfrd; + urb_priv->state = US_CTRL_ACK; + break; + case US_CTRL_ACK: + etd_done = 1; + break; + default: + dev_err(imx21->dev, + "Invalid pipe state %d\n", urb_priv->state); + etd_done = 1; + break; + } + break; + + case PIPE_BULK: + urb->actual_length += bytes_xfrd; + if ((urb_priv->state == US_BULK) + && (urb->transfer_flags & URB_ZERO_PACKET) + && urb->transfer_buffer_length > 0 + && ((urb->transfer_buffer_length % + usb_maxpacket(urb->dev, urb->pipe, + usb_pipeout(urb->pipe))) == 0)) { + /* need a 0-packet */ + urb_priv->state = US_BULK0; + } else { + etd_done = 1; + } + break; + + case PIPE_INTERRUPT: + urb->actual_length += bytes_xfrd; + etd_done = 1; + break; + } + + if (!etd_done) { + dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state); + schedule_nonisoc_etd(imx21, urb); + } else { + struct usb_host_endpoint *ep = urb->ep; + + urb_done(hcd, urb, cc_to_error[cc]); + etd->urb = NULL; + + if (!list_empty(&ep->urb_list)) { + urb = list_first_entry(&ep->urb_list, + struct urb, urb_list); + dev_vdbg(imx21->dev, "next URB %p\n", urb); + schedule_nonisoc_etd(imx21, urb); + } + } +} + +static struct ep_priv *alloc_ep(void) +{ + int i; + struct ep_priv *ep_priv; + + ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); + if (!ep_priv) + return NULL; + + for (i = 0; i < NUM_ISO_ETDS; ++i) + ep_priv->etd[i] = -1; + + return ep_priv; +} + +static int imx21_hc_urb_enqueue(struct usb_hcd *hcd, + struct urb *urb, gfp_t mem_flags) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct usb_host_endpoint *ep = urb->ep; + struct urb_priv *urb_priv; + struct ep_priv *ep_priv; + struct etd_priv *etd; + int ret; + unsigned long flags; + int new_ep = 0; + + dev_vdbg(imx21->dev, + "enqueue urb=%p ep=%p len=%d " + "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n", + urb, ep, + urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma, + urb->setup_packet, urb->setup_dma); + + if (usb_pipeisoc(urb->pipe)) + return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags); + + urb_priv = 
kzalloc(sizeof(struct urb_priv), mem_flags); + if (!urb_priv) + return -ENOMEM; + + spin_lock_irqsave(&imx21->lock, flags); + + ep_priv = ep->hcpriv; + if (ep_priv == NULL) { + ep_priv = alloc_ep(); + if (!ep_priv) { + ret = -ENOMEM; + goto failed_alloc_ep; + } + ep->hcpriv = ep_priv; + ep_priv->ep = ep; + new_ep = 1; + } + + ret = usb_hcd_link_urb_to_ep(hcd, urb); + if (ret) + goto failed_link; + + urb->status = -EINPROGRESS; + urb->actual_length = 0; + urb->error_count = 0; + urb->hcpriv = urb_priv; + urb_priv->ep = ep; + + switch (usb_pipetype(urb->pipe)) { + case PIPE_CONTROL: + urb_priv->state = US_CTRL_SETUP; + break; + case PIPE_BULK: + urb_priv->state = US_BULK; + break; + } + + debug_urb_submitted(imx21, urb); + if (ep_priv->etd[0] < 0) { + if (ep_priv->waiting_etd) { + dev_dbg(imx21->dev, + "no ETD available already queued %p\n", + ep_priv); + debug_urb_queued_for_etd(imx21, urb); + goto out; + } + ep_priv->etd[0] = alloc_etd(imx21); + if (ep_priv->etd[0] < 0) { + dev_dbg(imx21->dev, + "no ETD available queueing %p\n", ep_priv); + debug_urb_queued_for_etd(imx21, urb); + list_add_tail(&ep_priv->queue, &imx21->queue_for_etd); + ep_priv->waiting_etd = 1; + goto out; + } + } + + /* Schedule if no URB already active for this endpoint */ + etd = &imx21->etd[ep_priv->etd[0]]; + if (etd->urb == NULL) { + DEBUG_LOG_FRAME(imx21, etd, last_req); + schedule_nonisoc_etd(imx21, urb); + } + +out: + spin_unlock_irqrestore(&imx21->lock, flags); + return 0; + +failed_link: +failed_alloc_ep: + spin_unlock_irqrestore(&imx21->lock, flags); + kfree(urb_priv); + return ret; +} + +static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, + int status) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + unsigned long flags; + struct usb_host_endpoint *ep; + struct ep_priv *ep_priv; + struct urb_priv *urb_priv = urb->hcpriv; + int ret = -EINVAL; + + dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n", + urb, usb_pipeisoc(urb->pipe), status); + + spin_lock_irqsave(&imx21->lock, flags); + + ret = usb_hcd_check_unlink_urb(hcd, urb, status); + if (ret) + goto fail; + ep = urb_priv->ep; + ep_priv = ep->hcpriv; + + debug_urb_unlinked(imx21, urb); + + if (usb_pipeisoc(urb->pipe)) { + dequeue_isoc_urb(imx21, urb, ep_priv); + schedule_isoc_etds(hcd, ep); + } else if (urb_priv->active) { + int etd_num = ep_priv->etd[0]; + if (etd_num != -1) { + disactivate_etd(imx21, etd_num); + free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff); + imx21->etd[etd_num].urb = NULL; + } + } + + urb_done(hcd, urb, status); + + spin_unlock_irqrestore(&imx21->lock, flags); + return 0; + +fail: + spin_unlock_irqrestore(&imx21->lock, flags); + return ret; +} + +/* =========================================== */ +/* Interrupt dispatch */ +/* =========================================== */ + +static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof) +{ + int etd_num; + int enable_sof_int = 0; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) { + u32 etd_mask = 1 << etd_num; + u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask; + u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask; + struct etd_priv *etd = &imx21->etd[etd_num]; + + + if (done) { + DEBUG_LOG_FRAME(imx21, etd, last_int); + } else { +/* + * Kludge warning! 
+ *
+ * When multiple transfers are using the bus we sometimes get into a state
+ * where the transfer has completed (the CC field of the ETD is != 0x0F),
+ * the ETD has self disabled but the ETDDONESTAT flag is not set
+ * (and hence no interrupt occurs).
+ * This causes the transfer in question to hang.
+ * The kludge below checks for this condition at each SOF and processes any
+ * blocked ETDs (after an arbitrary 10 frame wait)
+ *
+ * With a single active transfer the usbtest test suite will run for days
+ * without the kludge.
+ * With other bus activity (eg mass storage) even just test1 will hang without
+ * the kludge.
+ */
+			u32 dword0;
+			int cc;
+
+			if (etd->active_count && !enabled) /* suspicious... */
+				enable_sof_int = 1;
+
+			if (!sof || enabled || !etd->active_count)
+				continue;
+
+			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
+			if (cc == TD_NOTACCESSED)
+				continue;
+
+			if (++etd->active_count < 10)
+				continue;
+
+			dword0 = etd_readl(imx21, etd_num, 0);
+			dev_dbg(imx21->dev,
+				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
+				etd_num, dword0 & 0x7F,
+				(dword0 >> DW0_ENDPNT) & 0x0F,
+				cc);
+
+#ifdef DEBUG
+			dev_dbg(imx21->dev,
+				"frame: act=%d disact=%d"
+				" int=%d req=%d cur=%d\n",
+				etd->activated_frame,
+				etd->disactivated_frame,
+				etd->last_int_frame,
+				etd->last_req_frame,
+				readl(imx21->regs + USBH_FRMNUB));
+			imx21->debug_unblocks++;
+#endif
+			etd->active_count = 0;
+/* End of kludge */
+		}
+
+		if (etd->ep == NULL || etd->urb == NULL) {
+			dev_dbg(imx21->dev,
+				"Interrupt for unexpected etd %d"
+				" ep=%p urb=%p\n",
+				etd_num, etd->ep, etd->urb);
+			disactivate_etd(imx21, etd_num);
+			continue;
+		}
+
+		if (usb_pipeisoc(etd->urb->pipe))
+			isoc_etd_done(hcd, etd->urb, etd_num);
+		else
+			nonisoc_etd_done(hcd, etd->urb, etd_num);
+	}
+
+	/* only enable SOF interrupt if it may be needed for the kludge */
+	if (enable_sof_int)
+		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+	else
+		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+static irqreturn_t imx21_irq(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	u32 ints = readl(imx21->regs + USBH_SYSISR);
+
+	if (ints & USBH_SYSIEN_HERRINT)
+		dev_dbg(imx21->dev, "Scheduling error\n");
+
+	if (ints & USBH_SYSIEN_SORINT)
+		dev_dbg(imx21->dev, "Scheduling overrun\n");
+
+	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
+		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
+
+	writel(ints, imx21->regs + USBH_SYSISR);
+	return IRQ_HANDLED;
+}
+
+static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
+					struct usb_host_endpoint *ep)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long flags;
+	struct ep_priv *ep_priv;
+	int i;
+
+	if (ep == NULL)
+		return;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+	ep_priv = ep->hcpriv;
+	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
+
+	if (!list_empty(&ep->urb_list))
+		dev_dbg(imx21->dev, "ep's URB list is not empty\n");
+
+	if (ep_priv != NULL) {
+		for (i = 0; i < NUM_ISO_ETDS; i++) {
+			if (ep_priv->etd[i] > -1)
+				dev_dbg(imx21->dev, "free etd %d for disable\n",
+					ep_priv->etd[i]);
+
+			free_etd(imx21, ep_priv->etd[i]);
+		}
+		kfree(ep_priv);
+		ep->hcpriv = NULL;
+	}
+
+	for (i = 0; i < USB_NUM_ETD; i++) {
+		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
+			dev_err(imx21->dev,
+				"Active etd %d for disabled ep=%p!\n", i, ep);
+			free_etd(imx21, i);
+		}
+	}
+	free_epdmem(imx21, ep);
+	spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/*
=========================================== */ +/* Hub handling */ +/* =========================================== */ + +static int get_hub_descriptor(struct usb_hcd *hcd, + struct usb_hub_descriptor *desc) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + desc->bDescriptorType = 0x29; /* HUB descriptor */ + desc->bHubContrCurrent = 0; + + desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA) + & USBH_ROOTHUBA_NDNSTMPRT_MASK; + desc->bDescLength = 9; + desc->bPwrOn2PwrGood = 0; + desc->wHubCharacteristics = (__force __u16) cpu_to_le16( + 0x0002 | /* No power switching */ + 0x0010 | /* No over current protection */ + 0); + + desc->bitmap[0] = 1 << 1; + desc->bitmap[1] = ~0; + return 0; +} + +static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + int ports; + int changed = 0; + int i; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + ports = readl(imx21->regs + USBH_ROOTHUBA) + & USBH_ROOTHUBA_NDNSTMPRT_MASK; + if (ports > 7) { + ports = 7; + dev_err(imx21->dev, "ports %d > 7\n", ports); + } + for (i = 0; i < ports; i++) { + if (readl(imx21->regs + USBH_PORTSTAT(i)) & + (USBH_PORTSTAT_CONNECTSC | + USBH_PORTSTAT_PRTENBLSC | + USBH_PORTSTAT_PRTSTATSC | + USBH_PORTSTAT_OVRCURIC | + USBH_PORTSTAT_PRTRSTSC)) { + + changed = 1; + buf[0] |= 1 << (i + 1); + } + } + spin_unlock_irqrestore(&imx21->lock, flags); + + if (changed) + dev_info(imx21->dev, "Hub status changed\n"); + return changed; +} + +static int imx21_hc_hub_control(struct usb_hcd *hcd, + u16 typeReq, + u16 wValue, u16 wIndex, char *buf, u16 wLength) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + int rc = 0; + u32 status_write = 0; + + switch (typeReq) { + case ClearHubFeature: + dev_dbg(imx21->dev, "ClearHubFeature\n"); + switch (wValue) { + case C_HUB_OVER_CURRENT: + dev_dbg(imx21->dev, " OVER_CURRENT\n"); + break; + case C_HUB_LOCAL_POWER: + dev_dbg(imx21->dev, " LOCAL_POWER\n"); + break; + default: + dev_dbg(imx21->dev, " unknown\n"); + rc = -EINVAL; + break; + } + break; + + case ClearPortFeature: + dev_dbg(imx21->dev, "ClearPortFeature\n"); + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + dev_dbg(imx21->dev, " ENABLE\n"); + status_write = USBH_PORTSTAT_CURCONST; + break; + case USB_PORT_FEAT_SUSPEND: + dev_dbg(imx21->dev, " SUSPEND\n"); + status_write = USBH_PORTSTAT_PRTOVRCURI; + break; + case USB_PORT_FEAT_POWER: + dev_dbg(imx21->dev, " POWER\n"); + status_write = USBH_PORTSTAT_LSDEVCON; + break; + case USB_PORT_FEAT_C_ENABLE: + dev_dbg(imx21->dev, " C_ENABLE\n"); + status_write = USBH_PORTSTAT_PRTENBLSC; + break; + case USB_PORT_FEAT_C_SUSPEND: + dev_dbg(imx21->dev, " C_SUSPEND\n"); + status_write = USBH_PORTSTAT_PRTSTATSC; + break; + case USB_PORT_FEAT_C_CONNECTION: + dev_dbg(imx21->dev, " C_CONNECTION\n"); + status_write = USBH_PORTSTAT_CONNECTSC; + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + dev_dbg(imx21->dev, " C_OVER_CURRENT\n"); + status_write = USBH_PORTSTAT_OVRCURIC; + break; + case USB_PORT_FEAT_C_RESET: + dev_dbg(imx21->dev, " C_RESET\n"); + status_write = USBH_PORTSTAT_PRTRSTSC; + break; + default: + dev_dbg(imx21->dev, " unknown\n"); + rc = -EINVAL; + break; + } + + break; + + case GetHubDescriptor: + dev_dbg(imx21->dev, "GetHubDescriptor\n"); + rc = get_hub_descriptor(hcd, (void *)buf); + break; + + case GetHubStatus: + dev_dbg(imx21->dev, " GetHubStatus\n"); + *(__le32 *) buf = 0; + break; + + case GetPortStatus: + dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n", + wIndex, USBH_PORTSTAT(wIndex - 1)); + *(__le32 *) buf = 
readl(imx21->regs +
+				USBH_PORTSTAT(wIndex - 1));
+		break;
+
+	case SetHubFeature:
+		dev_dbg(imx21->dev, "SetHubFeature\n");
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+			dev_dbg(imx21->dev, " OVER_CURRENT\n");
+			break;
+
+		case C_HUB_LOCAL_POWER:
+			dev_dbg(imx21->dev, " LOCAL_POWER\n");
+			break;
+		default:
+			dev_dbg(imx21->dev, " unknown\n");
+			rc = -EINVAL;
+			break;
+		}
+
+		break;
+
+	case SetPortFeature:
+		dev_dbg(imx21->dev, "SetPortFeature\n");
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(imx21->dev, " SUSPEND\n");
+			status_write = USBH_PORTSTAT_PRTSUSPST;
+			break;
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(imx21->dev, " POWER\n");
+			status_write = USBH_PORTSTAT_PRTPWRST;
+			break;
+		case USB_PORT_FEAT_RESET:
+			dev_dbg(imx21->dev, " RESET\n");
+			status_write = USBH_PORTSTAT_PRTRSTST;
+			break;
+		default:
+			dev_dbg(imx21->dev, " unknown\n");
+			rc = -EINVAL;
+			break;
+		}
+		break;
+
+	default:
+		dev_dbg(imx21->dev, " unknown\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	if (status_write)
+		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
+	return rc;
+}
+
+/* =========================================== */
+/* Host controller management */
+/* =========================================== */
+
+static int imx21_hc_reset(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long timeout;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	/* Reset the Host controller modules */
+	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
+		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
+		imx21->regs + USBOTG_RST_CTRL);
+
+	/* Wait for reset to finish */
+	timeout = jiffies + HZ;
+	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
+		if (time_after(jiffies, timeout)) {
+			spin_unlock_irqrestore(&imx21->lock, flags);
+			dev_err(imx21->dev, "timeout waiting for reset\n");
+			return -ETIMEDOUT;
+		}
+		spin_unlock_irq(&imx21->lock);
+		schedule_timeout(1);
+		spin_lock_irq(&imx21->lock);
+	}
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	return 0;
+}
+
+static int __devinit imx21_hc_start(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long flags;
+	int i, j;
+	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
+	u32 usb_control = 0;
+
+	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
+			USBOTG_HWMODE_HOSTXCVR_MASK);
+	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
+			USBOTG_HWMODE_OTGXCVR_MASK);
+
+	if (imx21->pdata->host1_txenoe)
+		usb_control |= USBCTRL_HOST1_TXEN_OE;
+
+	if (!imx21->pdata->host1_xcverless)
+		usb_control |= USBCTRL_HOST1_BYP_TLL;
+
+	if (imx21->pdata->otg_ext_xcvr)
+		usb_control |= USBCTRL_OTC_RCV_RXDP;
+
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
+		imx21->regs + USBOTG_CLK_CTRL);
+	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
+	writel(usb_control, imx21->regs + USBCTRL);
+	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
+		imx21->regs + USB_MISCCONTROL);
+
+	/* Clear the ETDs */
+	for (i = 0; i < USB_NUM_ETD; i++)
+		for (j = 0; j < 4; j++)
+			etd_writel(imx21, i, j, 0);
+
+	/* Take the HC out of reset */
+	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
+		imx21->regs + USBH_HOST_CTRL);
+
+	/* Enable ports */
+	if (imx21->pdata->enable_otg_host)
+		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+			imx21->regs + USBH_PORTSTAT(0));
+
+	if (imx21->pdata->enable_host1)
+		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+			imx21->regs + USBH_PORTSTAT(1));
+
+	if
(imx21->pdata->enable_host2) + writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, + imx21->regs + USBH_PORTSTAT(2)); + + + hcd->state = HC_STATE_RUNNING; + + /* Enable host controller interrupts */ + set_register_bits(imx21, USBH_SYSIEN, + USBH_SYSIEN_HERRINT | + USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT); + set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static void imx21_hc_stop(struct usb_hcd *hcd) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + writel(0, imx21->regs + USBH_SYSIEN); + clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); + clear_register_bits(imx21, USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN, + USBOTG_CLK_CTRL); + spin_unlock_irqrestore(&imx21->lock, flags); +} + +/* =========================================== */ +/* Driver glue */ +/* =========================================== */ + +static struct hc_driver imx21_hc_driver = { + .description = hcd_name, + .product_desc = "IMX21 USB Host Controller", + .hcd_priv_size = sizeof(struct imx21), + + .flags = HCD_USB11, + .irq = imx21_irq, + + .reset = imx21_hc_reset, + .start = imx21_hc_start, + .stop = imx21_hc_stop, + + /* I/O requests */ + .urb_enqueue = imx21_hc_urb_enqueue, + .urb_dequeue = imx21_hc_urb_dequeue, + .endpoint_disable = imx21_hc_endpoint_disable, + + /* scheduling support */ + .get_frame_number = imx21_hc_get_frame, + + /* Root hub support */ + .hub_status_data = imx21_hc_hub_status_data, + .hub_control = imx21_hc_hub_control, + +}; + +static struct mx21_usbh_platform_data default_pdata = { + .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF, + .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF, + .enable_host1 = 1, + .enable_host2 = 1, + .enable_otg_host = 1, + +}; + +static int imx21_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + remove_debug_files(imx21); + usb_remove_hcd(hcd); + + if (res != NULL) { + clk_disable(imx21->clk); + clk_put(imx21->clk); + iounmap(imx21->regs); + release_mem_region(res->start, resource_size(res)); + } + + kfree(hcd); + return 0; +} + + +static int imx21_probe(struct platform_device *pdev) +{ + struct usb_hcd *hcd; + struct imx21 *imx21; + struct resource *res; + int ret; + int irq; + + printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -ENXIO; + + hcd = usb_create_hcd(&imx21_hc_driver, + &pdev->dev, dev_name(&pdev->dev)); + if (hcd == NULL) { + dev_err(&pdev->dev, "Cannot create hcd (%s)\n", + dev_name(&pdev->dev)); + return -ENOMEM; + } + + imx21 = hcd_to_imx21(hcd); + imx21->dev = &pdev->dev; + imx21->pdata = pdev->dev.platform_data; + if (!imx21->pdata) + imx21->pdata = &default_pdata; + + spin_lock_init(&imx21->lock); + INIT_LIST_HEAD(&imx21->dmem_list); + INIT_LIST_HEAD(&imx21->queue_for_etd); + INIT_LIST_HEAD(&imx21->queue_for_dmem); + create_debug_files(imx21); + + res = request_mem_region(res->start, resource_size(res), hcd_name); + if (!res) { + ret = -EBUSY; + goto failed_request_mem; + } + + imx21->regs = ioremap(res->start, resource_size(res)); + if (imx21->regs == NULL) { + dev_err(imx21->dev, "Cannot map registers\n"); + ret = -ENOMEM; + goto failed_ioremap; + } + + /* Enable clocks source */ + imx21->clk = 
clk_get(imx21->dev, NULL); + if (IS_ERR(imx21->clk)) { + dev_err(imx21->dev, "no clock found\n"); + ret = PTR_ERR(imx21->clk); + goto failed_clock_get; + } + + ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000)); + if (ret) + goto failed_clock_set; + ret = clk_enable(imx21->clk); + if (ret) + goto failed_clock_enable; + + dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n", + (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF); + + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); + if (ret != 0) { + dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret); + goto failed_add_hcd; + } + + return 0; + +failed_add_hcd: + clk_disable(imx21->clk); +failed_clock_enable: +failed_clock_set: + clk_put(imx21->clk); +failed_clock_get: + iounmap(imx21->regs); +failed_ioremap: + release_mem_region(res->start, res->end - res->start); +failed_request_mem: + remove_debug_files(imx21); + usb_put_hcd(hcd); + return ret; +} + +static struct platform_driver imx21_hcd_driver = { + .driver = { + .name = (char *)hcd_name, + }, + .probe = imx21_probe, + .remove = imx21_remove, + .suspend = NULL, + .resume = NULL, +}; + +static int __init imx21_hcd_init(void) +{ + return platform_driver_register(&imx21_hcd_driver); +} + +static void __exit imx21_hcd_cleanup(void) +{ + platform_driver_unregister(&imx21_hcd_driver); +} + +module_init(imx21_hcd_init); +module_exit(imx21_hcd_cleanup); + +MODULE_DESCRIPTION("i.MX21 USB Host controller"); +MODULE_AUTHOR("Martin Fuzzey"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:imx21-hcd"); diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h new file mode 100644 index 00000000000..1b0d913780a --- /dev/null +++ b/drivers/usb/host/imx21-hcd.h @@ -0,0 +1,436 @@ +/* + * Macros and prototypes for i.MX21 + * + * Copyright (C) 2006 Loping Dog Embedded Systems + * Copyright (C) 2009 Martin Fuzzey + * Originally written by Jay Monkman <jtm@lopingdog.com> + * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __LINUX_IMX21_HCD_H__ +#define __LINUX_IMX21_HCD_H__ + +#include <mach/mx21-usbhost.h> + +#define NUM_ISO_ETDS 2 +#define USB_NUM_ETD 32 +#define DMEM_SIZE 4096 + +/* Register definitions */ +#define USBOTG_HWMODE 0x00 +#define USBOTG_HWMODE_ANASDBEN (1 << 14) +#define USBOTG_HWMODE_OTGXCVR_SHIFT 6 +#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6) +#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6) +#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6) +#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6) +#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6) +#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4 +#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4) +#define USBOTG_HWMODE_CRECFG_MASK (3 << 0) +#define USBOTG_HWMODE_CRECFG_HOST (1 << 0) +#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0) +#define USBOTG_HWMODE_CRECFG_HNP (3 << 0) + +#define USBOTG_CINT_STAT 0x04 +#define USBOTG_CINT_STEN 0x08 +#define USBOTG_ASHNPINT (1 << 5) +#define USBOTG_ASFCINT (1 << 4) +#define USBOTG_ASHCINT (1 << 3) +#define USBOTG_SHNPINT (1 << 2) +#define USBOTG_FCINT (1 << 1) +#define USBOTG_HCINT (1 << 0) + +#define USBOTG_CLK_CTRL 0x0c +#define USBOTG_CLK_CTRL_FUNC (1 << 2) +#define USBOTG_CLK_CTRL_HST (1 << 1) +#define USBOTG_CLK_CTRL_MAIN (1 << 0) + +#define USBOTG_RST_CTRL 0x10 +#define USBOTG_RST_RSTI2C (1 << 15) +#define USBOTG_RST_RSTCTRL (1 << 5) +#define USBOTG_RST_RSTFC (1 << 4) +#define USBOTG_RST_RSTFSKE (1 << 3) +#define USBOTG_RST_RSTRH (1 << 2) +#define USBOTG_RST_RSTHSIE (1 << 1) +#define USBOTG_RST_RSTHC (1 << 0) + +#define USBOTG_FRM_INTVL 0x14 +#define USBOTG_FRM_REMAIN 0x18 +#define USBOTG_HNP_CSR 0x1c +#define USBOTG_HNP_ISR 0x2c +#define USBOTG_HNP_IEN 0x30 + +#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x)) +#define USBOTG_I2C_XCVR_DEVAD 0x118 +#define USBOTG_I2C_SEQ_OP_REG 0x119 +#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a +#define USBOTG_I2C_OP_CTRL_REG 0x11b +#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e +#define USBOTG_I2C_MASTER_INT_REG 0x11f + +#define USBH_HOST_CTRL 0x80 +#define USBH_HOST_CTRL_HCRESET (1 << 31) +#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16) +#define USBH_HOST_CTRL_RMTWUEN (1 << 4) +#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2) +#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2) +#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2) +#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2) +#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0) +#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0) +#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0) +#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0) + +#define USBH_SYSISR 0x88 +#define USBH_SYSISR_PSCINT (1 << 6) +#define USBH_SYSISR_FMOFINT (1 << 5) +#define USBH_SYSISR_HERRINT (1 << 4) +#define USBH_SYSISR_RESDETINT (1 << 3) +#define USBH_SYSISR_SOFINT (1 << 2) +#define USBH_SYSISR_DONEINT (1 << 1) +#define USBH_SYSISR_SORINT (1 << 0) + +#define USBH_SYSIEN 0x8c +#define USBH_SYSIEN_PSCINT (1 << 6) +#define USBH_SYSIEN_FMOFINT (1 << 5) +#define USBH_SYSIEN_HERRINT (1 << 4) +#define USBH_SYSIEN_RESDETINT (1 << 3) +#define USBH_SYSIEN_SOFINT (1 << 2) +#define USBH_SYSIEN_DONEINT (1 << 1) +#define USBH_SYSIEN_SORINT (1 << 0) + +#define USBH_XBUFSTAT 0x98 +#define USBH_YBUFSTAT 0x9c +#define USBH_XYINTEN 0xa0 +#define USBH_XFILLSTAT 0xa8 +#define USBH_YFILLSTAT 0xac +#define USBH_ETDENSET 0xc0 +#define USBH_ETDENCLR 0xc4 +#define USBH_IMMEDINT 0xcc +#define USBH_ETDDONESTAT 0xd0 +#define USBH_ETDDONEEN 0xd4 +#define 
USBH_FRMNUB 0xe0 +#define USBH_LSTHRESH 0xe4 + +#define USBH_ROOTHUBA 0xe8 +#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff) +#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24) +#define USBH_ROOTHUBA_NOOVRCURP (1 << 12) +#define USBH_ROOTHUBA_OVRCURPM (1 << 11) +#define USBH_ROOTHUBA_DEVTYPE (1 << 10) +#define USBH_ROOTHUBA_PWRSWTMD (1 << 9) +#define USBH_ROOTHUBA_NOPWRSWT (1 << 8) +#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff) + +#define USBH_ROOTHUBB 0xec +#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16)) +#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x)) + +#define USBH_ROOTSTAT 0xf0 +#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31) +#define USBH_ROOTSTAT_OVRCURCHG (1 << 17) +#define USBH_ROOTSTAT_DEVCONWUE (1 << 15) +#define USBH_ROOTSTAT_OVRCURI (1 << 1) +#define USBH_ROOTSTAT_LOCPWRS (1 << 0) + +#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4)) +#define USBH_PORTSTAT_PRTRSTSC (1 << 20) +#define USBH_PORTSTAT_OVRCURIC (1 << 19) +#define USBH_PORTSTAT_PRTSTATSC (1 << 18) +#define USBH_PORTSTAT_PRTENBLSC (1 << 17) +#define USBH_PORTSTAT_CONNECTSC (1 << 16) +#define USBH_PORTSTAT_LSDEVCON (1 << 9) +#define USBH_PORTSTAT_PRTPWRST (1 << 8) +#define USBH_PORTSTAT_PRTRSTST (1 << 4) +#define USBH_PORTSTAT_PRTOVRCURI (1 << 3) +#define USBH_PORTSTAT_PRTSUSPST (1 << 2) +#define USBH_PORTSTAT_PRTENABST (1 << 1) +#define USBH_PORTSTAT_CURCONST (1 << 0) + +#define USB_DMAREV 0x800 +#define USB_DMAINTSTAT 0x804 +#define USB_DMAINTSTAT_EPERR (1 << 1) +#define USB_DMAINTSTAT_ETDERR (1 << 0) + +#define USB_DMAINTEN 0x808 +#define USB_DMAINTEN_EPERRINTEN (1 << 1) +#define USB_DMAINTEN_ETDERRINTEN (1 << 0) + +#define USB_ETDDMAERSTAT 0x80c +#define USB_EPDMAERSTAT 0x810 +#define USB_ETDDMAEN 0x820 +#define USB_EPDMAEN 0x824 +#define USB_ETDDMAXTEN 0x828 +#define USB_EPDMAXTEN 0x82c +#define USB_ETDDMAENXYT 0x830 +#define USB_EPDMAENXYT 0x834 +#define USB_ETDDMABST4EN 0x838 +#define USB_EPDMABST4EN 0x83c + +#define USB_MISCCONTROL 0x840 +#define USB_MISCCONTROL_ISOPREVFRM (1 << 3) +#define USB_MISCCONTROL_SKPRTRY (1 << 2) +#define USB_MISCCONTROL_ARBMODE (1 << 1) +#define USB_MISCCONTROL_FILTCC (1 << 0) + +#define USB_ETDDMACHANLCLR 0x848 +#define USB_EPDMACHANLCLR 0x84c +#define USB_ETDSMSA(x) (0x900 + ((x) * 4)) +#define USB_EPSMSA(x) (0x980 + ((x) * 4)) +#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4)) +#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4)) + +#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4)) +#define DW0_ADDRESS 0 +#define DW0_ENDPNT 7 +#define DW0_DIRECT 11 +#define DW0_SPEED 13 +#define DW0_FORMAT 14 +#define DW0_MAXPKTSIZ 16 +#define DW0_HALTED 27 +#define DW0_TOGCRY 28 +#define DW0_SNDNAK 30 + +#define DW1_XBUFSRTAD 0 +#define DW1_YBUFSRTAD 16 + +#define DW2_RTRYDELAY 0 +#define DW2_POLINTERV 0 +#define DW2_STARTFRM 0 +#define DW2_RELPOLPOS 8 +#define DW2_DIRPID 16 +#define DW2_BUFROUND 18 +#define DW2_DELAYINT 19 +#define DW2_DATATOG 22 +#define DW2_ERRORCNT 24 +#define DW2_COMPCODE 28 + +#define DW3_TOTBYECNT 0 +#define DW3_PKTLEN0 0 +#define DW3_COMPCODE0 12 +#define DW3_PKTLEN1 16 +#define DW3_BUFSIZE 21 +#define DW3_COMPCODE1 28 + +#define USBCTRL 0x600 +#define USBCTRL_I2C_WU_INT_STAT (1 << 27) +#define USBCTRL_OTG_WU_INT_STAT (1 << 26) +#define USBCTRL_HOST_WU_INT_STAT (1 << 25) +#define USBCTRL_FNT_WU_INT_STAT (1 << 24) +#define USBCTRL_I2C_WU_INT_EN (1 << 19) +#define USBCTRL_OTG_WU_INT_EN (1 << 18) +#define USBCTRL_HOST_WU_INT_EN (1 << 17) +#define USBCTRL_FNT_WU_INT_EN (1 << 16) +#define USBCTRL_OTC_RCV_RXDP (1 << 13) +#define USBCTRL_HOST1_BYP_TLL (1 << 12) +#define USBCTRL_OTG_BYP_VAL(x) ((x) << 
10) +#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8) +#define USBCTRL_OTG_PWR_MASK (1 << 6) +#define USBCTRL_HOST1_PWR_MASK (1 << 5) +#define USBCTRL_HOST2_PWR_MASK (1 << 4) +#define USBCTRL_USB_BYP (1 << 2) +#define USBCTRL_HOST1_TXEN_OE (1 << 1) + + +/* Values in TD blocks */ +#define TD_DIR_SETUP 0 +#define TD_DIR_OUT 1 +#define TD_DIR_IN 2 +#define TD_FORMAT_CONTROL 0 +#define TD_FORMAT_ISO 1 +#define TD_FORMAT_BULK 2 +#define TD_FORMAT_INT 3 +#define TD_TOGGLE_CARRY 0 +#define TD_TOGGLE_DATA0 2 +#define TD_TOGGLE_DATA1 3 + +/* control transfer states */ +#define US_CTRL_SETUP 2 +#define US_CTRL_DATA 1 +#define US_CTRL_ACK 0 + +/* bulk transfer main state and 0-length packet */ +#define US_BULK 1 +#define US_BULK0 0 + +/*ETD format description*/ +#define IMX_FMT_CTRL 0x0 +#define IMX_FMT_ISO 0x1 +#define IMX_FMT_BULK 0x2 +#define IMX_FMT_INT 0x3 + +static char fmt_urb_to_etd[4] = { +/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO, +/*PIPE_INTERRUPT*/ IMX_FMT_INT, +/*PIPE_CONTROL*/ IMX_FMT_CTRL, +/*PIPE_BULK*/ IMX_FMT_BULK +}; + +/* condition (error) CC codes and mapping (OHCI like) */ + +#define TD_CC_NOERROR 0x00 +#define TD_CC_CRC 0x01 +#define TD_CC_BITSTUFFING 0x02 +#define TD_CC_DATATOGGLEM 0x03 +#define TD_CC_STALL 0x04 +#define TD_DEVNOTRESP 0x05 +#define TD_PIDCHECKFAIL 0x06 +/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/ +#define TD_DATAOVERRUN 0x08 +#define TD_DATAUNDERRUN 0x09 +#define TD_BUFFEROVERRUN 0x0C +#define TD_BUFFERUNDERRUN 0x0D +#define TD_SCHEDULEOVERRUN 0x0E +#define TD_NOTACCESSED 0x0F + +static const int cc_to_error[16] = { + /* No Error */ 0, + /* CRC Error */ -EILSEQ, + /* Bit Stuff */ -EPROTO, + /* Data Togg */ -EILSEQ, + /* Stall */ -EPIPE, + /* DevNotResp */ -ETIMEDOUT, + /* PIDCheck */ -EPROTO, + /* UnExpPID */ -EPROTO, + /* DataOver */ -EOVERFLOW, + /* DataUnder */ -EREMOTEIO, + /* (for hw) */ -EIO, + /* (for hw) */ -EIO, + /* BufferOver */ -ECOMM, + /* BuffUnder */ -ENOSR, + /* (for HCD) */ -ENOSPC, + /* (for HCD) */ -EALREADY +}; + +/* HCD data associated with a usb core URB */ +struct urb_priv { + struct urb *urb; + struct usb_host_endpoint *ep; + int active; + int state; + struct td *isoc_td; + int isoc_remaining; + int isoc_status; +}; + +/* HCD data associated with a usb core endpoint */ +struct ep_priv { + struct usb_host_endpoint *ep; + struct list_head td_list; + struct list_head queue; + int etd[NUM_ISO_ETDS]; + int waiting_etd; +}; + +/* isoc packet */ +struct td { + struct list_head list; + struct urb *urb; + struct usb_host_endpoint *ep; + dma_addr_t data; + unsigned long buf_addr; + int len; + int frame; + int isoc_index; +}; + +/* HCD data associated with a hardware ETD */ +struct etd_priv { + struct usb_host_endpoint *ep; + struct urb *urb; + struct td *td; + struct list_head queue; + dma_addr_t dma_handle; + int alloc; + int len; + int dmem_size; + int dmem_offset; + int active_count; +#ifdef DEBUG + int activated_frame; + int disactivated_frame; + int last_int_frame; + int last_req_frame; + u32 submitted_dwords[4]; +#endif +}; + +/* Hardware data memory info */ +struct imx21_dmem_area { + struct usb_host_endpoint *ep; + unsigned int offset; + unsigned int size; + struct list_head list; +}; + +#ifdef DEBUG +struct debug_usage_stats { + unsigned int value; + unsigned int maximum; +}; + +struct debug_stats { + unsigned long submitted; + unsigned long completed_ok; + unsigned long completed_failed; + unsigned long unlinked; + unsigned long queue_etd; + unsigned long queue_dmem; +}; + +struct debug_isoc_trace { + int schedule_frame; + int 
submit_frame; + int request_len; + int done_frame; + int done_len; + int cc; + struct td *td; +}; +#endif + +/* HCD data structure */ +struct imx21 { + spinlock_t lock; + struct device *dev; + struct mx21_usbh_platform_data *pdata; + struct list_head dmem_list; + struct list_head queue_for_etd; /* eps queued due to etd shortage */ + struct list_head queue_for_dmem; /* etds queued due to dmem shortage */ + struct etd_priv etd[USB_NUM_ETD]; + struct clk *clk; + void __iomem *regs; +#ifdef DEBUG + struct dentry *debug_root; + struct debug_stats nonisoc_stats; + struct debug_stats isoc_stats; + struct debug_usage_stats etd_usage; + struct debug_usage_stats dmem_usage; + struct debug_isoc_trace isoc_trace[20]; + struct debug_isoc_trace isoc_trace_failed[20]; + unsigned long debug_unblocks; + int isoc_trace_index; + int isoc_trace_index_failed; +#endif +}; + +#endif diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index a2b305477af..d9e82123de2 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -62,8 +62,10 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> +#include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/isp116x.h> +#include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <asm/io.h> @@ -71,7 +73,6 @@ #include <asm/system.h> #include <asm/byteorder.h> -#include "../core/hcd.h" #include "isp116x.h" #define DRIVER_VERSION "03 Nov 2005" diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index e35d82808ba..0587ad4ce5c 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -77,10 +77,11 @@ #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/usb/isp1362.h> +#include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/io.h> -#include <linux/bitops.h> +#include <linux/bitmap.h> #include <asm/irq.h> #include <asm/system.h> @@ -95,7 +96,6 @@ module_param(dbg_level, int, 0); #define STUB_DEBUG_FILE #endif -#include "../core/hcd.h" #include "../core/usb.h" #include "isp1362.h" @@ -190,10 +190,8 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep, u16 len) { int ptd_offset = -EINVAL; - int index; int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; - int found = -1; - int last = -1; + int found; BUG_ON(len > epq->buf_size); @@ -205,20 +203,9 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq, epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); BUG_ON(ep->num_ptds != 0); - for (index = 0; index <= epq->buf_count - num_ptds; index++) { - if (test_bit(index, &epq->buf_map)) - continue; - found = index; - for (last = index + 1; last < index + num_ptds; last++) { - if (test_bit(last, &epq->buf_map)) { - found = -1; - break; - } - } - if (found >= 0) - break; - } - if (found < 0) + found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0, + num_ptds, 0); + if (found >= epq->buf_count) return -EOVERFLOW; DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, @@ -230,8 +217,7 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq, epq->buf_avail -= num_ptds; BUG_ON(epq->buf_avail > epq->buf_count); ep->ptd_index = found; - for (index = found; index < last; index++) - __set_bit(index, &epq->buf_map); + bitmap_set(&epq->buf_map, found, num_ptds); DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", __func__, epq->name, ep->ptd_index, ep->ptd_offset, epq->buf_avail, 
epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); @@ -1271,7 +1257,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd, /* avoid all allocations within spinlocks: request or endpoint */ if (!hep->hcpriv) { - ep = kcalloc(1, sizeof *ep, mem_flags); + ep = kzalloc(sizeof *ep, mem_flags); if (!ep) return -ENOMEM; } @@ -1279,7 +1265,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd, /* don't submit to a dead or disabled port */ if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) & - (1 << USB_PORT_FEAT_ENABLE)) || + USB_PORT_STAT_ENABLE) || !HC_IS_RUNNING(hcd->state)) { kfree(ep); retval = -ENODEV; @@ -2231,19 +2217,16 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd) static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) { if (isp1362_hcd->pde) - remove_proc_entry(proc_filename, 0); + remove_proc_entry(proc_filename, NULL); } #endif /*-------------------------------------------------------------------------*/ -static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) +static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) { int tmp = 20; - unsigned long flags; - - spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); @@ -2254,6 +2237,14 @@ static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) } if (!tmp) pr_err("Software reset timeout\n"); +} + +static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) +{ + unsigned long flags; + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + __isp1362_sw_reset(isp1362_hcd); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); } @@ -2284,10 +2275,10 @@ static int isp1362_mem_config(struct usb_hcd *hcd) dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", istl_size / 2, istl_size, 0, istl_size / 2); - dev_info(hcd->self.controller, " INTL: %4d * (%3u+8): %4d @ $%04x\n", + dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n", ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, intl_size, istl_size); - dev_info(hcd->self.controller, " ATL : %4d * (%3u+8): %4d @ $%04x\n", + dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n", atl_buffers, atl_blksize - PTD_HEADER_SIZE, atl_size, istl_size + intl_size); dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, @@ -2432,7 +2423,7 @@ static void isp1362_hc_stop(struct usb_hcd *hcd) if (isp1362_hcd->board && isp1362_hcd->board->reset) isp1362_hcd->board->reset(hcd->self.controller, 1); else - isp1362_sw_reset(isp1362_hcd); + __isp1362_sw_reset(isp1362_hcd); if (isp1362_hcd->board && isp1362_hcd->board->clock) isp1362_hcd->board->clock(hcd->self.controller, 0); @@ -2677,12 +2668,12 @@ static int __devexit isp1362_remove(struct platform_device *pdev) DBG(0, "%s: Removing HCD\n", __func__); usb_remove_hcd(hcd); - DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, - (u32)isp1362_hcd->data_reg); + DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, + isp1362_hcd->data_reg); iounmap(isp1362_hcd->data_reg); - DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, - (u32)isp1362_hcd->addr_reg); + DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, + isp1362_hcd->addr_reg); iounmap(isp1362_hcd->addr_reg); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -2711,6 +2702,8 @@ static int __init isp1362_probe(struct platform_device *pdev) void __iomem *data_reg; int irq; int retval = 0; + struct resource *irq_res; + unsigned int irq_flags = 
0; /* basic sanity checks first. board-specific init logic should * have initialized this the three resources and probably board @@ -2724,30 +2717,18 @@ static int __init isp1362_probe(struct platform_device *pdev) data = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = platform_get_resource(pdev, IORESOURCE_MEM, 1); - irq = platform_get_irq(pdev, 0); - if (!addr || !data || irq < 0) { + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!addr || !data || !irq_res) { retval = -ENODEV; goto err1; } + irq = irq_res->start; -#ifdef CONFIG_USB_HCD_DMA - if (pdev->dev.dma_mask) { - struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - - if (!dma_res) { - retval = -ENODEV; - goto err1; - } - isp1362_hcd->data_dma = dma_res->start; - isp1362_hcd->max_dma_size = resource_len(dma_res); - } -#else if (pdev->dev.dma_mask) { DBG(1, "won't do DMA"); retval = -ENODEV; goto err1; } -#endif if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { retval = -EBUSY; @@ -2795,12 +2776,16 @@ static int __init isp1362_probe(struct platform_device *pdev) } #endif -#ifdef CONFIG_ARM - if (isp1362_hcd->board) - set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING); -#endif + if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE) + irq_flags |= IRQF_TRIGGER_RISING; + if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE) + irq_flags |= IRQF_TRIGGER_FALLING; + if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL) + irq_flags |= IRQF_TRIGGER_HIGH; + if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL) + irq_flags |= IRQF_TRIGGER_LOW; - retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED); + retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED); if (retval != 0) goto err6; pr_info("%s, irq %d\n", hcd->product_desc, irq); @@ -2810,16 +2795,16 @@ static int __init isp1362_probe(struct platform_device *pdev) return 0; err6: - DBG(0, "%s: Freeing dev %08x\n", __func__, (u32)isp1362_hcd); + DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd); usb_put_hcd(hcd); err5: - DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, (u32)data_reg); + DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg); iounmap(data_reg); err4: DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start); release_mem_region(data->start, resource_len(data)); err3: - DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, (u32)addr_reg); + DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg); iounmap(addr_reg); err2: DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start); diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h index fe60f62a32f..d995351f9be 100644 --- a/drivers/usb/host/isp1362.h +++ b/drivers/usb/host/isp1362.h @@ -65,7 +65,7 @@ static inline void delayed_insw(unsigned int addr, void *buf, int len) unsigned short *bp = (unsigned short *)buf; while (len--) { DUMMY_DELAY_ACCESS; - *bp++ = inw((void *)addr); + *bp++ = inw(addr); } } @@ -534,8 +534,8 @@ struct isp1362_hcd { /* periodic schedule: isochronous */ struct list_head isoc; - int istl_flip:1; - int irq_active:1; + unsigned int istl_flip:1; + unsigned int irq_active:1; /* Schedules for the current frame */ struct isp1362_ep_queue atl_queue; @@ -580,7 +580,7 @@ static inline const char *ISP1362_INT_NAME(int n) static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr) { - unsigned p = (unsigned)ptr; + unsigned long p = (unsigned long)ptr; if (!(p & 0xf)) isp1362_hcd->stat16++; else if (!(p & 0x7)) @@ 
-770,7 +770,7 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l if (!len) return; - if ((unsigned)dp & 0x1) { + if ((unsigned long)dp & 0x1) { /* not aligned */ for (; len > 1; len -= 2) { data = *dp++; @@ -962,8 +962,8 @@ static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 isp1362_write_diraddr(isp1362_hcd, offset, len); - DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %08x\n", __func__, - len, offset, (u32)buf); + DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %p\n", + __func__, len, offset, buf); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); @@ -982,8 +982,8 @@ static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 isp1362_write_diraddr(isp1362_hcd, offset, len); - DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %08x\n", __func__, - len, offset, (u32)buf); + DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %p\n", + __func__, len, offset, buf); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 9600a58299d..dbcafa29c77 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -14,12 +14,14 @@ #include <linux/slab.h> #include <linux/list.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <linux/mm.h> #include <asm/unaligned.h> +#include <asm/cacheflush.h> -#include "../core/hcd.h" #include "isp1760-hcd.h" static struct kmem_cache *qtd_cachep; @@ -109,7 +111,7 @@ struct isp1760_qh { u32 ping; }; -#define ehci_port_speed(priv, portsc) (1 << USB_PORT_FEAT_HIGHSPEED) +#define ehci_port_speed(priv, portsc) USB_PORT_STAT_HIGH_SPEED static unsigned int isp1760_readl(__u32 __iomem *regs) { @@ -711,12 +713,11 @@ static int check_error(struct ptd *ptd) u32 dw3; dw3 = le32_to_cpu(ptd->dw3); - if (dw3 & DW3_HALT_BIT) + if (dw3 & DW3_HALT_BIT) { error = -EPIPE; - if (dw3 & DW3_ERROR_BIT) { - printk(KERN_ERR "error bit is set in DW3\n"); - error = -EPIPE; + if (dw3 & DW3_ERROR_BIT) + pr_err("error bit is set in DW3\n"); } if (dw3 & DW3_QTD_ACTIVE) { @@ -904,6 +905,14 @@ __acquires(priv->lock) status = 0; } + if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { + void *ptr; + for (ptr = urb->transfer_buffer; + ptr < urb->transfer_buffer + urb->transfer_buffer_length; + ptr += PAGE_SIZE) + flush_dcache_page(virt_to_page(ptr)); + } + /* complete() can reenter this HCD */ usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb); spin_unlock(&priv->lock); @@ -1039,12 +1048,12 @@ static void do_atl_int(struct usb_hcd *usb_hcd) if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) { u32 buffstatus; - /* XXX + /* * NAKs are handled in HW by the chip. Usually if the * device is not able to send data fast enough. - * This did not trigger for a long time now. + * This happens mostly on slower hardware. */ - printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: " + printk(KERN_NOTICE "Reloading ptd %p/%p... 
qh %p read: " "%d of %zu done: %08x cur: %08x\n", qtd, urb, qh, PTD_XFERRED_LENGTH(dw3), qtd->length, done_map, @@ -1913,7 +1922,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq, * Even if OWNER is set, so the port is owned by the * companion controller, khubd needs to be able to clear * the port-change status bits (especially - * USB_PORT_FEAT_C_CONNECTION). + * USB_PORT_STAT_C_CONNECTION). */ switch (wValue) { @@ -1977,7 +1986,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq, /* wPortChange bits */ if (temp & PORT_CSC) - status |= 1 << USB_PORT_FEAT_C_CONNECTION; + status |= USB_PORT_STAT_C_CONNECTION << 16; /* whoever resumes must GetPortStatus to complete it!! */ @@ -1997,7 +2006,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq, /* resume completed? */ else if (time_after_eq(jiffies, priv->reset_done)) { - status |= 1 << USB_PORT_FEAT_C_SUSPEND; + status |= USB_PORT_STAT_C_SUSPEND << 16; priv->reset_done = 0; /* stop resume signaling */ @@ -2021,7 +2030,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq, if ((temp & PORT_RESET) && time_after_eq(jiffies, priv->reset_done)) { - status |= 1 << USB_PORT_FEAT_C_RESET; + status |= USB_PORT_STAT_C_RESET << 16; priv->reset_done = 0; /* force reset to complete */ @@ -2052,18 +2061,18 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq, printk(KERN_ERR "Warning: PORT_OWNER is set\n"); if (temp & PORT_CONNECT) { - status |= 1 << USB_PORT_FEAT_CONNECTION; + status |= USB_PORT_STAT_CONNECTION; /* status may be from integrated TT */ status |= ehci_port_speed(priv, temp); } if (temp & PORT_PE) - status |= 1 << USB_PORT_FEAT_ENABLE; + status |= USB_PORT_STAT_ENABLE; if (temp & (PORT_SUSPEND|PORT_RESUME)) - status |= 1 << USB_PORT_FEAT_SUSPEND; + status |= USB_PORT_STAT_SUSPEND; if (temp & PORT_RESET) - status |= 1 << USB_PORT_FEAT_RESET; + status |= USB_PORT_STAT_RESET; if (temp & PORT_POWER) - status |= 1 << USB_PORT_FEAT_POWER; + status |= USB_PORT_STAT_POWER; put_unaligned(cpu_to_le32(status), (__le32 *) buf); break; diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index 1c9f977a5c9..ec85d0c3cc3 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c @@ -13,8 +13,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/usb/isp1760.h> +#include <linux/usb/hcd.h> -#include "../core/hcd.h" #include "isp1760-hcd.h" #ifdef CONFIG_PPC_OF @@ -31,12 +31,12 @@ static int of_isp1760_probe(struct of_device *dev, const struct of_device_id *match) { struct usb_hcd *hcd; - struct device_node *dp = dev->node; + struct device_node *dp = dev->dev.of_node; struct resource *res; struct resource memory; struct of_irq oirq; int virq; - u64 res_len; + resource_size_t res_len; int ret; const unsigned int *prop; unsigned int devflags = 0; @@ -45,13 +45,12 @@ static int of_isp1760_probe(struct of_device *dev, if (ret) return -ENXIO; - res = request_mem_region(memory.start, memory.end - memory.start + 1, - dev_name(&dev->dev)); + res_len = resource_size(&memory); + + res = request_mem_region(memory.start, res_len, dev_name(&dev->dev)); if (!res) return -EBUSY; - res_len = memory.end - memory.start + 1; - if (of_irq_map_one(dp, 0, &oirq)) { ret = -ENODEV; goto release_reg; @@ -92,7 +91,7 @@ static int of_isp1760_probe(struct of_device *dev, return ret; release_reg: - release_mem_region(memory.start, memory.end - memory.start + 1); + release_mem_region(memory.start, res_len); return ret; } @@ -109,7 +108,7 @@ static 
int of_isp1760_remove(struct of_device *dev) return 0; } -static struct of_device_id of_isp1760_match[] = { +static const struct of_device_id of_isp1760_match[] = { { .compatible = "nxp,usb-isp1760", }, @@ -121,8 +120,11 @@ static struct of_device_id of_isp1760_match[] = { MODULE_DEVICE_TABLE(of, of_isp1760_match); static struct of_platform_driver isp1760_of_driver = { - .name = "nxp-isp1760", - .match_table = of_isp1760_match, + .driver = { + .name = "nxp-isp1760", + .owner = THIS_MODULE, + .of_match_table = of_isp1760_match, + }, .probe = of_isp1760_probe, .remove = of_isp1760_remove, }; diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 7ccffcbe7b6..944291e10f9 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -35,7 +35,7 @@ extern int usb_disabled(void); static void at91_start_clock(void) { - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_enable(hclk); clk_enable(iclk); clk_enable(fclk); @@ -46,7 +46,7 @@ static void at91_stop_clock(void) { clk_disable(fclk); clk_disable(iclk); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_disable(hclk); clocked = 0; } @@ -142,7 +142,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, iclk = clk_get(&pdev->dev, "ohci_clk"); fclk = clk_get(&pdev->dev, "uhpck"); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) hclk = clk_get(&pdev->dev, "hck0"); at91_start_hc(pdev); @@ -155,7 +155,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, /* Error handling */ at91_stop_hc(pdev); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_put(hclk); clk_put(fclk); clk_put(iclk); @@ -192,7 +192,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd, release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_put(hclk); clk_put(fclk); clk_put(iclk); @@ -331,6 +331,8 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg) */ if (at91_suspend_entering_slow_clock()) { ohci_usb_reset (ohci); + /* flush the writes */ + (void) ohci_readl (ohci, &ohci->regs->control); at91_stop_clock(); } diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index e4380082ebb..17a6043c1fa 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c @@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ohci_pmops = { +static const struct dev_pm_ops au1xxx_ohci_pmops = { .suspend = ohci_hcd_au1xxx_drv_suspend, .resume = ohci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c new file mode 100644 index 00000000000..d22fb4d577b --- /dev/null +++ b/drivers/usb/host/ohci-da8xx.c @@ -0,0 +1,456 @@ +/* + * OHCI HCD (Host Controller Driver) for USB. + * + * TI DA8xx (OMAP-L1x) Bus Glue + * + * Derived from: ohci-omap.c and ohci-s3c2410.c + * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
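The isp1760-if.c hunk above (and the ohci-ppc-of.c one near the end of this diff) moves the driver name and match table into the embedded .driver field, the new-style of_platform_driver layout of this cycle. As a point of reference, a minimal registration skeleton in that style could look like the sketch below; the names (example_probe, "acme,example-hcd", and so on) are placeholders, not anything taken from this patch set.

#include <linux/module.h>
#include <linux/of_platform.h>

/* Hypothetical callbacks, standing in for the real probe/remove bodies. */
static int example_probe(struct of_device *op,
			 const struct of_device_id *match)
{
	/* map registers, create the usb_hcd, etc. would go here */
	return 0;
}

static int example_remove(struct of_device *op)
{
	return 0;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "acme,example-hcd", },	/* made-up compatible string */
	{},
};
MODULE_DEVICE_TABLE(of, example_match);

/* Name, owner and match table now live in the embedded .driver. */
static struct of_platform_driver example_of_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name		= "example-of-hcd",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,
	},
};

static int __init example_init(void)
{
	return of_register_platform_driver(&example_of_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	of_unregister_platform_driver(&example_of_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");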
+ */ + +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/platform_device.h> +#include <linux/clk.h> + +#include <mach/da8xx.h> +#include <mach/usb.h> + +#ifndef CONFIG_ARCH_DAVINCI_DA8XX +#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." +#endif + +#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG) + +static struct clk *usb11_clk; +static struct clk *usb20_clk; + +/* Over-current indicator change bitmask */ +static volatile u16 ocic_mask; + +static void ohci_da8xx_clock(int on) +{ + u32 cfgchip2; + + cfgchip2 = __raw_readl(CFGCHIP2); + if (on) { + clk_enable(usb11_clk); + + /* + * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we + * need to enable the USB 2.0 module clocking, start its PHY, + * and not allow it to stop the clock during USB 2.0 suspend. + */ + if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) { + clk_enable(usb20_clk); + + cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN); + cfgchip2 |= CFGCHIP2_PHY_PLLON; + __raw_writel(cfgchip2, CFGCHIP2); + + pr_info("Waiting for USB PHY clock good...\n"); + while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) + cpu_relax(); + } + + /* Enable USB 1.1 PHY */ + cfgchip2 |= CFGCHIP2_USB1SUSPENDM; + } else { + clk_disable(usb11_clk); + if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) + clk_disable(usb20_clk); + + /* Disable USB 1.1 PHY */ + cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM; + } + __raw_writel(cfgchip2, CFGCHIP2); +} + +/* + * Handle the port over-current indicator change. + */ +static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub, + unsigned port) +{ + ocic_mask |= 1 << port; + + /* Once over-current is detected, the port needs to be powered down */ + if (hub->get_oci(port) > 0) + hub->set_power(port, 0); +} + +static int ohci_da8xx_init(struct usb_hcd *hcd) +{ + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev->platform_data; + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int result; + u32 rh_a; + + dev_dbg(dev, "starting USB controller\n"); + + ohci_da8xx_clock(1); + + /* + * DA8xx only have 1 port connected to the pins but the HC root hub + * register A reports 2 ports, thus we'll have to override it... + */ + ohci->num_ports = 1; + + result = ohci_init(ohci); + if (result < 0) + return result; + + /* + * Since we're providing a board-specific root hub port power control + * and over-current reporting, we have to override the HC root hub A + * register's default value, so that ohci_hub_control() could return + * the correct hub descriptor... + */ + rh_a = ohci_readl(ohci, &ohci->regs->roothub.a); + if (hub->set_power) { + rh_a &= ~RH_A_NPS; + rh_a |= RH_A_PSM; + } + if (hub->get_oci) { + rh_a &= ~RH_A_NOCP; + rh_a |= RH_A_OCPM; + } + rh_a &= ~RH_A_POTPGT; + rh_a |= hub->potpgt << 24; + ohci_writel(ohci, rh_a, &ohci->regs->roothub.a); + + return result; +} + +static void ohci_da8xx_stop(struct usb_hcd *hcd) +{ + ohci_stop(hcd); + ohci_da8xx_clock(0); +} + +static int ohci_da8xx_start(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int result; + + result = ohci_run(ohci); + if (result < 0) + ohci_da8xx_stop(hcd); + + return result; +} + +/* + * Update the status data from the hub with the over-current indicator change. 
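ohci-da8xx.c above leaves port power and over-current policy entirely to board code: every decision goes through function pointers supplied in platform data (set_power, get_power, get_oci, ocic_notify), plus the potpgt value folded into RH_A_POTPGT. The sketch below shows what the board-side contract could look like; the struct layout, typedef and signatures are inferred from how the glue calls them, so treat them as assumptions rather than a copy of <mach/usb.h>.

#include <linux/types.h>

struct da8xx_ohci_root_hub;

/* Callback the glue registers via ocic_notify() for over-current events. */
typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub,
				     unsigned port);

/*
 * Inferred from the calls made in ohci-da8xx.c; the authoritative
 * definition lives in <mach/usb.h> and may differ in names or types.
 */
struct da8xx_ohci_root_hub {
	int (*set_power)(unsigned port, int on);	/* switch port VBUS */
	int (*get_power)(unsigned port);		/* 0 means unpowered */
	int (*get_oci)(unsigned port);			/* >0 means over-current */
	int (*ocic_notify)(da8xx_ocic_handler_t handler); /* hook the OC IRQ */
	u8 potpgt;	/* power-on to power-good time, for RH_A_POTPGT */
};

/* Hypothetical board usage: only VBUS switching is provided here. */
static int board_usb11_set_power(unsigned port, int on)
{
	/* e.g. toggle the GPIO driving the port's VBUS switch */
	return 0;
}

static struct da8xx_ohci_root_hub board_usb11_pdata = {
	.set_power	= board_usb11_set_power,
	.potpgt		= 2,	/* roughly 4 ms, in the hub's 2 ms units */
};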
+ */ +static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + int length = ohci_hub_status_data(hcd, buf); + + /* See if we have OCIC bit set on port 1 */ + if (ocic_mask & (1 << 1)) { + dev_dbg(hcd->self.controller, "over-current indicator change " + "on port 1\n"); + + if (!length) + length = 1; + + buf[0] |= 1 << 1; + } + return length; +} + +/* + * Look at the control requests to the root hub and see if we need to override. + */ +static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev->platform_data; + int temp; + + switch (typeReq) { + case GetPortStatus: + /* Check the port number */ + if (wIndex != 1) + break; + + dev_dbg(dev, "GetPortStatus(%u)\n", wIndex); + + temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1); + + /* The port power status (PPS) bit defaults to 1 */ + if (hub->get_power && hub->get_power(wIndex) == 0) + temp &= ~RH_PS_PPS; + + /* The port over-current indicator (POCI) bit is always 0 */ + if (hub->get_oci && hub->get_oci(wIndex) > 0) + temp |= RH_PS_POCI; + + /* The over-current indicator change (OCIC) bit is 0 too */ + if (ocic_mask & (1 << wIndex)) + temp |= RH_PS_OCIC; + + put_unaligned(cpu_to_le32(temp), (__le32 *)buf); + return 0; + case SetPortFeature: + temp = 1; + goto check_port; + case ClearPortFeature: + temp = 0; + +check_port: + /* Check the port number */ + if (wIndex != 1) + break; + + switch (wValue) { + case USB_PORT_FEAT_POWER: + dev_dbg(dev, "%sPortFeature(%u): %s\n", + temp ? "Set" : "Clear", wIndex, "POWER"); + + if (!hub->set_power) + return -EPIPE; + + return hub->set_power(wIndex, temp) ? -EPIPE : 0; + case USB_PORT_FEAT_C_OVER_CURRENT: + dev_dbg(dev, "%sPortFeature(%u): %s\n", + temp ? "Set" : "Clear", wIndex, + "C_OVER_CURRENT"); + + if (temp) + ocic_mask |= 1 << wIndex; + else + ocic_mask &= ~(1 << wIndex); + return 0; + } + } + + return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); +} + +static const struct hc_driver ohci_da8xx_hc_driver = { + .description = hcd_name, + .product_desc = "DA8xx OHCI", + .hcd_priv_size = sizeof(struct ohci_hcd), + + /* + * generic hardware linkage + */ + .irq = ohci_irq, + .flags = HCD_USB11 | HCD_MEMORY, + + /* + * basic lifecycle operations + */ + .reset = ohci_da8xx_init, + .start = ohci_da8xx_start, + .stop = ohci_da8xx_stop, + .shutdown = ohci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .endpoint_disable = ohci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ohci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ohci_da8xx_hub_status_data, + .hub_control = ohci_da8xx_hub_control, + +#ifdef CONFIG_PM + .bus_suspend = ohci_bus_suspend, + .bus_resume = ohci_bus_resume, +#endif + .start_port_reset = ohci_start_port_reset, +}; + +/*-------------------------------------------------------------------------*/ + + +/** + * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs + * Context: !in_interrupt() + * + * Allocates basic resources for this USB host controller, and + * then invokes the start() method for the HCD associated with it + * through the hotplug entry's driver_data. 
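Both the isp1760 hunks earlier and the DA8xx hub_control above build the GetPortStatus reply as a single 32-bit little-endian value: wPortStatus occupies the low 16 bits and wPortChange the high 16 bits, which is why plain USB_PORT_STAT_* masks are ORed in directly while change masks are shifted left by 16. A minimal sketch of that layout follows; fill_port_status and its flag parameters are hypothetical, standing in for whatever the controller's port register actually reports.

#include <linux/types.h>
#include <linux/usb/ch11.h>	/* USB_PORT_STAT_* masks */
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/*
 * Hypothetical helper: pack controller state into the 32-bit value a
 * GetPortStatus request returns.  'connected', 'enabled' and
 * 'conn_changed' stand in for bits read from the real port register.
 */
static void fill_port_status(char *buf, bool connected, bool enabled,
			     bool conn_changed)
{
	u32 status = 0;

	if (connected)
		status |= USB_PORT_STAT_CONNECTION;	/* wPortStatus, bit 0 */
	if (enabled)
		status |= USB_PORT_STAT_ENABLE;		/* wPortStatus, bit 1 */
	if (conn_changed)
		/* change bits live in the upper half of the reply */
		status |= USB_PORT_STAT_C_CONNECTION << 16;

	put_unaligned(cpu_to_le32(status), (__le32 *)buf);
}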
+ */ +static int usb_hcd_da8xx_probe(const struct hc_driver *driver, + struct platform_device *pdev) +{ + struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data; + struct usb_hcd *hcd; + struct resource *mem; + int error, irq; + + if (hub == NULL) + return -ENODEV; + + usb11_clk = clk_get(&pdev->dev, "usb11"); + if (IS_ERR(usb11_clk)) + return PTR_ERR(usb11_clk); + + usb20_clk = clk_get(&pdev->dev, "usb20"); + if (IS_ERR(usb20_clk)) { + error = PTR_ERR(usb20_clk); + goto err0; + } + + hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); + if (!hcd) { + error = -ENOMEM; + goto err1; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + error = -ENODEV; + goto err2; + } + hcd->rsrc_start = mem->start; + hcd->rsrc_len = mem->end - mem->start + 1; + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + dev_dbg(&pdev->dev, "request_mem_region failed\n"); + error = -EBUSY; + goto err2; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(&pdev->dev, "ioremap failed\n"); + error = -ENOMEM; + goto err3; + } + + ohci_hcd_init(hcd_to_ohci(hcd)); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + error = -ENODEV; + goto err4; + } + error = usb_add_hcd(hcd, irq, IRQF_DISABLED); + if (error) + goto err4; + + if (hub->ocic_notify) { + error = hub->ocic_notify(ohci_da8xx_ocic_handler); + if (!error) + return 0; + } + + usb_remove_hcd(hcd); +err4: + iounmap(hcd->regs); +err3: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +err2: + usb_put_hcd(hcd); +err1: + clk_put(usb20_clk); +err0: + clk_put(usb11_clk); + return error; +} + +/** + * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs + * @dev: USB Host Controller being removed + * Context: !in_interrupt() + * + * Reverses the effect of usb_hcd_da8xx_probe(), first invoking + * the HCD's stop() method. It is always called from a thread + * context, normally "rmmod", "apmd", or something similar. + */ +static inline void +usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev) +{ + struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data; + + hub->ocic_notify(NULL); + usb_remove_hcd(hcd); + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + usb_put_hcd(hcd); + clk_put(usb20_clk); + clk_put(usb11_clk); +} + +static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev) +{ + return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev); +} + +static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + + usb_hcd_da8xx_remove(hcd, dev); + platform_set_drvdata(dev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + + if (time_before(jiffies, ohci->next_statechange)) + msleep(5); + ohci->next_statechange = jiffies; + + ohci_da8xx_clock(0); + hcd->state = HC_STATE_SUSPENDED; + dev->dev.power.power_state = PMSG_SUSPEND; + return 0; +} + +static int ohci_da8xx_resume(struct platform_device *dev) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + + if (time_before(jiffies, ohci->next_statechange)) + msleep(5); + ohci->next_statechange = jiffies; + + ohci_da8xx_clock(1); + dev->dev.power.power_state = PMSG_ON; + usb_hcd_resume_root_hub(hcd); + return 0; +} +#endif + +/* + * Driver definition to register with platform structure. 
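usb_hcd_da8xx_probe() above follows the usual claim/map/unwind sequence, and most other hunks in this series switch the length computation to resource_size(). A condensed sketch of that pattern is shown below; example_probe, example_priv and the resource indices are placeholders with no real hardware behind them.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/io.h>

struct example_priv {
	void __iomem	*regs;
	int		irq;
};

/* Hypothetical probe: claim and map the first MMIO resource, grab the IRQ. */
static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0)
		return -ENODEV;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* resource_size(res) is simply res->end - res->start + 1 */
	if (!request_mem_region(res->start, resource_size(res),
				dev_name(&pdev->dev))) {
		kfree(priv);
		return -EBUSY;
	}

	priv->regs = ioremap(res->start, resource_size(res));
	if (!priv->regs) {
		release_mem_region(res->start, resource_size(res));
		kfree(priv);
		return -ENOMEM;
	}
	priv->irq = irq;

	platform_set_drvdata(pdev, priv);
	/* ... create and add the usb_hcd here ... */
	return 0;
}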
+ */ +static struct platform_driver ohci_hcd_da8xx_driver = { + .probe = ohci_hcd_da8xx_drv_probe, + .remove = ohci_hcd_da8xx_drv_remove, + .shutdown = usb_hcd_platform_shutdown, +#ifdef CONFIG_PM + .suspend = ohci_da8xx_suspend, + .resume = ohci_da8xx_resume, +#endif + .driver = { + .owner = THIS_MODULE, + .name = "ohci", + }, +}; diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c index 811f5dfdc58..8ad2441b028 100644 --- a/drivers/usb/host/ohci-dbg.c +++ b/drivers/usb/host/ohci-dbg.c @@ -53,13 +53,13 @@ urb_print(struct urb * urb, char * str, int small, int status) int i, len; if (usb_pipecontrol (pipe)) { - printk (KERN_DEBUG __FILE__ ": setup(8):"); + printk (KERN_DEBUG "%s: setup(8):", __FILE__); for (i = 0; i < 8 ; i++) printk (" %02x", ((__u8 *) urb->setup_packet) [i]); printk ("\n"); } if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) { - printk (KERN_DEBUG __FILE__ ": data(%d/%d):", + printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__, urb->actual_length, urb->transfer_buffer_length); len = usb_pipeout (pipe)? diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 78bb7710f36..02864a237a2 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -32,6 +32,7 @@ #include <linux/list.h> #include <linux/usb.h> #include <linux/usb/otg.h> +#include <linux/usb/hcd.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/workqueue.h> @@ -43,7 +44,6 @@ #include <asm/unaligned.h> #include <asm/byteorder.h> -#include "../core/hcd.h" #define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell" #define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver" @@ -87,6 +87,7 @@ static int ohci_restart (struct ohci_hcd *ohci); #ifdef CONFIG_PCI static void quirk_amd_pll(int state); static void amd_iso_dev_put(void); +static void sb800_prefetch(struct ohci_hcd *ohci, int on); #else static inline void quirk_amd_pll(int state) { @@ -96,6 +97,10 @@ static inline void amd_iso_dev_put(void) { return; } +static inline void sb800_prefetch(struct ohci_hcd *ohci, int on) +{ + return; +} #endif @@ -1001,9 +1006,14 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ohci_hcd_s3c2410_driver #endif -#ifdef CONFIG_ARCH_OMAP +#ifdef CONFIG_USB_OHCI_HCD_OMAP1 #include "ohci-omap.c" -#define PLATFORM_DRIVER ohci_hcd_omap_driver +#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver +#endif + +#ifdef CONFIG_USB_OHCI_HCD_OMAP3 +#include "ohci-omap3.c" +#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver #endif #ifdef CONFIG_ARCH_LH7A404 @@ -1021,7 +1031,7 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ohci_hcd_ep93xx_driver #endif -#ifdef CONFIG_SOC_AU1X00 +#ifdef CONFIG_MIPS_ALCHEMY #include "ohci-au1xxx.c" #define PLATFORM_DRIVER ohci_hcd_au1xxx_driver #endif @@ -1046,6 +1056,11 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER usb_hcd_pnx4008_driver #endif +#ifdef CONFIG_ARCH_DAVINCI_DA8XX +#include "ohci-da8xx.c" +#define PLATFORM_DRIVER ohci_hcd_da8xx_driver +#endif + #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ defined(CONFIG_CPU_SUBTYPE_SH7763) || \ @@ -1080,8 +1095,15 @@ MODULE_LICENSE ("GPL"); #define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver #endif +#ifdef CONFIG_MACH_JZ4740 +#include "ohci-jz4740.c" +#define PLATFORM_DRIVER ohci_hcd_jz4740_driver +#endif + #if !defined(PCI_DRIVER) && \ !defined(PLATFORM_DRIVER) && \ + !defined(OMAP1_PLATFORM_DRIVER) && \ + !defined(OMAP3_PLATFORM_DRIVER) && \ !defined(OF_PLATFORM_DRIVER) && \ !defined(SA1111_DRIVER) && \ 
!defined(PS3_SYSTEM_BUS_DRIVER) && \ @@ -1123,6 +1145,18 @@ static int __init ohci_hcd_mod_init(void) goto error_platform; #endif +#ifdef OMAP1_PLATFORM_DRIVER + retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER); + if (retval < 0) + goto error_omap1_platform; +#endif + +#ifdef OMAP3_PLATFORM_DRIVER + retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER); + if (retval < 0) + goto error_omap3_platform; +#endif + #ifdef OF_PLATFORM_DRIVER retval = of_register_platform_driver(&OF_PLATFORM_DRIVER); if (retval < 0) @@ -1190,6 +1224,14 @@ static int __init ohci_hcd_mod_init(void) platform_driver_unregister(&PLATFORM_DRIVER); error_platform: #endif +#ifdef OMAP1_PLATFORM_DRIVER + platform_driver_unregister(&OMAP1_PLATFORM_DRIVER); + error_omap1_platform: +#endif +#ifdef OMAP3_PLATFORM_DRIVER + platform_driver_unregister(&OMAP3_PLATFORM_DRIVER); + error_omap3_platform: +#endif #ifdef PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); error_ps3: diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 32bbce9718f..65cac8cc892 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c @@ -697,7 +697,7 @@ static int ohci_hub_control ( u16 wLength ) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); - int ports = hcd_to_bus (hcd)->root_hub->maxchild; + int ports = ohci->num_ports; u32 temp; int retval = 0; diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c new file mode 100644 index 00000000000..10e1872f3ab --- /dev/null +++ b/drivers/usb/host/ohci-jz4740.c @@ -0,0 +1,276 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
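ohci_hcd_mod_init() above grows two more platform_driver_register() calls, with the error path unregistering everything that succeeded, in reverse order. Reduced to its skeleton (with hypothetical stand-in drivers), the register/unwind ladder looks roughly like this:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical driver objects standing in for the real glue drivers. */
static struct platform_driver example_driver_a = {
	.driver = { .name = "example-a", .owner = THIS_MODULE },
};

static struct platform_driver example_driver_b = {
	.driver = { .name = "example-b", .owner = THIS_MODULE },
};

static int __init example_mod_init(void)
{
	int retval;

	retval = platform_driver_register(&example_driver_a);
	if (retval < 0)
		goto error_a;

	retval = platform_driver_register(&example_driver_b);
	if (retval < 0)
		goto error_b;

	return 0;

	/* unwind in the reverse order of registration */
error_b:
	platform_driver_unregister(&example_driver_a);
error_a:
	return retval;
}
module_init(example_mod_init);

static void __exit example_mod_exit(void)
{
	platform_driver_unregister(&example_driver_b);
	platform_driver_unregister(&example_driver_a);
}
module_exit(example_mod_exit);

MODULE_LICENSE("GPL");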
+ * + */ + +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> + +struct jz4740_ohci_hcd { + struct ohci_hcd ohci_hcd; + + struct regulator *vbus; + bool vbus_enabled; + struct clk *clk; +}; + +static inline struct jz4740_ohci_hcd *hcd_to_jz4740_hcd(struct usb_hcd *hcd) +{ + return (struct jz4740_ohci_hcd *)(hcd->hcd_priv); +} + +static inline struct usb_hcd *jz4740_hcd_to_hcd(struct jz4740_ohci_hcd *jz4740_ohci) +{ + return container_of((void *)jz4740_ohci, struct usb_hcd, hcd_priv); +} + +static int ohci_jz4740_start(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int ret; + + ret = ohci_init(ohci); + if (ret < 0) + return ret; + + ohci->num_ports = 1; + + ret = ohci_run(ohci); + if (ret < 0) { + dev_err(hcd->self.controller, "Can not start %s", + hcd->self.bus_name); + ohci_stop(hcd); + return ret; + } + return 0; +} + +static int ohci_jz4740_set_vbus_power(struct jz4740_ohci_hcd *jz4740_ohci, + bool enabled) +{ + int ret = 0; + + if (!jz4740_ohci->vbus) + return 0; + + if (enabled && !jz4740_ohci->vbus_enabled) { + ret = regulator_enable(jz4740_ohci->vbus); + if (ret) + dev_err(jz4740_hcd_to_hcd(jz4740_ohci)->self.controller, + "Could not power vbus\n"); + } else if (!enabled && jz4740_ohci->vbus_enabled) { + ret = regulator_disable(jz4740_ohci->vbus); + } + + if (ret == 0) + jz4740_ohci->vbus_enabled = enabled; + + return ret; +} + +static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd); + int ret; + + switch (typeReq) { + case SetHubFeature: + if (wValue == USB_PORT_FEAT_POWER) + ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true); + break; + case ClearHubFeature: + if (wValue == USB_PORT_FEAT_POWER) + ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false); + break; + } + + if (ret) + return ret; + + return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); +} + + +static const struct hc_driver ohci_jz4740_hc_driver = { + .description = hcd_name, + .product_desc = "JZ4740 OHCI", + .hcd_priv_size = sizeof(struct jz4740_ohci_hcd), + + /* + * generic hardware linkage + */ + .irq = ohci_irq, + .flags = HCD_USB11 | HCD_MEMORY, + + /* + * basic lifecycle operations + */ + .start = ohci_jz4740_start, + .stop = ohci_stop, + .shutdown = ohci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .endpoint_disable = ohci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ohci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ohci_hub_status_data, + .hub_control = ohci_jz4740_hub_control, +#ifdef CONFIG_PM + .bus_suspend = ohci_bus_suspend, + .bus_resume = ohci_bus_resume, +#endif + .start_port_reset = ohci_start_port_reset, +}; + + +static __devinit int jz4740_ohci_probe(struct platform_device *pdev) +{ + int ret; + struct usb_hcd *hcd; + struct jz4740_ohci_hcd *jz4740_ohci; + struct resource *res; + int irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENOENT; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get platform irq\n"); + return irq; + } + + hcd = usb_create_hcd(&ohci_jz4740_hc_driver, &pdev->dev, "jz4740"); + if (!hcd) { + dev_err(&pdev->dev, "Failed to create hcd.\n"); + return -ENOMEM; + } + + 
jz4740_ohci = hcd_to_jz4740_hcd(hcd); + + res = request_mem_region(res->start, resource_size(res), hcd_name); + if (!res) { + dev_err(&pdev->dev, "Failed to request mem region.\n"); + ret = -EBUSY; + goto err_free; + } + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + hcd->regs = ioremap(res->start, resource_size(res)); + + if (!hcd->regs) { + dev_err(&pdev->dev, "Failed to ioremap registers.\n"); + ret = -EBUSY; + goto err_release_mem; + } + + jz4740_ohci->clk = clk_get(&pdev->dev, "uhc"); + if (IS_ERR(jz4740_ohci->clk)) { + ret = PTR_ERR(jz4740_ohci->clk); + dev_err(&pdev->dev, "Failed to get clock: %d\n", ret); + goto err_iounmap; + } + + jz4740_ohci->vbus = regulator_get(&pdev->dev, "vbus"); + if (IS_ERR(jz4740_ohci->vbus)) + jz4740_ohci->vbus = NULL; + + + clk_set_rate(jz4740_ohci->clk, 48000000); + clk_enable(jz4740_ohci->clk); + if (jz4740_ohci->vbus) + ohci_jz4740_set_vbus_power(jz4740_ohci, true); + + platform_set_drvdata(pdev, hcd); + + ohci_hcd_init(hcd_to_ohci(hcd)); + + ret = usb_add_hcd(hcd, irq, 0); + if (ret) { + dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret); + goto err_disable; + } + + return 0; + +err_disable: + platform_set_drvdata(pdev, NULL); + if (jz4740_ohci->vbus) { + regulator_disable(jz4740_ohci->vbus); + regulator_put(jz4740_ohci->vbus); + } + clk_disable(jz4740_ohci->clk); + + clk_put(jz4740_ohci->clk); +err_iounmap: + iounmap(hcd->regs); +err_release_mem: + release_mem_region(res->start, resource_size(res)); +err_free: + usb_put_hcd(hcd); + + return ret; +} + +static __devexit int jz4740_ohci_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd); + + usb_remove_hcd(hcd); + + platform_set_drvdata(pdev, NULL); + + if (jz4740_ohci->vbus) { + regulator_disable(jz4740_ohci->vbus); + regulator_put(jz4740_ohci->vbus); + } + + clk_disable(jz4740_ohci->clk); + clk_put(jz4740_ohci->clk); + + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + + usb_put_hcd(hcd); + + return 0; +} + +static struct platform_driver ohci_hcd_jz4740_driver = { + .probe = jz4740_ohci_probe, + .remove = __devexit_p(jz4740_ohci_remove), + .driver = { + .name = "jz4740-ohci", + .owner = THIS_MODULE, + }, +}; + +MODULE_ALIAS("platfrom:jz4740-ohci"); diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c index de42283149c..18d39f0463e 100644 --- a/drivers/usb/host/ohci-lh7a404.c +++ b/drivers/usb/host/ohci-lh7a404.c @@ -28,8 +28,8 @@ extern int usb_disabled(void); static void lh7a404_start_hc(struct platform_device *dev) { - printk(KERN_DEBUG __FILE__ - ": starting LH7A404 OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n", + __FILE__); /* * Now, carefully enable the USB clock, and take @@ -39,14 +39,13 @@ static void lh7a404_start_hc(struct platform_device *dev) udelay(1000); USBH_CMDSTATUS = OHCI_HCR; - printk(KERN_DEBUG __FILE__ - ": Clock to USB host has been enabled \n"); + printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__); } static void lh7a404_stop_hc(struct platform_device *dev) { - printk(KERN_DEBUG __FILE__ - ": stopping LH7A404 OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n", + __FILE__); CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */ } diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 83cbecd2a1e..5645f70b921 100644 --- a/drivers/usb/host/ohci-omap.c +++ 
b/drivers/usb/host/ohci-omap.c @@ -24,10 +24,10 @@ #include <asm/io.h> #include <asm/mach-types.h> -#include <mach/mux.h> +#include <plat/mux.h> #include <mach/irqs.h> -#include <mach/fpga.h> -#include <mach/usb.h> +#include <plat/fpga.h> +#include <plat/usb.h> /* OMAP-1510 OHCI has its own MMU for DMA */ diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c new file mode 100644 index 00000000000..2cc8a504b18 --- /dev/null +++ b/drivers/usb/host/ohci-omap3.c @@ -0,0 +1,735 @@ +/* + * ohci-omap3.c - driver for OHCI on OMAP3 and later processors + * + * Bus Glue for OMAP3 USBHOST 3 port OHCI controller + * This controller is also used in later OMAPs and AM35x chips + * + * Copyright (C) 2007-2010 Texas Instruments, Inc. + * Author: Vikram Pandita <vikram.pandita@ti.com> + * Author: Anand Gadiyar <gadiyar@ti.com> + * + * Based on ehci-omap.c and some other ohci glue layers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * TODO (last updated Mar 10th, 2010): + * - add kernel-doc + * - Factor out code common to EHCI to a separate file + * - Make EHCI and OHCI coexist together + * - needs newer silicon versions to actually work + * - the last one to be loaded currently steps on the other's toes + * - Add hooks for configuring transceivers, etc. 
at init/exit + * - Add aggressive clock-management code + */ + +#include <linux/platform_device.h> +#include <linux/clk.h> + +#include <plat/usb.h> + +/* + * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES + * Use ohci_omap_readl()/ohci_omap_writel() functions + */ + +/* TLL Register Set */ +#define OMAP_USBTLL_REVISION (0x00) +#define OMAP_USBTLL_SYSCONFIG (0x10) +#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8) +#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3) +#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2) +#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1) +#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0) + +#define OMAP_USBTLL_SYSSTATUS (0x14) +#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0) + +#define OMAP_USBTLL_IRQSTATUS (0x18) +#define OMAP_USBTLL_IRQENABLE (0x1C) + +#define OMAP_TLL_SHARED_CONF (0x30) +#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6) +#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5) +#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2) +#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1) +#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0) + +#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num) +#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24 +#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11) +#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10) +#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9) +#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8) +#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1) +#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0) + +#define OMAP_TLL_CHANNEL_COUNT 3 + +/* UHH Register Set */ +#define OMAP_UHH_REVISION (0x00) +#define OMAP_UHH_SYSCONFIG (0x10) +#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12) +#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8) +#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3) +#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2) +#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1) +#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0) + +#define OMAP_UHH_SYSSTATUS (0x14) +#define OMAP_UHH_SYSSTATUS_UHHRESETDONE (1 << 0) +#define OMAP_UHH_SYSSTATUS_OHCIRESETDONE (1 << 1) +#define OMAP_UHH_SYSSTATUS_EHCIRESETDONE (1 << 2) +#define OMAP_UHH_HOSTCONFIG (0x40) +#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0) +#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0) +#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11) +#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12) +#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2) +#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3) +#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4) +#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5) +#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8) +#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9) +#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10) + +#define OMAP_UHH_DEBUG_CSR (0x44) + +/*-------------------------------------------------------------------------*/ + +static inline void ohci_omap_writel(void __iomem *base, u32 reg, u32 val) +{ + __raw_writel(val, base + reg); +} + +static inline u32 ohci_omap_readl(void __iomem *base, u32 reg) +{ + return __raw_readl(base + reg); +} + +static inline void ohci_omap_writeb(void __iomem *base, u8 reg, u8 val) +{ + __raw_writeb(val, base + reg); +} + +static inline u8 ohci_omap_readb(void __iomem *base, u8 reg) +{ + return __raw_readb(base + reg); +} + +/*-------------------------------------------------------------------------*/ + +struct ohci_hcd_omap3 { + struct ohci_hcd *ohci; + struct device *dev; + + struct clk *usbhost_ick; + struct clk *usbhost2_120m_fck; + struct clk *usbhost1_48m_fck; 
+ struct clk *usbtll_fck; + struct clk *usbtll_ick; + + /* port_mode: TLL/PHY, 2/3/4/6-PIN, DP-DM/DAT-SE0 */ + enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS]; + void __iomem *uhh_base; + void __iomem *tll_base; + void __iomem *ohci_base; + + unsigned es2_compatibility:1; +}; + +/*-------------------------------------------------------------------------*/ + +static void ohci_omap3_clock_power(struct ohci_hcd_omap3 *omap, int on) +{ + if (on) { + clk_enable(omap->usbtll_ick); + clk_enable(omap->usbtll_fck); + clk_enable(omap->usbhost_ick); + clk_enable(omap->usbhost1_48m_fck); + clk_enable(omap->usbhost2_120m_fck); + } else { + clk_disable(omap->usbhost2_120m_fck); + clk_disable(omap->usbhost1_48m_fck); + clk_disable(omap->usbhost_ick); + clk_disable(omap->usbtll_fck); + clk_disable(omap->usbtll_ick); + } +} + +static int ohci_omap3_init(struct usb_hcd *hcd) +{ + dev_dbg(hcd->self.controller, "starting OHCI controller\n"); + + return ohci_init(hcd_to_ohci(hcd)); +} + + +/*-------------------------------------------------------------------------*/ + +static int ohci_omap3_start(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int ret; + + /* + * RemoteWakeupConnected has to be set explicitly before + * calling ohci_run. The reset value of RWC is 0. + */ + ohci->hc_control = OHCI_CTRL_RWC; + writel(OHCI_CTRL_RWC, &ohci->regs->control); + + ret = ohci_run(ohci); + + if (ret < 0) { + dev_err(hcd->self.controller, "can't start\n"); + ohci_stop(hcd); + } + + return ret; +} + +/*-------------------------------------------------------------------------*/ + +/* + * convert the port-mode enum to a value we can use in the FSLSMODE + * field of USBTLL_CHANNEL_CONF + */ +static unsigned ohci_omap3_fslsmode(enum ohci_omap3_port_mode mode) +{ + switch (mode) { + case OMAP_OHCI_PORT_MODE_UNUSED: + case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: + return 0x0; + + case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM: + return 0x1; + + case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0: + return 0x2; + + case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM: + return 0x3; + + case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0: + return 0x4; + + case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM: + return 0x5; + + case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0: + return 0x6; + + case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM: + return 0x7; + + case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0: + return 0xA; + + case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM: + return 0xB; + default: + pr_warning("Invalid port mode, using default\n"); + return 0x0; + } +} + +static void ohci_omap3_tll_config(struct ohci_hcd_omap3 *omap) +{ + u32 reg; + int i; + + /* Program TLL SHARED CONF */ + reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF); + reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN; + reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN; + reg |= OMAP_TLL_SHARED_CONF_USB_DIVRATION; + reg |= OMAP_TLL_SHARED_CONF_FCLK_IS_ON; + ohci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg); + + /* Program each TLL channel */ + /* + * REVISIT: Only the 3-pin and 4-pin PHY modes have + * actually been tested. 
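The FSLSMODE mapping and TLL channel setup above only apply to ports a board actually wires up; which mode each port uses arrives through platform data, which ohci_hcd_omap3_probe() further down copies into omap->port_mode[] along with es2_compatibility. A board-side declaration could look like the sketch below; the field names are inferred from that probe rather than quoted from <plat/usb.h>, so treat them as assumptions.

#include <plat/usb.h>	/* port mode enum and OHCI platform data on OMAP3 */

/*
 * Illustrative only: field names inferred from how ohci-omap3.c reads its
 * platform_data; check <plat/usb.h> for the real definition before copying
 * this into a board file.
 */
static struct ohci_hcd_omap_platform_data board_usbhost_pdata = {
	.port_mode = {
		OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0,	/* port 1: FS/LS PHY */
		OMAP_OHCI_PORT_MODE_UNUSED,		/* port 2: not wired */
		OMAP_OHCI_PORT_MODE_UNUSED,		/* port 3: not wired */
	},
	.es2_compatibility = 0,	/* set on OMAP3 ES2.1 or earlier silicon */
};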
+ */ + for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) { + + /* Enable only those channels that are actually used */ + if (omap->port_mode[i] == OMAP_OHCI_PORT_MODE_UNUSED) + continue; + + reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i)); + reg |= ohci_omap3_fslsmode(omap->port_mode[i]) + << OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT; + reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS; + reg |= OMAP_TLL_CHANNEL_CONF_CHANEN; + ohci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg); + } +} + +/* omap3_start_ohci + * - Start the TI USBHOST controller + */ +static int omap3_start_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + u32 reg = 0; + int ret = 0; + + dev_dbg(omap->dev, "starting TI OHCI USB Controller\n"); + + /* Get all the clock handles we need */ + omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick"); + if (IS_ERR(omap->usbhost_ick)) { + dev_err(omap->dev, "could not get usbhost_ick\n"); + ret = PTR_ERR(omap->usbhost_ick); + goto err_host_ick; + } + + omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck"); + if (IS_ERR(omap->usbhost2_120m_fck)) { + dev_err(omap->dev, "could not get usbhost_120m_fck\n"); + ret = PTR_ERR(omap->usbhost2_120m_fck); + goto err_host_120m_fck; + } + + omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck"); + if (IS_ERR(omap->usbhost1_48m_fck)) { + dev_err(omap->dev, "could not get usbhost_48m_fck\n"); + ret = PTR_ERR(omap->usbhost1_48m_fck); + goto err_host_48m_fck; + } + + omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck"); + if (IS_ERR(omap->usbtll_fck)) { + dev_err(omap->dev, "could not get usbtll_fck\n"); + ret = PTR_ERR(omap->usbtll_fck); + goto err_tll_fck; + } + + omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick"); + if (IS_ERR(omap->usbtll_ick)) { + dev_err(omap->dev, "could not get usbtll_ick\n"); + ret = PTR_ERR(omap->usbtll_ick); + goto err_tll_ick; + } + + /* Now enable all the clocks in the correct order */ + ohci_omap3_clock_power(omap, 1); + + /* perform TLL soft reset, and wait until reset is complete */ + ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, + OMAP_USBTLL_SYSCONFIG_SOFTRESET); + + /* Wait for TLL reset to complete */ + while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS) + & OMAP_USBTLL_SYSSTATUS_RESETDONE)) { + cpu_relax(); + + if (time_after(jiffies, timeout)) { + dev_dbg(omap->dev, "operation timed out\n"); + ret = -EINVAL; + goto err_sys_status; + } + } + + dev_dbg(omap->dev, "TLL reset done\n"); + + /* (1<<3) = no idle mode only for initial debugging */ + ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, + OMAP_USBTLL_SYSCONFIG_ENAWAKEUP | + OMAP_USBTLL_SYSCONFIG_SIDLEMODE | + OMAP_USBTLL_SYSCONFIG_CACTIVITY); + + + /* Put UHH in NoIdle/NoStandby mode */ + reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG); + reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP + | OMAP_UHH_SYSCONFIG_SIDLEMODE + | OMAP_UHH_SYSCONFIG_CACTIVITY + | OMAP_UHH_SYSCONFIG_MIDLEMODE); + reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE; + reg &= ~OMAP_UHH_SYSCONFIG_SOFTRESET; + + ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg); + + reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG); + + /* setup ULPI bypass and burst configurations */ + reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN + | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN + | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN); + reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN; + + /* + * REVISIT: Pi_CONNECT_STATUS controls MStandby + * assertion and Swakeup generation - let us not + * worry about this 
for now. OMAP HWMOD framework + * might take care of this later. If not, we can + * update these registers when adding aggressive + * clock management code. + * + * For now, turn off all the Pi_CONNECT_STATUS bits + * + if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED) + reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS; + if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED) + reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS; + if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED) + reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; + */ + reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS; + reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS; + reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; + + if (omap->es2_compatibility) { + /* + * All OHCI modes need to go through the TLL, + * unlike in the EHCI case. So use UTMI mode + * for all ports for OHCI, on ES2.x silicon + */ + dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n"); + reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + } else { + dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n"); + if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; + else + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; + + if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; + else + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; + + if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; + else + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; + + } + ohci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg); + dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg); + + ohci_omap3_tll_config(omap); + + return 0; + +err_sys_status: + ohci_omap3_clock_power(omap, 0); + clk_put(omap->usbtll_ick); + +err_tll_ick: + clk_put(omap->usbtll_fck); + +err_tll_fck: + clk_put(omap->usbhost1_48m_fck); + +err_host_48m_fck: + clk_put(omap->usbhost2_120m_fck); + +err_host_120m_fck: + clk_put(omap->usbhost_ick); + +err_host_ick: + return ret; +} + +static void omap3_stop_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n"); + + /* Reset USBHOST for insmod/rmmod to work */ + ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, + OMAP_UHH_SYSCONFIG_SOFTRESET); + while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & OMAP_UHH_SYSSTATUS_UHHRESETDONE)) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & OMAP_UHH_SYSSTATUS_OHCIRESETDONE)) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & OMAP_UHH_SYSSTATUS_EHCIRESETDONE)) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1)); + + while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS) + & (1 << 0))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + ohci_omap3_clock_power(omap, 0); + + if (omap->usbtll_fck != NULL) { + clk_put(omap->usbtll_fck); + omap->usbtll_fck = NULL; + } + + if (omap->usbhost_ick != NULL) { + clk_put(omap->usbhost_ick); + omap->usbhost_ick = NULL; + } + + if (omap->usbhost1_48m_fck != NULL) { + clk_put(omap->usbhost1_48m_fck); + 
omap->usbhost1_48m_fck = NULL; + } + + if (omap->usbhost2_120m_fck != NULL) { + clk_put(omap->usbhost2_120m_fck); + omap->usbhost2_120m_fck = NULL; + } + + if (omap->usbtll_ick != NULL) { + clk_put(omap->usbtll_ick); + omap->usbtll_ick = NULL; + } + + dev_dbg(omap->dev, "Clock to USB host has been disabled\n"); +} + +/*-------------------------------------------------------------------------*/ + +static const struct hc_driver ohci_omap3_hc_driver = { + .description = hcd_name, + .product_desc = "OMAP3 OHCI Host Controller", + .hcd_priv_size = sizeof(struct ohci_hcd), + + /* + * generic hardware linkage + */ + .irq = ohci_irq, + .flags = HCD_USB11 | HCD_MEMORY, + + /* + * basic lifecycle operations + */ + .reset = ohci_omap3_init, + .start = ohci_omap3_start, + .stop = ohci_stop, + .shutdown = ohci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .endpoint_disable = ohci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ohci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ohci_hub_status_data, + .hub_control = ohci_hub_control, +#ifdef CONFIG_PM + .bus_suspend = ohci_bus_suspend, + .bus_resume = ohci_bus_resume, +#endif + .start_port_reset = ohci_start_port_reset, +}; + +/*-------------------------------------------------------------------------*/ + +/* + * configure so an HC device and id are always provided + * always called with process context; sleeping is OK + */ + +/** + * ohci_hcd_omap3_probe - initialize OMAP-based HCDs + * + * Allocates basic resources for this USB host controller, and + * then invokes the start() method for the HCD associated with it + * through the hotplug entry's driver_data. + */ +static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev) +{ + struct ohci_hcd_omap_platform_data *pdata = pdev->dev.platform_data; + struct ohci_hcd_omap3 *omap; + struct resource *res; + struct usb_hcd *hcd; + int ret = -ENODEV; + int irq; + + if (usb_disabled()) + goto err_disabled; + + if (!pdata) { + dev_dbg(&pdev->dev, "missing platform_data\n"); + goto err_pdata; + } + + irq = platform_get_irq(pdev, 0); + + omap = kzalloc(sizeof(*omap), GFP_KERNEL); + if (!omap) { + ret = -ENOMEM; + goto err_disabled; + } + + hcd = usb_create_hcd(&ohci_omap3_hc_driver, &pdev->dev, + dev_name(&pdev->dev)); + if (!hcd) { + ret = -ENOMEM; + goto err_create_hcd; + } + + platform_set_drvdata(pdev, omap); + omap->dev = &pdev->dev; + omap->port_mode[0] = pdata->port_mode[0]; + omap->port_mode[1] = pdata->port_mode[1]; + omap->port_mode[2] = pdata->port_mode[2]; + omap->es2_compatibility = pdata->es2_compatibility; + omap->ohci = hcd_to_ohci(hcd); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(&pdev->dev, "OHCI ioremap failed\n"); + ret = -ENOMEM; + goto err_ioremap; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + omap->uhh_base = ioremap(res->start, resource_size(res)); + if (!omap->uhh_base) { + dev_err(&pdev->dev, "UHH ioremap failed\n"); + ret = -ENOMEM; + goto err_uhh_ioremap; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + omap->tll_base = ioremap(res->start, resource_size(res)); + if (!omap->tll_base) { + dev_err(&pdev->dev, "TLL ioremap failed\n"); + ret = -ENOMEM; + goto err_tll_ioremap; + } + + ret = omap3_start_ohci(omap, hcd); + if 
(ret) { + dev_dbg(&pdev->dev, "failed to start ehci\n"); + goto err_start; + } + + ohci_hcd_init(omap->ohci); + + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); + if (ret) { + dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); + goto err_add_hcd; + } + + return 0; + +err_add_hcd: + omap3_stop_ohci(omap, hcd); + +err_start: + iounmap(omap->tll_base); + +err_tll_ioremap: + iounmap(omap->uhh_base); + +err_uhh_ioremap: + iounmap(hcd->regs); + +err_ioremap: + usb_put_hcd(hcd); + +err_create_hcd: + kfree(omap); +err_pdata: +err_disabled: + return ret; +} + +/* + * may be called without controller electrically present + * may be called with controller, bus, and devices active + */ + +/** + * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs + * @pdev: USB Host Controller being removed + * + * Reverses the effect of ohci_hcd_omap3_probe(), first invoking + * the HCD's stop() method. It is always called from a thread + * context, normally "rmmod", "apmd", or something similar. + */ +static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev) +{ + struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev); + struct usb_hcd *hcd = ohci_to_hcd(omap->ohci); + + usb_remove_hcd(hcd); + omap3_stop_ohci(omap, hcd); + iounmap(hcd->regs); + iounmap(omap->tll_base); + iounmap(omap->uhh_base); + usb_put_hcd(hcd); + kfree(omap); + + return 0; +} + +static void ohci_hcd_omap3_shutdown(struct platform_device *pdev) +{ + struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev); + struct usb_hcd *hcd = ohci_to_hcd(omap->ohci); + + if (hcd->driver->shutdown) + hcd->driver->shutdown(hcd); +} + +static struct platform_driver ohci_hcd_omap3_driver = { + .probe = ohci_hcd_omap3_probe, + .remove = __devexit_p(ohci_hcd_omap3_remove), + .shutdown = ohci_hcd_omap3_shutdown, + .driver = { + .name = "ohci-omap3", + }, +}; + +MODULE_ALIAS("platform:ohci-omap3"); +MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>"); diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index d2ba04dd785..b8a1148f248 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -177,6 +177,13 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); + + /* SB800 needs pre-fetch fix */ + if ((rev >= 0x40) && (rev <= 0x4f)) { + ohci->flags |= OHCI_QUIRK_AMD_PREFETCH; + ohci_dbg(ohci, "enabled AMD prefetch quirk\n"); + } + if ((rev > 0x3b) || (rev < 0x30)) { pci_dev_put(amd_smbus_dev); amd_smbus_dev = NULL; @@ -262,6 +269,19 @@ static void amd_iso_dev_put(void) } +static void sb800_prefetch(struct ohci_hcd *ohci, int on) +{ + struct pci_dev *pdev; + u16 misc; + + pdev = to_pci_dev(ohci_to_hcd(ohci)->self.controller); + pci_read_config_word(pdev, 0x50, &misc); + if (on == 0) + pci_write_config_word(pdev, 0x50, misc & 0xfcff); + else + pci_write_config_word(pdev, 0x50, misc | 0x0300); +} + /* List of quirks for OHCI */ static const struct pci_device_id ohci_pci_quirks[] = { { diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c index 100bf3d8437..cd74bbdd007 100644 --- a/drivers/usb/host/ohci-pnx4008.c +++ b/drivers/usb/host/ohci-pnx4008.c @@ -98,8 +98,8 @@ #define ISP1301_I2C_INTERRUPT_RISING 0xE #define ISP1301_I2C_REG_CLEAR_ADDR 1 -struct i2c_driver isp1301_driver; -struct i2c_client *isp1301_i2c_client; +static struct i2c_driver isp1301_driver; +static struct i2c_client *isp1301_i2c_client; extern int usb_disabled(void); extern int ocpi_enable(void); @@ -120,12 +120,12 @@ static int 
isp1301_remove(struct i2c_client *client) return 0; } -const struct i2c_device_id isp1301_id[] = { +static const struct i2c_device_id isp1301_id[] = { { "isp1301_pnx", 0 }, { } }; -struct i2c_driver isp1301_driver = { +static struct i2c_driver isp1301_driver = { .driver = { .name = "isp1301_pnx", }, @@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) } i2c_adap = i2c_get_adapter(2); memset(&i2c_info, 0, sizeof(struct i2c_board_info)); - strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); + strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, normal_i2c); i2c_put_adapter(i2c_adap); @@ -411,7 +411,7 @@ out3: out2: clk_put(usb_clk); out1: - i2c_unregister_client(isp1301_i2c_client); + i2c_unregister_device(isp1301_i2c_client); isp1301_i2c_client = NULL; out_i2c_driver: i2c_del_driver(&isp1301_driver); @@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev) pnx4008_unset_usb_bits(); clk_disable(usb_clk); clk_put(usb_clk); - i2c_unregister_client(isp1301_i2c_client); + i2c_unregister_device(isp1301_i2c_client); isp1301_i2c_client = NULL; i2c_del_driver(&isp1301_driver); diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index 68a30171029..df165917412 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -83,7 +83,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = { static int __devinit ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dn = op->node; + struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ohci_hcd *ohci; struct resource res; @@ -114,21 +114,21 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) hcd->rsrc_len = res.end - res.start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); + printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { - printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); + printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { - printk(KERN_ERR __FILE__ ": ioremap failed\n"); + printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } @@ -169,7 +169,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) } else release_mem_region(res.start, 0x4); } else - pr_debug(__FILE__ ": cannot get ehci offset from fdt\n"); + pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__); } iounmap(hcd->regs); @@ -212,7 +212,7 @@ static int ohci_hcd_ppc_of_shutdown(struct of_device *op) } -static struct of_device_id ohci_hcd_ppc_of_match[] = { +static const struct of_device_id ohci_hcd_ppc_of_match[] = { #ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE { .name = "usb", @@ -244,18 +244,13 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match); static struct of_platform_driver ohci_hcd_ppc_of_driver = { - .name = "ppc-of-ohci", - .match_table = ohci_hcd_ppc_of_match, .probe = ohci_hcd_ppc_of_probe, .remove = ohci_hcd_ppc_of_remove, .shutdown = ohci_hcd_ppc_of_shutdown, -#ifdef CONFIG_PM - /*.suspend = ohci_hcd_ppc_soc_drv_suspend,*/ - /*.resume = ohci_hcd_ppc_soc_drv_resume,*/ -#endif - .driver = { - .name = "ppc-of-ohci", - .owner = THIS_MODULE, + .driver = { + 
.name = "ppc-of-ohci", + .owner = THIS_MODULE, + .of_match_table = ohci_hcd_ppc_of_match, }, }; diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c index cd3398b675b..89e670e38c1 100644 --- a/drivers/usb/host/ohci-ppc-soc.c +++ b/drivers/usb/host/ohci-ppc-soc.c @@ -41,14 +41,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver, res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { - pr_debug(__FILE__ ": no irq\n"); + pr_debug("%s: no irq\n", __FILE__); return -ENODEV; } irq = res->start; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - pr_debug(__FILE__ ": no reg addr\n"); + pr_debug("%s: no reg addr\n", __FILE__); return -ENODEV; } @@ -59,14 +59,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver, hcd->rsrc_len = res->end - res->start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - pr_debug(__FILE__ ": request_mem_region failed\n"); + pr_debug("%s: request_mem_region failed\n", __FILE__); retval = -EBUSY; goto err1; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { - pr_debug(__FILE__ ": ioremap failed\n"); + pr_debug("%s: ioremap failed\n", __FILE__); retval = -ENOMEM; goto err2; } diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index f1c06202fdf..41816389477 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -203,7 +203,7 @@ static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci) __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR); } -#ifdef CONFIG_CPU_PXA27x +#ifdef CONFIG_PXA27x extern void pxa27x_clear_otgph(void); #else #define pxa27x_clear_otgph() do {} while (0) @@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { +static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { .suspend = ohci_hcd_pxa27x_drv_suspend, .resume = ohci_hcd_pxa27x_drv_resume, }; diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index 16fecb8ecc3..83094d067e0 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -8,6 +8,7 @@ */ #include <linux/irq.h> +#include <linux/slab.h> static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv) { @@ -49,9 +50,12 @@ __acquires(ohci->lock) switch (usb_pipetype (urb->pipe)) { case PIPE_ISOCHRONOUS: ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; - if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 - && quirk_amdiso(ohci)) - quirk_amd_pll(1); + if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) { + if (quirk_amdiso(ohci)) + quirk_amd_pll(1); + if (quirk_amdprefetch(ohci)) + sb800_prefetch(ohci, 0); + } break; case PIPE_INTERRUPT: ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; @@ -680,9 +684,12 @@ static void td_submit_urb ( data + urb->iso_frame_desc [cnt].offset, urb->iso_frame_desc [cnt].length, urb, cnt); } - if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 - && quirk_amdiso(ohci)) - quirk_amd_pll(0); + if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) { + if (quirk_amdiso(ohci)) + quirk_amd_pll(0); + if (quirk_amdprefetch(ohci)) + sb800_prefetch(ohci, 1); + } periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; break; diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c index e4bbe8e188e..d8eb3bdafab 100644 --- a/drivers/usb/host/ohci-sa1111.c +++ b/drivers/usb/host/ohci-sa1111.c @@ -31,8 +31,8 @@ static void 
sa1111_start_hc(struct sa1111_dev *dev) { unsigned int usb_rst = 0; - printk(KERN_DEBUG __FILE__ - ": starting SA-1111 OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n", + __FILE__); #ifdef CONFIG_SA1100_BADGE4 if (machine_is_badge4()) { @@ -65,8 +65,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev) static void sa1111_stop_hc(struct sa1111_dev *dev) { unsigned int usb_rst; - printk(KERN_DEBUG __FILE__ - ": stopping SA-1111 OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n", + __FILE__); /* * Put the USB host controller into reset. diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index 222011f6172..5bf15fed0d9 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h @@ -402,6 +402,7 @@ struct ohci_hcd { #define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ #define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/ +#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ // there are also chip quirks/bugs in init logic struct work_struct nec_work; /* Worker for NEC quirk */ @@ -433,6 +434,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci) { return ohci->flags & OHCI_QUIRK_AMD_ISO; } +static inline int quirk_amdprefetch(struct ohci_hcd *ohci) +{ + return ohci->flags & OHCI_QUIRK_AMD_PREFETCH; +} #else static inline int quirk_nec(struct ohci_hcd *ohci) { @@ -446,6 +451,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci) { return 0; } +static inline int quirk_amdprefetch(struct ohci_hcd *ohci) +{ + return 0; +} #endif /* convert between an hcd pointer and the corresponding ohci_hcd */ diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 50f57f46883..f608dfd09a8 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -34,12 +34,11 @@ #include <linux/list.h> #include <linux/interrupt.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/io.h> -#include "../core/hcd.h" - #include <asm/irq.h> #include <asm/system.h> #include <asm/unaligned.h> @@ -660,13 +659,13 @@ static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu) if (qh->dummy == NULL) { oxu_dbg(oxu, "no dummy td\n"); oxu->qh_used[i] = 0; - - return NULL; + qh = NULL; + goto unlock; } oxu->qh_used[i] = 1; } - +unlock: spin_unlock(&oxu->mem_lock); return qh; @@ -3154,10 +3153,10 @@ static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu, case 0: return 0; case 1: - return 1 << USB_PORT_FEAT_LOWSPEED; + return USB_PORT_STAT_LOW_SPEED; case 2: default: - return 1 << USB_PORT_FEAT_HIGHSPEED; + return USB_PORT_STAT_HIGH_SPEED; } } @@ -3202,7 +3201,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, * Even if OWNER is set, so the port is owned by the * companion controller, khubd needs to be able to clear * the port-change status bits (especially - * USB_PORT_FEAT_C_CONNECTION). + * USB_PORT_STAT_C_CONNECTION). 
*/ switch (wValue) { @@ -3264,11 +3263,11 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, /* wPortChange bits */ if (temp & PORT_CSC) - status |= 1 << USB_PORT_FEAT_C_CONNECTION; + status |= USB_PORT_STAT_C_CONNECTION << 16; if (temp & PORT_PEC) - status |= 1 << USB_PORT_FEAT_C_ENABLE; + status |= USB_PORT_STAT_C_ENABLE << 16; if ((temp & PORT_OCC) && !ignore_oc) - status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; + status |= USB_PORT_STAT_C_OVERCURRENT << 16; /* whoever resumes must GetPortStatus to complete it!! */ if (temp & PORT_RESUME) { @@ -3286,7 +3285,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, /* resume completed? */ else if (time_after_eq(jiffies, oxu->reset_done[wIndex])) { - status |= 1 << USB_PORT_FEAT_C_SUSPEND; + status |= USB_PORT_STAT_C_SUSPEND << 16; oxu->reset_done[wIndex] = 0; /* stop resume signaling */ @@ -3309,7 +3308,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, if ((temp & PORT_RESET) && time_after_eq(jiffies, oxu->reset_done[wIndex])) { - status |= 1 << USB_PORT_FEAT_C_RESET; + status |= USB_PORT_STAT_C_RESET << 16; oxu->reset_done[wIndex] = 0; /* force reset to complete */ @@ -3348,20 +3347,20 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, */ if (temp & PORT_CONNECT) { - status |= 1 << USB_PORT_FEAT_CONNECTION; + status |= USB_PORT_STAT_CONNECTION; /* status may be from integrated TT */ status |= oxu_port_speed(oxu, temp); } if (temp & PORT_PE) - status |= 1 << USB_PORT_FEAT_ENABLE; + status |= USB_PORT_STAT_ENABLE; if (temp & (PORT_SUSPEND|PORT_RESUME)) - status |= 1 << USB_PORT_FEAT_SUSPEND; + status |= USB_PORT_STAT_SUSPEND; if (temp & PORT_OC) - status |= 1 << USB_PORT_FEAT_OVER_CURRENT; + status |= USB_PORT_STAT_OVERCURRENT; if (temp & PORT_RESET) - status |= 1 << USB_PORT_FEAT_RESET; + status |= USB_PORT_STAT_RESET; if (temp & PORT_POWER) - status |= 1 << USB_PORT_FEAT_POWER; + status |= USB_PORT_STAT_POWER; #ifndef OXU_VERBOSE_DEBUG if (status & ~0xffff) /* only if wPortChange is interesting */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 23cf3bde476..83b5f9cea85 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) quirk_usb_handoff_xhci(pdev); } -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); +DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 749b5374282..77be3c24a42 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -33,11 +33,14 @@ #include <linux/list.h> #include <linux/interrupt.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/mm.h> #include <linux/irq.h> +#include <linux/slab.h> +#include <asm/cacheflush.h> -#include "../core/hcd.h" #include "r8a66597.h" MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver"); @@ -216,8 +219,17 @@ static void disable_controller(struct r8a66597 *r8a66597) { int port; + /* disable interrupts */ r8a66597_write(r8a66597, 0, INTENB0); - r8a66597_write(r8a66597, 0, INTSTS0); + r8a66597_write(r8a66597, 0, INTENB1); + r8a66597_write(r8a66597, 0, BRDYENB); + r8a66597_write(r8a66597, 0, BEMPENB); + r8a66597_write(r8a66597, 0, NRDYENB); + + /* clear status */ + r8a66597_write(r8a66597, 0, BRDYSTS); + 
r8a66597_write(r8a66597, 0, NRDYSTS); + r8a66597_write(r8a66597, 0, BEMPSTS); for (port = 0; port < r8a66597->max_root_hub; port++) r8a66597_disable_port(r8a66597, port); @@ -407,7 +419,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb) /* this function must be called with interrupt disabled */ static void free_usb_address(struct r8a66597 *r8a66597, - struct r8a66597_device *dev) + struct r8a66597_device *dev, int reset) { int port; @@ -419,7 +431,13 @@ static void free_usb_address(struct r8a66597 *r8a66597, dev->state = USB_STATE_DEFAULT; r8a66597->address_map &= ~(1 << dev->address); dev->address = 0; - dev_set_drvdata(&dev->udev->dev, NULL); + /* + * Only when resetting USB, it is necessary to erase drvdata. When + * a usb device with usb hub is disconnected, "dev->udev" is already + * freed on usb_disconnect(). So we cannot access the data. + */ + if (reset) + dev_set_drvdata(&dev->udev->dev, NULL); list_del(&dev->device_list); kfree(dev); @@ -811,6 +829,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); } +static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, + int status) +__releases(r8a66597->lock) +__acquires(r8a66597->lock) +{ + if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { + void *ptr; + + for (ptr = urb->transfer_buffer; + ptr < urb->transfer_buffer + urb->transfer_buffer_length; + ptr += PAGE_SIZE) + flush_dcache_page(virt_to_page(ptr)); + } + + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); + spin_lock(&r8a66597->lock); +} + /* this function must be called with interrupt disabled */ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) { @@ -822,8 +860,6 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) return; list_for_each_entry_safe(td, next, list, queue) { - if (!td) - continue; if (td->address != address) continue; @@ -831,15 +867,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) list_del(&td->queue); kfree(td); - if (urb) { - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), - urb); + if (urb) + r8a66597_urb_done(r8a66597, urb, -ENODEV); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, - -ENODEV); - spin_lock(&r8a66597->lock); - } break; } } @@ -988,10 +1018,10 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; rh->scount = R8A66597_MAX_SAMPLING; if (connect) - rh->port |= 1 << USB_PORT_FEAT_CONNECTION; + rh->port |= USB_PORT_STAT_CONNECTION; else - rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION); - rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION; + rh->port &= ~USB_PORT_STAT_CONNECTION; + rh->port |= USB_PORT_STAT_C_CONNECTION << 16; r8a66597_root_hub_start_polling(r8a66597); } @@ -999,23 +1029,28 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, u16 syssts) +__releases(r8a66597->lock) +__acquires(r8a66597->lock) { if (syssts == SE0) { r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); - return; - } + } else { + if (syssts == FS_JSTS) + r8a66597_bset(r8a66597, HSE, 
get_syscfg_reg(port)); + else if (syssts == LS_JSTS) + r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port)); - if (syssts == FS_JSTS) - r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port)); - else if (syssts == LS_JSTS) - r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port)); + r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); + r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); - r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); - r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); + if (r8a66597->bus_suspended) + usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); + } - if (r8a66597->bus_suspended) - usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); + spin_unlock(&r8a66597->lock); + usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); + spin_lock(&r8a66597->lock); } /* this function must be called with interrupt disabled */ @@ -1024,13 +1059,14 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port) u16 speed = get_rh_usb_speed(r8a66597, port); struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; + rh->port &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED); if (speed == HSMODE) - rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED); + rh->port |= USB_PORT_STAT_HIGH_SPEED; else if (speed == LSMODE) - rh->port |= (1 << USB_PORT_FEAT_LOWSPEED); + rh->port |= USB_PORT_STAT_LOW_SPEED; - rh->port &= ~(1 << USB_PORT_FEAT_RESET); - rh->port |= 1 << USB_PORT_FEAT_ENABLE; + rh->port &= ~USB_PORT_STAT_RESET; + rh->port |= USB_PORT_STAT_ENABLE; } /* this function must be called with interrupt disabled */ @@ -1039,7 +1075,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port) struct r8a66597_device *dev = r8a66597->root_hub[port].dev; disable_r8a66597_pipe_all(r8a66597, dev); - free_usb_address(r8a66597, dev); + free_usb_address(r8a66597, dev, 0); start_root_hub_sampling(r8a66597, port, 0); } @@ -1273,10 +1309,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) if (usb_pipeisoc(urb->pipe)) urb->start_frame = r8a66597_get_frame(hcd); - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(hcd, urb, status); - spin_lock(&r8a66597->lock); + r8a66597_urb_done(r8a66597, urb, status); } if (restart) { @@ -1672,7 +1705,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port) u16 tmp; struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; - if (rh->port & (1 << USB_PORT_FEAT_RESET)) { + if (rh->port & USB_PORT_STAT_RESET) { unsigned long dvstctr_reg = get_dvstctr_reg(port); tmp = r8a66597_read(r8a66597, dvstctr_reg); @@ -1684,7 +1717,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port) r8a66597_usb_connect(r8a66597, port); } - if (!(rh->port & (1 << USB_PORT_FEAT_CONNECTION))) { + if (!(rh->port & USB_PORT_STAT_CONNECTION)) { r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); } @@ -2022,8 +2055,6 @@ static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597, struct list_head *list = &r8a66597->child_device; list_for_each_entry(dev, list, device_list) { - if (!dev) - continue; if (dev->usb_address != addr) continue; @@ -2060,7 +2091,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597, spin_lock_irqsave(&r8a66597->lock, flags); dev = get_r8a66597_device(r8a66597, addr); disable_r8a66597_pipe_all(r8a66597, dev); - free_usb_address(r8a66597, dev); + free_usb_address(r8a66597, dev, 0); put_child_connect_map(r8a66597, addr); 
spin_unlock_irqrestore(&r8a66597->lock, flags); } @@ -2154,7 +2185,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, switch (wValue) { case USB_PORT_FEAT_ENABLE: - rh->port &= ~(1 << USB_PORT_FEAT_POWER); + rh->port &= ~USB_PORT_STAT_POWER; break; case USB_PORT_FEAT_SUSPEND: break; @@ -2195,15 +2226,15 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case USB_PORT_FEAT_POWER: r8a66597_port_power(r8a66597, port, 1); - rh->port |= (1 << USB_PORT_FEAT_POWER); + rh->port |= USB_PORT_STAT_POWER; break; case USB_PORT_FEAT_RESET: { struct r8a66597_device *dev = rh->dev; - rh->port |= (1 << USB_PORT_FEAT_RESET); + rh->port |= USB_PORT_STAT_RESET; disable_r8a66597_pipe_all(r8a66597, dev); - free_usb_address(r8a66597, dev); + free_usb_address(r8a66597, dev, 1); r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT, get_dvstctr_reg(port)); @@ -2238,12 +2269,12 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd) struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; unsigned long dvstctr_reg = get_dvstctr_reg(port); - if (!(rh->port & (1 << USB_PORT_FEAT_ENABLE))) + if (!(rh->port & USB_PORT_STAT_ENABLE)) continue; dbg("suspend port = %d", port); r8a66597_bclr(r8a66597, UACT, dvstctr_reg); /* suspend */ - rh->port |= 1 << USB_PORT_FEAT_SUSPEND; + rh->port |= USB_PORT_STAT_SUSPEND; if (rh->dev->udev->do_remote_wakeup) { msleep(3); /* waiting last SOF */ @@ -2269,12 +2300,12 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd) struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; unsigned long dvstctr_reg = get_dvstctr_reg(port); - if (!(rh->port & (1 << USB_PORT_FEAT_SUSPEND))) + if (!(rh->port & USB_PORT_STAT_SUSPEND)) continue; dbg("resume port = %d", port); - rh->port &= ~(1 << USB_PORT_FEAT_SUSPEND); - rh->port |= 1 << USB_PORT_FEAT_C_SUSPEND; + rh->port &= ~USB_PORT_STAT_SUSPEND; + rh->port |= USB_PORT_STAT_C_SUSPEND << 16; r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg); msleep(50); r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg); @@ -2354,7 +2385,7 @@ static int r8a66597_resume(struct device *dev) return 0; } -static struct dev_pm_ops r8a66597_dev_pm_ops = { +static const struct dev_pm_ops r8a66597_dev_pm_ops = { .suspend = r8a66597_suspend, .resume = r8a66597_resume, .poweroff = r8a66597_suspend, @@ -2373,7 +2404,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) del_timer_sync(&r8a66597->rh_timer); usb_remove_hcd(hcd); - iounmap((void *)r8a66597->reg); + iounmap(r8a66597->reg); #ifdef CONFIG_HAVE_CLK if (r8a66597->pdata->on_chip) clk_put(r8a66597->clk); @@ -2465,7 +2496,13 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) init_timer(&r8a66597->rh_timer); r8a66597->rh_timer.function = r8a66597_timer; r8a66597->rh_timer.data = (unsigned long)r8a66597; - r8a66597->reg = (unsigned long)reg; + r8a66597->reg = reg; + + /* make sure no interrupts are pending */ + ret = r8a66597_clock_enable(r8a66597); + if (ret < 0) + goto clean_up3; + disable_controller(r8a66597); for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) { INIT_LIST_HEAD(&r8a66597->pipe_queue[i]); diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index 228e3fb2385..95d0f5adfdc 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h @@ -112,7 +112,7 @@ struct r8a66597_root_hub { struct r8a66597 { spinlock_t lock; - unsigned long reg; + void __iomem *reg; #ifdef CONFIG_HAVE_CLK struct clk *clk; #endif @@ -170,67 +170,67 @@ static inline struct urb 
*r8a66597_get_urb(struct r8a66597 *r8a66597, static inline u16 r8a66597_read(struct r8a66597 *r8a66597, unsigned long offset) { - return inw(r8a66597->reg + offset); + return ioread16(r8a66597->reg + offset); } static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597, unsigned long offset, u16 *buf, int len) { - unsigned long fifoaddr = r8a66597->reg + offset; + void __iomem *fifoaddr = r8a66597->reg + offset; unsigned long count; if (r8a66597->pdata->on_chip) { count = len / 4; - insl(fifoaddr, buf, count); + ioread32_rep(fifoaddr, buf, count); if (len & 0x00000003) { - unsigned long tmp = inl(fifoaddr); + unsigned long tmp = ioread32(fifoaddr); memcpy((unsigned char *)buf + count * 4, &tmp, len & 0x03); } } else { len = (len + 1) / 2; - insw(fifoaddr, buf, len); + ioread16_rep(fifoaddr, buf, len); } } static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val, unsigned long offset) { - outw(val, r8a66597->reg + offset); + iowrite16(val, r8a66597->reg + offset); } static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597, unsigned long offset, u16 *buf, int len) { - unsigned long fifoaddr = r8a66597->reg + offset; + void __iomem *fifoaddr = r8a66597->reg + offset; unsigned long count; unsigned char *pb; int i; if (r8a66597->pdata->on_chip) { count = len / 4; - outsl(fifoaddr, buf, count); + iowrite32_rep(fifoaddr, buf, count); if (len & 0x00000003) { pb = (unsigned char *)buf + count * 4; for (i = 0; i < (len & 0x00000003); i++) { if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND) - outb(pb[i], fifoaddr + i); + iowrite8(pb[i], fifoaddr + i); else - outb(pb[i], fifoaddr + 3 - i); + iowrite8(pb[i], fifoaddr + 3 - i); } } } else { int odd = len & 0x0001; len = len / 2; - outsw(fifoaddr, buf, len); + iowrite16_rep(fifoaddr, buf, len); if (unlikely(odd)) { buf = &buf[len]; - outb((unsigned char)*buf, fifoaddr); + iowrite8((unsigned char)*buf, fifoaddr); } } } diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 5b22a4d1c9e..bcf9f0e809d 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -45,14 +45,15 @@ #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/usb/sl811.h> +#include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/system.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> -#include "../core/hcd.h" #include "sl811.h" @@ -89,10 +90,10 @@ static void port_power(struct sl811 *sl811, int is_on) /* hub is inactive unless the port is powered */ if (is_on) { - if (sl811->port1 & (1 << USB_PORT_FEAT_POWER)) + if (sl811->port1 & USB_PORT_STAT_POWER) return; - sl811->port1 = (1 << USB_PORT_FEAT_POWER); + sl811->port1 = USB_PORT_STAT_POWER; sl811->irq_enable = SL11H_INTMASK_INSRMV; } else { sl811->port1 = 0; @@ -406,7 +407,7 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank) static inline void start_transfer(struct sl811 *sl811) { - if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) + if (sl811->port1 & USB_PORT_STAT_SUSPEND) return; if (sl811->active_a == NULL) { sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF)); @@ -719,24 +720,24 @@ retry: /* port status seems weird until after reset, so * force the reset and make khubd clean up later. 
*/ - if (sl811->stat_insrmv & 1) - sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION; + if (irqstat & SL11H_INTMASK_RD) + sl811->port1 &= ~USB_PORT_STAT_CONNECTION; else - sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION); + sl811->port1 |= USB_PORT_STAT_CONNECTION; - sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION; + sl811->port1 |= USB_PORT_STAT_C_CONNECTION << 16; } else if (irqstat & SL11H_INTMASK_RD) { - if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) { + if (sl811->port1 & USB_PORT_STAT_SUSPEND) { DBG("wakeup\n"); - sl811->port1 |= 1 << USB_PORT_FEAT_C_SUSPEND; + sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16; sl811->stat_wake++; } else irqstat &= ~SL11H_INTMASK_RD; } if (irqstat) { - if (sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)) + if (sl811->port1 & USB_PORT_STAT_ENABLE) start_transfer(sl811); ret = IRQ_HANDLED; if (retries--) @@ -818,7 +819,7 @@ static int sl811h_urb_enqueue( spin_lock_irqsave(&sl811->lock, flags); /* don't submit to a dead or disabled port */ - if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)) + if (!(sl811->port1 & USB_PORT_STAT_ENABLE) || !HC_IS_RUNNING(hcd->state)) { retval = -ENODEV; kfree(ep); @@ -1118,9 +1119,9 @@ sl811h_timer(unsigned long _sl811) unsigned long flags; u8 irqstat; u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE; - const u32 mask = (1 << USB_PORT_FEAT_CONNECTION) - | (1 << USB_PORT_FEAT_ENABLE) - | (1 << USB_PORT_FEAT_LOWSPEED); + const u32 mask = USB_PORT_STAT_CONNECTION + | USB_PORT_STAT_ENABLE + | USB_PORT_STAT_LOW_SPEED; spin_lock_irqsave(&sl811->lock, flags); @@ -1134,8 +1135,8 @@ sl811h_timer(unsigned long _sl811) switch (signaling) { case SL11H_CTL1MASK_SE0: DBG("end reset\n"); - sl811->port1 = (1 << USB_PORT_FEAT_C_RESET) - | (1 << USB_PORT_FEAT_POWER); + sl811->port1 = (USB_PORT_STAT_C_RESET << 16) + | USB_PORT_STAT_POWER; sl811->ctrl1 = 0; /* don't wrongly ack RD */ if (irqstat & SL11H_INTMASK_INSRMV) @@ -1143,7 +1144,7 @@ sl811h_timer(unsigned long _sl811) break; case SL11H_CTL1MASK_K: DBG("end resume\n"); - sl811->port1 &= ~(1 << USB_PORT_FEAT_SUSPEND); + sl811->port1 &= ~USB_PORT_STAT_SUSPEND; break; default: DBG("odd timer signaling: %02x\n", signaling); @@ -1153,26 +1154,26 @@ sl811h_timer(unsigned long _sl811) if (irqstat & SL11H_INTMASK_RD) { /* usbcore nukes all pending transactions on disconnect */ - if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION)) - sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION) - | (1 << USB_PORT_FEAT_C_ENABLE); + if (sl811->port1 & USB_PORT_STAT_CONNECTION) + sl811->port1 |= (USB_PORT_STAT_C_CONNECTION << 16) + | (USB_PORT_STAT_C_ENABLE << 16); sl811->port1 &= ~mask; sl811->irq_enable = SL11H_INTMASK_INSRMV; } else { sl811->port1 |= mask; if (irqstat & SL11H_INTMASK_DP) - sl811->port1 &= ~(1 << USB_PORT_FEAT_LOWSPEED); + sl811->port1 &= ~USB_PORT_STAT_LOW_SPEED; sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD; } - if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION)) { + if (sl811->port1 & USB_PORT_STAT_CONNECTION) { u8 ctrl2 = SL811HS_CTL2_INIT; sl811->irq_enable |= SL11H_INTMASK_DONE_A; #ifdef USE_B sl811->irq_enable |= SL11H_INTMASK_DONE_B; #endif - if (sl811->port1 & (1 << USB_PORT_FEAT_LOWSPEED)) { + if (sl811->port1 & USB_PORT_STAT_LOW_SPEED) { sl811->ctrl1 |= SL11H_CTL1MASK_LSPD; ctrl2 |= SL811HS_CTL2MASK_DSWAP; } @@ -1232,7 +1233,7 @@ sl811h_hub_control( switch (wValue) { case USB_PORT_FEAT_ENABLE: - sl811->port1 &= (1 << USB_PORT_FEAT_POWER); + sl811->port1 &= USB_PORT_STAT_POWER; sl811->ctrl1 = 0; sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); sl811->irq_enable = 
SL11H_INTMASK_INSRMV; @@ -1240,7 +1241,7 @@ sl811h_hub_control( sl811->irq_enable); break; case USB_PORT_FEAT_SUSPEND: - if (!(sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))) + if (!(sl811->port1 & USB_PORT_STAT_SUSPEND)) break; /* 20 msec of resume/K signaling, other irqs blocked */ @@ -1272,12 +1273,12 @@ sl811h_hub_control( sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf); break; case GetHubStatus: - *(__le32 *) buf = cpu_to_le32(0); + put_unaligned_le32(0, buf); break; case GetPortStatus: if (wIndex != 1) goto error; - *(__le32 *) buf = cpu_to_le32(sl811->port1); + put_unaligned_le32(sl811->port1, buf); #ifndef VERBOSE if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ @@ -1289,9 +1290,9 @@ sl811h_hub_control( goto error; switch (wValue) { case USB_PORT_FEAT_SUSPEND: - if (sl811->port1 & (1 << USB_PORT_FEAT_RESET)) + if (sl811->port1 & USB_PORT_STAT_RESET) goto error; - if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))) + if (!(sl811->port1 & USB_PORT_STAT_ENABLE)) goto error; DBG("suspend...\n"); @@ -1302,9 +1303,9 @@ sl811h_hub_control( port_power(sl811, 1); break; case USB_PORT_FEAT_RESET: - if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) + if (sl811->port1 & USB_PORT_STAT_SUSPEND) goto error; - if (!(sl811->port1 & (1 << USB_PORT_FEAT_POWER))) + if (!(sl811->port1 & USB_PORT_STAT_POWER)) break; /* 50 msec of reset/SE0 signaling, irqs blocked */ @@ -1313,7 +1314,7 @@ sl811h_hub_control( sl811->irq_enable); sl811->ctrl1 = SL11H_CTL1MASK_SE0; sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); - sl811->port1 |= (1 << USB_PORT_FEAT_RESET); + sl811->port1 |= USB_PORT_STAT_RESET; mod_timer(&sl811->timer, jiffies + msecs_to_jiffies(50)); break; diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c index 516848dd9b4..0e13a00eb2e 100644 --- a/drivers/usb/host/sl811_cs.c +++ b/drivers/usb/host/sl811_cs.c @@ -20,7 +20,6 @@ #include <linux/ioport.h> #include <linux/platform_device.h> -#include <pcmcia/cs_types.h> #include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> @@ -37,37 +36,14 @@ MODULE_LICENSE("GPL"); /* MACROS */ /*====================================================================*/ -#if defined(DEBUG) || defined(PCMCIA_DEBUG) - -static int pc_debug = 0; -module_param(pc_debug, int, 0644); - -#define DBG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG "sl811_cs: " args) - -#else -#define DBG(n, args...) do{}while(0) -#endif /* no debugging */ - #define INFO(args...) 
printk(KERN_INFO "sl811_cs: " args) -#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) - -#define CS_CHECK(fn, ret) \ - do { \ - last_fn = (fn); \ - if ((last_ret = (ret)) != 0) \ - goto cs_failed; \ - } while (0) - /*====================================================================*/ /* VARIABLES */ /*====================================================================*/ -static const char driver_name[DEV_NAME_LEN] = "sl811_cs"; - typedef struct local_info_t { struct pcmcia_device *p_dev; - dev_node_t node; } local_info_t; static void sl811_cs_release(struct pcmcia_device * link); @@ -76,7 +52,7 @@ static void sl811_cs_release(struct pcmcia_device * link); static void release_platform_dev(struct device * dev) { - DBG(0, "sl811_cs platform_dev release\n"); + dev_dbg(dev, "sl811_cs platform_dev release\n"); dev->parent = NULL; } @@ -140,7 +116,7 @@ static int sl811_hc_init(struct device *parent, resource_size_t base_addr, static void sl811_cs_detach(struct pcmcia_device *link) { - DBG(0, "sl811_cs_detach(0x%p)\n", link); + dev_dbg(&link->dev, "sl811_cs_detach\n"); sl811_cs_release(link); @@ -150,7 +126,7 @@ static void sl811_cs_detach(struct pcmcia_device *link) static void sl811_cs_release(struct pcmcia_device * link) { - DBG(0, "sl811_cs_release(0x%p)\n", link); + dev_dbg(&link->dev, "sl811_cs_release\n"); pcmcia_disable_device(link); platform_device_unregister(&platform_dev); @@ -183,20 +159,19 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev, dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; /* we need an interrupt */ - if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; + p_dev->conf.Attributes |= CONF_ENABLE_IRQ; /* IO window settings */ - p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; + p_dev->resource[0]->end = p_dev->resource[1]->end = 0; if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; + p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; - p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->io.BasePort1 = io->win[0].base; - p_dev->io.NumPorts1 = io->win[0].len; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->start = io->win[0].base; + p_dev->resource[0]->end = io->win[0].len; - return pcmcia_request_io(p_dev, &p_dev->io); + return pcmcia_request_io(p_dev); } pcmcia_disable_device(p_dev); return -ENODEV; @@ -205,44 +180,35 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev, static int sl811_cs_config(struct pcmcia_device *link) { - struct device *parent = &handle_to_dev(link); - local_info_t *dev = link->priv; - int last_fn, last_ret; + struct device *parent = &link->dev; + int ret; - DBG(0, "sl811_cs_config(0x%p)\n", link); + dev_dbg(&link->dev, "sl811_cs_config\n"); if (pcmcia_loop_config(link, sl811_cs_config_check, NULL)) goto failed; /* require an IRQ and two registers */ - if (!link->io.NumPorts1 || link->io.NumPorts1 < 2) - goto failed; - if (link->conf.Attributes & CONF_ENABLE_IRQ) - CS_CHECK(RequestIRQ, - pcmcia_request_irq(link, &link->irq)); - else + if (resource_size(link->resource[0]) < 2) goto failed; - CS_CHECK(RequestConfiguration, - pcmcia_request_configuration(link, &link->conf)); + if (!link->irq) + goto failed; - sprintf(dev->node.dev_name, driver_name); - dev->node.major = dev->node.minor = 0; - link->dev_node = &dev->node; + ret = pcmcia_request_configuration(link, &link->conf); + if (ret) + goto failed; - printk(KERN_INFO "%s: index 0x%02x: ", - dev->node.dev_name, link->conf.ConfigIndex); + dev_info(&link->dev, "index 0x%02x: ", + link->conf.ConfigIndex); if (link->conf.Vpp) printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); - printk(", irq %d", link->irq.AssignedIRQ); - printk(", io 0x%04x-0x%04x", link->io.BasePort1, - link->io.BasePort1+link->io.NumPorts1-1); + printk(", irq %d", link->irq); + printk(", io %pR", link->resource[0]); printk("\n"); - if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ) + if (sl811_hc_init(parent, link->resource[0]->start, link->irq) < 0) { -cs_failed: - cs_error(link, last_fn, last_ret); failed: printk(KERN_WARNING "sl811_cs_config failed\n"); sl811_cs_release(link); @@ -261,11 +227,6 @@ static int sl811_cs_probe(struct pcmcia_device *link) local->p_dev = link; link->priv = local; - /* Initialize */ - link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; - link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID; - link->irq.Handler = NULL; - link->conf.Attributes = 0; link->conf.IntType = INT_MEMORY_AND_IO; @@ -281,7 +242,7 @@ MODULE_DEVICE_TABLE(pcmcia, sl811_ids); static struct pcmcia_driver sl811_cs_driver = { .owner = THIS_MODULE, .drv = { - .name = (char *)driver_name, + .name = "sl811_cs", }, .probe = sl811_cs_probe, .remove = sl811_cs_detach, diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 228f2b070f2..5b31bae92db 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -49,6 +49,7 @@ #include <linux/list.h> #include <linux/interrupt.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/mutex.h> @@ -56,7 +57,6 @@ #include <asm/irq.h> #include <asm/system.h> #include <asm/byteorder.h> -#include "../core/hcd.h" /* FIXME ohci.h is ONLY for internal use by the OHCI driver. 
* If you're going to try stuff like this, you need to split @@ -1446,9 +1446,9 @@ static void u132_hcd_endp_work_scheduler(struct work_struct *work) return; } else { int retval; - u8 address = u132->addr[endp->usb_addr].address; struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]; + address = u132->addr[endp->usb_addr].address; endp->active = 1; ring->curr_endp = endp; ring->in_use = 1; @@ -3120,8 +3120,8 @@ static int __devinit u132_probe(struct platform_device *pdev) ftdi_elan_gone_away(pdev); return -ENOMEM; } else { - int retval = 0; struct u132 *u132 = hcd_to_u132(hcd); + retval = 0; hcd->rsrc_start = 0; mutex_lock(&u132_module_lock); list_add_tail(&u132->u132_list, &u132_static_list); diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c index e52b954dda4..98cf0b26b96 100644 --- a/drivers/usb/host/uhci-debug.c +++ b/drivers/usb/host/uhci-debug.c @@ -9,6 +9,7 @@ * (C) Copyright 1999-2001 Johannes Erdfelt */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/debugfs.h> #include <linux/smp_lock.h> diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 5cd0e48f67f..6637e52736d 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -38,6 +38,7 @@ #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/bitops.h> #include <linux/dmi.h> @@ -46,7 +47,6 @@ #include <asm/irq.h> #include <asm/system.h> -#include "../core/hcd.h" #include "uhci-hcd.h" #include "pci-quirks.h" @@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hcd) uhci_hc_died(uhci); uhci_scan_schedule(uhci); spin_unlock_irq(&uhci->lock); + synchronize_irq(hcd->irq); del_timer_sync(&uhci->fsbr_timer); release_uhci(uhci); @@ -749,7 +750,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd) spin_lock_irq(&uhci->lock); if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) rc = -ESHUTDOWN; - else if (!uhci->dead) + else if (uhci->dead) + ; /* Dead controllers tell no tales */ + + /* Once the controller is stopped, port resumes that are already + * in progress won't complete. Hence if remote wakeup is enabled + * for the root hub and any ports are in the middle of a resume or + * remote wakeup, we must fail the suspend. + */ + else if (hcd->self.root_hub->do_remote_wakeup && + uhci->resuming_ports) { + dev_dbg(uhci_dev(uhci), "suspend failed because a port " + "is resuming\n"); + rc = -EBUSY; + } else suspend_rh(uhci, UHCI_RH_SUSPENDED); spin_unlock_irq(&uhci->lock); return rc; diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 885b585360b..8270055848c 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci) /* Port received a wakeup request */ set_bit(port, &uhci->resuming_ports); uhci->ports_timeout = jiffies + - msecs_to_jiffies(20); + msecs_to_jiffies(25); /* Make sure we see the port again * after the resuming period is over. */ diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c index c632437c764..77324930603 100644 --- a/drivers/usb/host/whci/asl.c +++ b/drivers/usb/host/whci/asl.c @@ -16,6 +16,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/kernel.h> +#include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include <linux/usb.h> @@ -115,6 +116,10 @@ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) if (status & QTD_STS_HALTED) { /* Ug, an error. */ process_halted_qtd(whc, qset, td); + /* A halted qTD always triggers an update + because the qset was either removed or + reactivated. */ + update |= WHC_UPDATE_UPDATED; goto done; } @@ -305,6 +310,7 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) struct whc_urb *wurb = urb->hcpriv; struct whc_qset *qset = wurb->qset; struct whc_std *std, *t; + bool has_qtd = false; int ret; unsigned long flags; @@ -315,17 +321,21 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) goto out; list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb == urb) + if (std->urb == urb) { + if (std->qtd) + has_qtd = true; qset_free_std(whc, std); - else + } else std->qtd = NULL; /* so this std is re-added when the qset is */ } - asl_qset_remove(whc, qset); - wurb->status = status; - wurb->is_async = true; - queue_work(whc->workqueue, &wurb->dequeue_work); - + if (has_qtd) { + asl_qset_remove(whc, qset); + wurb->status = status; + wurb->is_async = true; + queue_work(whc->workqueue, &wurb->dequeue_work); + } else + qset_remove_urb(whc, qset, urb, status); out: spin_unlock_irqrestore(&whc->lock, flags); diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c index cf2d45946c5..767af265e00 100644 --- a/drivers/usb/host/whci/debug.c +++ b/drivers/usb/host/whci/debug.c @@ -15,6 +15,7 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/debugfs.h> #include <linux/seq_file.h> @@ -29,19 +30,31 @@ struct whc_dbg { struct dentry *pzl_f; }; -void qset_print(struct seq_file *s, struct whc_qset *qset) +static void qset_print(struct seq_file *s, struct whc_qset *qset) { + static const char *qh_type[] = { + "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", }; struct whc_std *std; struct urb *urb = NULL; int i; - seq_printf(s, "qset %08x\n", (u32)qset->qset_dma); + seq_printf(s, "qset %08x", (u32)qset->qset_dma); + if (&qset->list_node == qset->whc->async_list.prev) { + seq_printf(s, " (dummy)\n"); + } else { + seq_printf(s, " ep%d%s-%s maxpkt: %d\n", + qset->qh.info1 & 0x0f, + (qset->qh.info1 >> 4) & 0x1 ? 
"in" : "out", + qh_type[(qset->qh.info1 >> 5) & 0x7], + (qset->qh.info1 >> 16) & 0xffff); + } seq_printf(s, " -> %08x\n", (u32)qset->qh.link); seq_printf(s, " info: %08x %08x %08x\n", - qset->qh.info1, qset->qh.info2, qset->qh.info3); - seq_printf(s, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); + qset->qh.info1, qset->qh.info2, qset->qh.info3); + seq_printf(s, " sts: %04x errs: %d curwin: %08x\n", + qset->qh.status, qset->qh.err_count, qset->qh.cur_window); seq_printf(s, " TD: sts: %08x opts: %08x\n", - qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); + qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); for (i = 0; i < WHCI_QSET_TD_MAX; i++) { seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", @@ -134,7 +147,7 @@ static int pzl_open(struct inode *inode, struct file *file) return single_open(file, pzl_print, inode->i_private); } -static struct file_operations di_fops = { +static const struct file_operations di_fops = { .open = di_open, .read = seq_read, .llseek = seq_lseek, @@ -142,7 +155,7 @@ static struct file_operations di_fops = { .owner = THIS_MODULE, }; -static struct file_operations asl_fops = { +static const struct file_operations asl_fops = { .open = asl_open, .read = seq_read, .llseek = seq_lseek, @@ -150,7 +163,7 @@ static struct file_operations asl_fops = { .owner = THIS_MODULE, }; -static struct file_operations pzl_fops = { +static const struct file_operations pzl_fops = { .open = pzl_open, .read = seq_read, .llseek = seq_lseek, diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c index 687b622a161..e0d3401285c 100644 --- a/drivers/usb/host/whci/hcd.c +++ b/drivers/usb/host/whci/hcd.c @@ -250,6 +250,7 @@ static int whc_probe(struct umc_dev *umc) } usb_hcd->wireless = 1; + usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */ wusbhc = usb_hcd_to_wusbhc(usb_hcd); whc = wusbhc_to_whc(wusbhc); diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c index 34a783cb013..f7582e8e216 100644 --- a/drivers/usb/host/whci/init.c +++ b/drivers/usb/host/whci/init.c @@ -16,6 +16,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> +#include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c index a9e05bac664..33c5580b4d2 100644 --- a/drivers/usb/host/whci/pzl.c +++ b/drivers/usb/host/whci/pzl.c @@ -16,6 +16,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> +#include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include <linux/usb.h> @@ -121,6 +122,10 @@ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) if (status & QTD_STS_HALTED) { /* Ug, an error. */ process_halted_qtd(whc, qset, td); + /* A halted qTD always triggers an update + because the qset was either removed or + reactivated. 
*/ + update |= WHC_UPDATE_UPDATED; goto done; } @@ -333,6 +338,7 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) struct whc_urb *wurb = urb->hcpriv; struct whc_qset *qset = wurb->qset; struct whc_std *std, *t; + bool has_qtd = false; int ret; unsigned long flags; @@ -343,17 +349,22 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) goto out; list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb == urb) + if (std->urb == urb) { + if (std->qtd) + has_qtd = true; qset_free_std(whc, std); - else + } else std->qtd = NULL; /* so this std is re-added when the qset is */ } - pzl_qset_remove(whc, qset); - wurb->status = status; - wurb->is_async = false; - queue_work(whc->workqueue, &wurb->dequeue_work); - + if (has_qtd) { + pzl_qset_remove(whc, qset); + update_pzl_hw_view(whc); + wurb->status = status; + wurb->is_async = false; + queue_work(whc->workqueue, &wurb->dequeue_work); + } else + qset_remove_urb(whc, qset, urb, status); out: spin_unlock_irqrestore(&whc->lock, flags); diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index 1b9dc157157..ab5a14fbfee 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c @@ -17,6 +17,7 @@ */ #include <linux/kernel.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <linux/uwb/umc.h> #include <linux/usb.h> @@ -49,16 +50,19 @@ struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) * state * @urb: an urb for a transfer to this endpoint */ -static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) +static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb) { struct usb_device *usb_dev = urb->dev; + struct wusb_dev *wusb_dev = usb_dev->wusb_dev; struct usb_wireless_ep_comp_descriptor *epcd; bool is_out; + uint8_t phy_rate; is_out = usb_pipeout(urb->pipe); - epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; + qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); + epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; if (epcd) { qset->max_seq = epcd->bMaxSequence; qset->max_burst = epcd->bMaxBurst; @@ -67,12 +71,28 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) qset->max_burst = 1; } + /* + * Initial PHY rate is 53.3 Mbit/s for control endpoints or + * the maximum supported by the device for other endpoints + * (unless limited by the user). + */ + if (usb_pipecontrol(urb->pipe)) + phy_rate = UWB_PHY_RATE_53; + else { + uint16_t phy_rates; + + phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates); + phy_rate = fls(phy_rates) - 1; + if (phy_rate > whc->wusbhc.phy_rate) + phy_rate = whc->wusbhc.phy_rate; + } + qset->qh.info1 = cpu_to_le32( QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) | usb_pipe_to_qh_type(urb->pipe) | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) - | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out)) + | QH_INFO1_MAX_PKT_LEN(qset->max_packet) ); qset->qh.info2 = cpu_to_le32( QH_INFO2_BURST(qset->max_burst) @@ -86,7 +106,7 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) * strength and can presumably guess the Tx power required * from that? 
*/ qset->qh.info3 = cpu_to_le32( - QH_INFO3_TX_RATE_53_3 + QH_INFO3_TX_RATE(phy_rate) | QH_INFO3_TX_PWR(0) /* 0 == max power */ ); @@ -148,7 +168,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb, qset->ep = urb->ep; urb->ep->hcpriv = qset; - qset_fill_qh(qset, urb); + qset_fill_qh(whc, qset, urb); } return qset; } @@ -241,6 +261,36 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) qset->ntds--; } +static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std) +{ + struct scatterlist *sg; + void *bounce; + size_t remaining, offset; + + bounce = std->bounce_buf; + remaining = std->len; + + sg = std->bounce_sg; + offset = std->bounce_offset; + + while (remaining) { + size_t len; + + len = min(sg->length - offset, remaining); + memcpy(sg_virt(sg) + offset, bounce, len); + + bounce += len; + remaining -= len; + + offset += len; + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + } + } + +} + /** * qset_free_std - remove an sTD and free it. * @whc: the WHCI host controller @@ -249,13 +299,29 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) void qset_free_std(struct whc *whc, struct whc_std *std) { list_del(&std->list_node); - if (std->num_pointers) { - dma_unmap_single(whc->wusbhc.dev, std->dma_addr, - std->num_pointers * sizeof(struct whc_page_list_entry), - DMA_TO_DEVICE); + if (std->bounce_buf) { + bool is_out = usb_pipeout(std->urb->pipe); + dma_addr_t dma_addr; + + if (std->num_pointers) + dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr); + else + dma_addr = std->dma_addr; + + dma_unmap_single(whc->wusbhc.dev, dma_addr, + std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (!is_out) + qset_copy_bounce_to_sg(whc, std); + kfree(std->bounce_buf); + } + if (std->pl_virt) { + if (std->dma_addr) + dma_unmap_single(whc->wusbhc.dev, std->dma_addr, + std->num_pointers * sizeof(struct whc_page_list_entry), + DMA_TO_DEVICE); kfree(std->pl_virt); + std->pl_virt = NULL; } - kfree(std); } @@ -293,12 +359,17 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f { dma_addr_t dma_addr = std->dma_addr; dma_addr_t sp, ep; - size_t std_len = std->len; size_t pl_len; int p; - sp = ALIGN(dma_addr, WHCI_PAGE_SIZE); - ep = dma_addr + std_len; + /* Short buffers don't need a page list. 
*/ + if (std->len <= WHCI_PAGE_SIZE) { + std->num_pointers = 0; + return 0; + } + + sp = dma_addr & ~(WHCI_PAGE_SIZE-1); + ep = dma_addr + std->len; std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); @@ -309,7 +380,7 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f for (p = 0; p < std->num_pointers; p++) { std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); - dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE); + dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); } return 0; @@ -339,6 +410,218 @@ static void urb_dequeue_work(struct work_struct *work) spin_unlock_irqrestore(&whc->lock, flags); } +static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset, + struct urb *urb, gfp_t mem_flags) +{ + struct whc_std *std; + + std = kzalloc(sizeof(struct whc_std), mem_flags); + if (std == NULL) + return NULL; + + std->urb = urb; + std->qtd = NULL; + + INIT_LIST_HEAD(&std->list_node); + list_add_tail(&std->list_node, &qset->stds); + + return std; +} + +static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, + gfp_t mem_flags) +{ + size_t remaining; + struct scatterlist *sg; + int i; + int ntds = 0; + struct whc_std *std = NULL; + struct whc_page_list_entry *entry; + dma_addr_t prev_end = 0; + size_t pl_len; + int p = 0; + + remaining = urb->transfer_buffer_length; + + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + dma_addr_t dma_addr; + size_t dma_remaining; + dma_addr_t sp, ep; + int num_pointers; + + if (remaining == 0) { + break; + } + + dma_addr = sg_dma_address(sg); + dma_remaining = min_t(size_t, sg_dma_len(sg), remaining); + + while (dma_remaining) { + size_t dma_len; + + /* + * We can use the previous std (if it exists) provided that: + * - the previous one ended on a page boundary. + * - the current one begins on a page boundary. + * - the previous one isn't full. + * + * If a new std is needed but the previous one + * was not a whole number of packets then this + * sg list cannot be mapped onto multiple + * qTDs. Return an error and let the caller + * sort it out. + */ + if (!std + || (prev_end & (WHCI_PAGE_SIZE-1)) + || (dma_addr & (WHCI_PAGE_SIZE-1)) + || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) { + if (std && std->len % qset->max_packet != 0) + return -EINVAL; + std = qset_new_std(whc, qset, urb, mem_flags); + if (std == NULL) { + return -ENOMEM; + } + ntds++; + p = 0; + } + + dma_len = dma_remaining; + + /* + * If the remainder of this element doesn't + * fit in a single qTD, limit the qTD to a + * whole number of packets. This allows the + * remainder to go into the next qTD. 
+ */ + if (std->len + dma_len > QTD_MAX_XFER_SIZE) { + dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet) + * qset->max_packet - std->len; + } + + std->len += dma_len; + std->ntds_remaining = -1; /* filled in later */ + + sp = dma_addr & ~(WHCI_PAGE_SIZE-1); + ep = dma_addr + dma_len; + num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); + std->num_pointers += num_pointers; + + pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); + + std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags); + if (std->pl_virt == NULL) { + return -ENOMEM; + } + + for (;p < std->num_pointers; p++, entry++) { + std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); + dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); + } + + prev_end = dma_addr = ep; + dma_remaining -= dma_len; + remaining -= dma_len; + } + } + + /* Now the number of stds is know, go back and fill in + std->ntds_remaining. */ + list_for_each_entry(std, &qset->stds, list_node) { + if (std->ntds_remaining == -1) { + pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); + std->ntds_remaining = ntds--; + std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, + pl_len, DMA_TO_DEVICE); + } + } + return 0; +} + +/** + * qset_add_urb_sg_linearize - add an urb with sg list, copying the data + * + * If the URB contains an sg list whose elements cannot be directly + * mapped to qTDs then the data must be transferred via bounce + * buffers. + */ +static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, + struct urb *urb, gfp_t mem_flags) +{ + bool is_out = usb_pipeout(urb->pipe); + size_t max_std_len; + size_t remaining; + int ntds = 0; + struct whc_std *std = NULL; + void *bounce = NULL; + struct scatterlist *sg; + int i; + + /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */ + max_std_len = qset->max_burst * qset->max_packet; + + remaining = urb->transfer_buffer_length; + + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + size_t len; + size_t sg_remaining; + void *orig; + + if (remaining == 0) { + break; + } + + sg_remaining = min_t(size_t, remaining, sg->length); + orig = sg_virt(sg); + + while (sg_remaining) { + if (!std || std->len == max_std_len) { + std = qset_new_std(whc, qset, urb, mem_flags); + if (std == NULL) + return -ENOMEM; + std->bounce_buf = kmalloc(max_std_len, mem_flags); + if (std->bounce_buf == NULL) + return -ENOMEM; + std->bounce_sg = sg; + std->bounce_offset = orig - sg_virt(sg); + bounce = std->bounce_buf; + ntds++; + } + + len = min(sg_remaining, max_std_len - std->len); + + if (is_out) + memcpy(bounce, orig, len); + + std->len += len; + std->ntds_remaining = -1; /* filled in later */ + + bounce += len; + orig += len; + sg_remaining -= len; + remaining -= len; + } + } + + /* + * For each of the new sTDs, map the bounce buffers, create + * page lists (if necessary), and fill in std->ntds_remaining. + */ + list_for_each_entry(std, &qset->stds, list_node) { + if (std->ntds_remaining != -1) + continue; + + std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len, + is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (qset_fill_page_list(whc, std, mem_flags) < 0) + return -ENOMEM; + + std->ntds_remaining = ntds--; + } + + return 0; +} + /** * qset_add_urb - add an urb to the qset's queue. 
* @@ -353,10 +636,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, int remaining = urb->transfer_buffer_length; u64 transfer_dma = urb->transfer_dma; int ntds_remaining; - - ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); - if (ntds_remaining == 0) - ntds_remaining = 1; + int ret; wurb = kzalloc(sizeof(struct whc_urb), mem_flags); if (wurb == NULL) @@ -366,32 +646,39 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, wurb->urb = urb; INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); + if (urb->num_sgs) { + ret = qset_add_urb_sg(whc, qset, urb, mem_flags); + if (ret == -EINVAL) { + qset_free_stds(qset, urb); + ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags); + } + if (ret < 0) + goto err_no_mem; + return 0; + } + + ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); + if (ntds_remaining == 0) + ntds_remaining = 1; + while (ntds_remaining) { struct whc_std *std; size_t std_len; - std = kmalloc(sizeof(struct whc_std), mem_flags); - if (std == NULL) - goto err_no_mem; - std_len = remaining; if (std_len > QTD_MAX_XFER_SIZE) std_len = QTD_MAX_XFER_SIZE; - std->urb = urb; + std = qset_new_std(whc, qset, urb, mem_flags); + if (std == NULL) + goto err_no_mem; + std->dma_addr = transfer_dma; std->len = std_len; std->ntds_remaining = ntds_remaining; - std->qtd = NULL; - INIT_LIST_HEAD(&std->list_node); - list_add_tail(&std->list_node, &qset->stds); - - if (std_len > WHCI_PAGE_SIZE) { - if (qset_fill_page_list(whc, std, mem_flags) < 0) - goto err_no_mem; - } else - std->num_pointers = 0; + if (qset_fill_page_list(whc, std, mem_flags) < 0) + goto err_no_mem; ntds_remaining--; remaining -= std_len; diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h index 24e94d983c5..c80c7d93bc4 100644 --- a/drivers/usb/host/whci/whcd.h +++ b/drivers/usb/host/whci/whcd.h @@ -84,6 +84,11 @@ struct whc { * @len: the length of data in the associated TD. * @ntds_remaining: number of TDs (starting from this one) in this transfer. * + * @bounce_buf: a bounce buffer if the std was from an urb with a sg + * list that could not be mapped to qTDs directly. + * @bounce_sg: the first scatterlist element bounce_buf is for. + * @bounce_offset: the offset into bounce_sg for the start of bounce_buf. + * * Queued URBs may require more TDs than are available in a qset so we * use a list of these "software TDs" (sTDs) to hold per-TD data. 
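/*
 * A minimal sketch of the sTD count used by the non-sg path of
 * qset_add_urb() above: the transfer is split into qTD-sized pieces,
 * and a zero-length transfer still needs one sTD.  The names here are
 * illustrative, not the driver's.
 */
static unsigned long num_stds(unsigned long transfer_len,
			      unsigned long qtd_max)
{
	unsigned long n = (transfer_len + qtd_max - 1) / qtd_max;

	return n ? n : 1;	/* DIV_ROUND_UP, with a minimum of one */
}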
*/ @@ -97,6 +102,10 @@ struct whc_std { int num_pointers; dma_addr_t dma_addr; struct whc_page_list_entry *pl_virt; + + void *bounce_buf; + struct scatterlist *bounce_sg; + unsigned bounce_offset; }; /** diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h index e8d0001605b..4d4cbc0730b 100644 --- a/drivers/usb/host/whci/whci-hc.h +++ b/drivers/usb/host/whci/whci-hc.h @@ -172,14 +172,7 @@ struct whc_qhead { #define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */ #define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */ -#define QH_INFO3_TX_RATE_53_3 (0 << 24) -#define QH_INFO3_TX_RATE_80 (1 << 24) -#define QH_INFO3_TX_RATE_106_7 (2 << 24) -#define QH_INFO3_TX_RATE_160 (3 << 24) -#define QH_INFO3_TX_RATE_200 (4 << 24) -#define QH_INFO3_TX_RATE_320 (5 << 24) -#define QH_INFO3_TX_RATE_400 (6 << 24) -#define QH_INFO3_TX_RATE_480 (7 << 24) +#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */ #define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */ #define QH_STATUS_FLOW_CTRL (1 << 15) @@ -267,8 +260,9 @@ struct whc_qset { unsigned reset:1; struct urb *pause_after_urb; struct completion remove_complete; - int max_burst; - int max_seq; + uint16_t max_packet; + uint8_t max_burst; + uint8_t max_seq; }; static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 33128d52f21..fcbf4abbf38 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -364,6 +364,30 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring) xhci_debug_segment(xhci, seg); } +void xhci_dbg_ep_rings(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + struct xhci_virt_ep *ep) +{ + int i; + struct xhci_ring *ring; + + if (ep->ep_state & EP_HAS_STREAMS) { + for (i = 1; i < ep->stream_info->num_streams; i++) { + ring = ep->stream_info->stream_rings[i]; + xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n", + slot_id, ep_index, i); + xhci_debug_segment(xhci, ring->deq_seg); + } + } else { + ring = ep->ring; + if (!ring) + return; + xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", + slot_id, ep_index); + xhci_debug_segment(xhci, ring->deq_seg); + } +} + void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) { u32 addr = (u32) erst->erst_dma_addr; @@ -406,6 +430,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma) } } +char *xhci_get_slot_state(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); + + switch (GET_SLOT_STATE(slot_ctx->dev_state)) { + case 0: + return "enabled/disabled"; + case 1: + return "default"; + case 2: + return "addressed"; + case 3: + return "configured"; + default: + return "reserved"; + } +} + void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { /* Fields are 32 bits wide, DMA addresses are in bytes */ diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index ecc131c3fe3..78c4edac1db 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset) next = readl(base + ext_offset); - if (ext_offset == XHCI_HCC_PARAMS_OFFSET) + if (ext_offset == XHCI_HCC_PARAMS_OFFSET) { /* Find the first extended capability */ next = 
XHCI_HCC_EXT_CAPS(next); - else + ext_offset = 0; + } else { /* Find the next extended capability */ next = XHCI_EXT_CAPS_NEXT(next); + } + if (!next) return 0; /* diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index eac5b53aa9e..a1a7a979553 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -64,15 +64,15 @@ static void xhci_hub_descriptor(struct xhci_hcd *xhci, static unsigned int xhci_port_speed(unsigned int port_status) { if (DEV_LOWSPEED(port_status)) - return 1 << USB_PORT_FEAT_LOWSPEED; + return USB_PORT_STAT_LOW_SPEED; if (DEV_HIGHSPEED(port_status)) - return 1 << USB_PORT_FEAT_HIGHSPEED; + return USB_PORT_STAT_HIGH_SPEED; if (DEV_SUPERSPEED(port_status)) - return 1 << USB_PORT_FEAT_SUPERSPEED; + return USB_PORT_STAT_SUPER_SPEED; /* * FIXME: Yes, we should check for full speed, but the core uses that as * a default in portspeed() in usb/core/hub.c (which is the only place - * USB_PORT_FEAT_*SPEED is used). + * USB_PORT_STAT_*_SPEED is used). */ return 0; } @@ -129,6 +129,50 @@ static u32 xhci_port_state_to_neutral(u32 state) return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); } +static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex, + u32 __iomem *addr, u32 port_status) +{ + /* Write 1 to disable the port */ + xhci_writel(xhci, port_status | PORT_PE, addr); + port_status = xhci_readl(xhci, addr); + xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n", + wIndex, port_status); +} + +static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, + u16 wIndex, u32 __iomem *addr, u32 port_status) +{ + char *port_change_bit; + u32 status; + + switch (wValue) { + case USB_PORT_FEAT_C_RESET: + status = PORT_RC; + port_change_bit = "reset"; + break; + case USB_PORT_FEAT_C_CONNECTION: + status = PORT_CSC; + port_change_bit = "connect"; + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + status = PORT_OCC; + port_change_bit = "over-current"; + break; + case USB_PORT_FEAT_C_ENABLE: + status = PORT_PEC; + port_change_bit = "enable/disable"; + break; + default: + /* Should never happen */ + return; + } + /* Change bits are all write 1 to clear */ + xhci_writel(xhci, port_status | status, addr); + port_status = xhci_readl(xhci, addr); + xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n", + port_change_bit, wIndex, port_status); +} + int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { @@ -138,7 +182,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u32 temp, status; int retval = 0; u32 __iomem *addr; - char *port_change_bit; ports = HCS_MAX_PORTS(xhci->hcs_params1); @@ -162,27 +205,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* wPortChange bits */ if (temp & PORT_CSC) - status |= 1 << USB_PORT_FEAT_C_CONNECTION; + status |= USB_PORT_STAT_C_CONNECTION << 16; if (temp & PORT_PEC) - status |= 1 << USB_PORT_FEAT_C_ENABLE; + status |= USB_PORT_STAT_C_ENABLE << 16; if ((temp & PORT_OCC)) - status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; + status |= USB_PORT_STAT_C_OVERCURRENT << 16; /* * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific * changes */ if (temp & PORT_CONNECT) { - status |= 1 << USB_PORT_FEAT_CONNECTION; + status |= USB_PORT_STAT_CONNECTION; status |= xhci_port_speed(temp); } if (temp & PORT_PE) - status |= 1 << USB_PORT_FEAT_ENABLE; + status |= USB_PORT_STAT_ENABLE; if (temp & PORT_OC) - status |= 1 << USB_PORT_FEAT_OVER_CURRENT; + status |= USB_PORT_STAT_OVERCURRENT; if (temp & 
PORT_RESET) - status |= 1 << USB_PORT_FEAT_RESET; + status |= USB_PORT_STAT_RESET; if (temp & PORT_POWER) - status |= 1 << USB_PORT_FEAT_POWER; + status |= USB_PORT_STAT_POWER; xhci_dbg(xhci, "Get port status returned 0x%x\n", status); put_unaligned(cpu_to_le32(status), (__le32 *) buf); break; @@ -229,26 +272,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, temp = xhci_port_state_to_neutral(temp); switch (wValue) { case USB_PORT_FEAT_C_RESET: - status = PORT_RC; - port_change_bit = "reset"; - break; case USB_PORT_FEAT_C_CONNECTION: - status = PORT_CSC; - port_change_bit = "connect"; - break; case USB_PORT_FEAT_C_OVER_CURRENT: - status = PORT_OCC; - port_change_bit = "over-current"; + case USB_PORT_FEAT_C_ENABLE: + xhci_clear_port_change_bit(xhci, wValue, wIndex, + addr, temp); + break; + case USB_PORT_FEAT_ENABLE: + xhci_disable_port(xhci, wIndex, addr, temp); break; default: goto error; } - /* Change bits are all write 1 to clear */ - xhci_writel(xhci, temp | status, addr); - temp = xhci_readl(xhci, addr); - xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n", - port_change_bit, wIndex, temp); - temp = xhci_readl(xhci, addr); /* unblock any posted writes */ break; default: error: @@ -263,7 +298,6 @@ error: * Returns 0 if the status hasn't changed, or the number of bytes in buf. * Ports are 0-indexed from the HCD point of view, * and 1-indexed from the USB core pointer of view. - * xHCI instances can have up to 127 ports, so FIXME if you see more than 15. * * Note that the status change bits will be cleared as soon as a port status * change event is generated, so we use the saved status from that event. @@ -280,14 +314,9 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) ports = HCS_MAX_PORTS(xhci->hcs_params1); /* Initial status is no changes */ - buf[0] = 0; + retval = (ports + 8) / 8; + memset(buf, 0, retval); status = 0; - if (ports > 7) { - buf[1] = 0; - retval = 2; - } else { - retval = 1; - } spin_lock_irqsave(&xhci->lock, flags); /* For each port, did anything change? If so, set that bit in buf. 
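/*
 * A minimal sketch of the status-change bitmap layout assumed just
 * below: bit 0 of buf[] is reserved for the hub itself, so (0-based)
 * port i maps to overall bit i + 1.  For example, port 6 lands in
 * buf[0] bit 7 and port 7 in buf[1] bit 0.
 */
static void set_port_change(char *buf, unsigned int port)
{
	buf[(port + 1) / 8] |= 1 << ((port + 1) % 8);
}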
*/ @@ -296,10 +325,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) NUM_PORT_REGS*i; temp = xhci_readl(xhci, addr); if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) { - if (i < 7) - buf[0] |= 1 << (i + 1); - else - buf[1] |= 1 << (i - 7); + buf[(i + 1) / 8] |= 1 << (i + 1) % 8; status = 1; } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1db4fea8c17..2eb658d2639 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -22,6 +22,7 @@ #include <linux/usb.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/dmapool.h> #include "xhci.h" @@ -40,13 +41,13 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag seg = kzalloc(sizeof *seg, flags); if (!seg) - return 0; + return NULL; xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg); seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); if (!seg->trbs) { kfree(seg); - return 0; + return NULL; } xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n", seg->trbs, (unsigned long long)dma); @@ -125,6 +126,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) kfree(ring); } +static void xhci_initialize_ring_info(struct xhci_ring *ring) +{ + /* The ring is empty, so the enqueue pointer == dequeue pointer */ + ring->enqueue = ring->first_seg->trbs; + ring->enq_seg = ring->first_seg; + ring->dequeue = ring->enqueue; + ring->deq_seg = ring->first_seg; + /* The ring is initialized to 0. The producer must write 1 to the cycle + * bit to handover ownership of the TRB, so PCS = 1. The consumer must + * compare CCS to the cycle bit to check ownership, so CCS = 1. + */ + ring->cycle_state = 1; + /* Not necessary for new rings, but needed for re-initialized rings */ + ring->enq_updates = 0; + ring->deq_updates = 0; +} + /** * Create a new ring with zero or more segments. * @@ -141,7 +159,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, ring = kzalloc(sizeof *(ring), flags); xhci_dbg(xhci, "Allocating ring at %p\n", ring); if (!ring) - return 0; + return NULL; INIT_LIST_HEAD(&ring->td_list); if (num_segs == 0) @@ -173,27 +191,63 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, " segment %p (virtual), 0x%llx (DMA)\n", prev, (unsigned long long)prev->dma); } - /* The ring is empty, so the enqueue pointer == dequeue pointer */ - ring->enqueue = ring->first_seg->trbs; - ring->enq_seg = ring->first_seg; - ring->dequeue = ring->enqueue; - ring->deq_seg = ring->first_seg; - /* The ring is initialized to 0. The producer must write 1 to the cycle - * bit to handover ownership of the TRB, so PCS = 1. The consumer must - * compare CCS to the cycle bit to check ownership, so CCS = 1. - */ - ring->cycle_state = 1; - + xhci_initialize_ring_info(ring); return ring; fail: xhci_ring_free(xhci, ring); - return 0; + return NULL; +} + +void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + unsigned int ep_index) +{ + int rings_cached; + + rings_cached = virt_dev->num_rings_cached; + if (rings_cached < XHCI_MAX_RINGS_CACHED) { + virt_dev->num_rings_cached++; + rings_cached = virt_dev->num_rings_cached; + virt_dev->ring_cache[rings_cached] = + virt_dev->eps[ep_index].ring; + xhci_dbg(xhci, "Cached old ring, " + "%d ring%s cached\n", + rings_cached, + (rings_cached > 1) ? 
"s" : ""); + } else { + xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); + xhci_dbg(xhci, "Ring cache full (%d rings), " + "freeing ring\n", + virt_dev->num_rings_cached); + } + virt_dev->eps[ep_index].ring = NULL; +} + +/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue + * pointers to the beginning of the ring. + */ +static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, + struct xhci_ring *ring) +{ + struct xhci_segment *seg = ring->first_seg; + do { + memset(seg->trbs, 0, + sizeof(union xhci_trb)*TRBS_PER_SEGMENT); + /* All endpoint rings have link TRBs */ + xhci_link_segments(xhci, seg, seg->next, 1); + seg = seg->next; + } while (seg != ring->first_seg); + xhci_initialize_ring_info(ring); + /* td list should be empty since all URBs have been cancelled, + * but just in case... + */ + INIT_LIST_HEAD(&ring->td_list); } #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) -struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, +static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags) { struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); @@ -211,9 +265,11 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, return ctx; } -void xhci_free_container_ctx(struct xhci_hcd *xhci, +static void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { + if (!ctx) + return; dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); kfree(ctx); } @@ -248,6 +304,431 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); } + +/***************** Streams structures manipulation *************************/ + +void xhci_free_stream_ctx(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, + struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) +{ + struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + + if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) + pci_free_consistent(pdev, + sizeof(struct xhci_stream_ctx)*num_stream_ctxs, + stream_ctx, dma); + else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) + return dma_pool_free(xhci->small_streams_pool, + stream_ctx, dma); + else + return dma_pool_free(xhci->medium_streams_pool, + stream_ctx, dma); +} + +/* + * The stream context array for each endpoint with bulk streams enabled can + * vary in size, based on: + * - how many streams the endpoint supports, + * - the maximum primary stream array size the host controller supports, + * - and how many streams the device driver asks for. + * + * The stream context array must be a power of 2, and can be as small as + * 64 bytes or as large as 1MB. 
+ */ +struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, dma_addr_t *dma, + gfp_t mem_flags) +{ + struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + + if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) + return pci_alloc_consistent(pdev, + sizeof(struct xhci_stream_ctx)*num_stream_ctxs, + dma); + else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) + return dma_pool_alloc(xhci->small_streams_pool, + mem_flags, dma); + else + return dma_pool_alloc(xhci->medium_streams_pool, + mem_flags, dma); +} + +struct xhci_ring *xhci_dma_to_transfer_ring( + struct xhci_virt_ep *ep, + u64 address) +{ + if (ep->ep_state & EP_HAS_STREAMS) + return radix_tree_lookup(&ep->stream_info->trb_address_map, + address >> SEGMENT_SHIFT); + return ep->ring; +} + +/* Only use this when you know stream_info is valid */ +#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING +static struct xhci_ring *dma_to_stream_ring( + struct xhci_stream_info *stream_info, + u64 address) +{ + return radix_tree_lookup(&stream_info->trb_address_map, + address >> SEGMENT_SHIFT); +} +#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ + +struct xhci_ring *xhci_stream_id_to_ring( + struct xhci_virt_device *dev, + unsigned int ep_index, + unsigned int stream_id) +{ + struct xhci_virt_ep *ep = &dev->eps[ep_index]; + + if (stream_id == 0) + return ep->ring; + if (!ep->stream_info) + return NULL; + + if (stream_id > ep->stream_info->num_streams) + return NULL; + return ep->stream_info->stream_rings[stream_id]; +} + +struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id) +{ + struct xhci_virt_ep *ep; + + ep = &xhci->devs[slot_id]->eps[ep_index]; + /* Common case: no streams */ + if (!(ep->ep_state & EP_HAS_STREAMS)) + return ep->ring; + + if (stream_id == 0) { + xhci_warn(xhci, + "WARN: Slot ID %u, ep index %u has streams, " + "but URB has no stream ID.\n", + slot_id, ep_index); + return NULL; + } + + if (stream_id < ep->stream_info->num_streams) + return ep->stream_info->stream_rings[stream_id]; + + xhci_warn(xhci, + "WARN: Slot ID %u, ep index %u has " + "stream IDs 1 to %u allocated, " + "but stream ID %u is requested.\n", + slot_id, ep_index, + ep->stream_info->num_streams - 1, + stream_id); + return NULL; +} + +/* Get the right ring for the given URB. + * If the endpoint supports streams, boundary check the URB's stream ID. + * If the endpoint doesn't support streams, return the singular endpoint ring. 
+ */ +struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, + struct urb *urb) +{ + return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, + xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id); +} + +#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING +static int xhci_test_radix_tree(struct xhci_hcd *xhci, + unsigned int num_streams, + struct xhci_stream_info *stream_info) +{ + u32 cur_stream; + struct xhci_ring *cur_ring; + u64 addr; + + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + struct xhci_ring *mapped_ring; + int trb_size = sizeof(union xhci_trb); + + cur_ring = stream_info->stream_rings[cur_stream]; + for (addr = cur_ring->first_seg->dma; + addr < cur_ring->first_seg->dma + SEGMENT_SIZE; + addr += trb_size) { + mapped_ring = dma_to_stream_ring(stream_info, addr); + if (cur_ring != mapped_ring) { + xhci_warn(xhci, "WARN: DMA address 0x%08llx " + "didn't map to stream ID %u; " + "mapped to ring %p\n", + (unsigned long long) addr, + cur_stream, + mapped_ring); + return -EINVAL; + } + } + /* One TRB after the end of the ring segment shouldn't return a + * pointer to the current ring (although it may be a part of a + * different ring). + */ + mapped_ring = dma_to_stream_ring(stream_info, addr); + if (mapped_ring != cur_ring) { + /* One TRB before should also fail */ + addr = cur_ring->first_seg->dma - trb_size; + mapped_ring = dma_to_stream_ring(stream_info, addr); + } + if (mapped_ring == cur_ring) { + xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx " + "mapped to valid stream ID %u; " + "mapped ring = %p\n", + (unsigned long long) addr, + cur_stream, + mapped_ring); + return -EINVAL; + } + } + return 0; +} +#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ + +/* + * Change an endpoint's internal structure so it supports stream IDs. The + * number of requested streams includes stream 0, which cannot be used by device + * drivers. + * + * The number of stream contexts in the stream context array may be bigger than + * the number of streams the driver wants to use. This is because the number of + * stream context array entries must be a power of two. + * + * We need a radix tree for mapping physical addresses of TRBs to which stream + * ID they belong to. We need to do this because the host controller won't tell + * us which stream ring the TRB came from. We could store the stream ID in an + * event data TRB, but that doesn't help us for the cancellation case, since the + * endpoint may stop before it reaches that event data TRB. + * + * The radix tree maps the upper portion of the TRB DMA address to a ring + * segment that has the same upper portion of DMA addresses. For example, say I + * have segments of size 1KB, that are always 64-byte aligned. A segment may + * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the + * key to the stream ID is 0x43244. I can use the DMA address of the TRB to + * pass the radix tree a key to get the right stream ID: + * + * 0x10c90fff >> 10 = 0x43243 + * 0x10c912c0 >> 10 = 0x43244 + * 0x10c91400 >> 10 = 0x43245 + * + * Obviously, only those TRBs with DMA addresses that are within the segment + * will make the radix tree return the stream ID for that ring. + * + * Caveats for the radix tree: + * + * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an + * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be + * 64-bits. 
Since we only request 32-bit DMA addresses, we can use that as the + * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit + * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit + * extended systems (where the DMA address can be bigger than 32-bits), + * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. + */ +struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, + unsigned int num_streams, gfp_t mem_flags) +{ + struct xhci_stream_info *stream_info; + u32 cur_stream; + struct xhci_ring *cur_ring; + unsigned long key; + u64 addr; + int ret; + + xhci_dbg(xhci, "Allocating %u streams and %u " + "stream context array entries.\n", + num_streams, num_stream_ctxs); + if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { + xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); + return NULL; + } + xhci->cmd_ring_reserved_trbs++; + + stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags); + if (!stream_info) + goto cleanup_trbs; + + stream_info->num_streams = num_streams; + stream_info->num_stream_ctxs = num_stream_ctxs; + + /* Initialize the array of virtual pointers to stream rings. */ + stream_info->stream_rings = kzalloc( + sizeof(struct xhci_ring *)*num_streams, + mem_flags); + if (!stream_info->stream_rings) + goto cleanup_info; + + /* Initialize the array of DMA addresses for stream rings for the HW. */ + stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, + num_stream_ctxs, &stream_info->ctx_array_dma, + mem_flags); + if (!stream_info->stream_ctx_array) + goto cleanup_ctx; + memset(stream_info->stream_ctx_array, 0, + sizeof(struct xhci_stream_ctx)*num_stream_ctxs); + + /* Allocate everything needed to free the stream rings later */ + stream_info->free_streams_command = + xhci_alloc_command(xhci, true, true, mem_flags); + if (!stream_info->free_streams_command) + goto cleanup_ctx; + + INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); + + /* Allocate rings for all the streams that the driver will use, + * and add their segment DMA addresses to the radix tree. + * Stream 0 is reserved. + */ + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + stream_info->stream_rings[cur_stream] = + xhci_ring_alloc(xhci, 1, true, mem_flags); + cur_ring = stream_info->stream_rings[cur_stream]; + if (!cur_ring) + goto cleanup_rings; + cur_ring->stream_id = cur_stream; + /* Set deq ptr, cycle bit, and stream context type */ + addr = cur_ring->first_seg->dma | + SCT_FOR_CTX(SCT_PRI_TR) | + cur_ring->cycle_state; + stream_info->stream_ctx_array[cur_stream].stream_ring = addr; + xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", + cur_stream, (unsigned long long) addr); + + key = (unsigned long) + (cur_ring->first_seg->dma >> SEGMENT_SHIFT); + ret = radix_tree_insert(&stream_info->trb_address_map, + key, cur_ring); + if (ret) { + xhci_ring_free(xhci, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + goto cleanup_rings; + } + } + /* Leave the other unused stream ring pointers in the stream context + * array initialized to zero. This will cause the xHC to give us an + * error if the device asks for a stream ID we don't have setup (if it + * was any other way, the host controller would assume the ring is + * "empty" and wait forever for data to be queued to that stream ID). + */ +#if XHCI_DEBUG + /* Do a little test on the radix tree to make sure it returns the + * correct values. 
+ */ + if (xhci_test_radix_tree(xhci, num_streams, stream_info)) + goto cleanup_rings; +#endif + + return stream_info; + +cleanup_rings: + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + addr = cur_ring->first_seg->dma; + radix_tree_delete(&stream_info->trb_address_map, + addr >> SEGMENT_SHIFT); + xhci_ring_free(xhci, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + xhci_free_command(xhci, stream_info->free_streams_command); +cleanup_ctx: + kfree(stream_info->stream_rings); +cleanup_info: + kfree(stream_info); +cleanup_trbs: + xhci->cmd_ring_reserved_trbs--; + return NULL; +} +/* + * Sets the MaxPStreams field and the Linear Stream Array field. + * Sets the dequeue pointer to the stream context array. + */ +void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + struct xhci_stream_info *stream_info) +{ + u32 max_primary_streams; + /* MaxPStreams is the number of stream context array entries, not the + * number we're actually using. Must be in 2^(MaxPstreams + 1) format. + * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. + */ + max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; + xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", + 1 << (max_primary_streams + 1)); + ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; + ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams); + ep_ctx->ep_info |= EP_HAS_LSA; + ep_ctx->deq = stream_info->ctx_array_dma; +} + +/* + * Sets the MaxPStreams field and the Linear Stream Array field to 0. + * Reinstalls the "normal" endpoint ring (at its previous dequeue mark, + * not at the beginning of the ring). + */ +void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + struct xhci_virt_ep *ep) +{ + dma_addr_t addr; + ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; + ep_ctx->ep_info &= ~EP_HAS_LSA; + addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); + ep_ctx->deq = addr | ep->ring->cycle_state; +} + +/* Frees all stream contexts associated with the endpoint, + * + * Caller should fix the endpoint context streams fields. 
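/*
 * A worked example of the MaxPStreams computation in
 * xhci_setup_streams_ep_input_ctx() above, with fls() open-coded so the
 * sketch is self-contained; num_stream_ctxs is assumed to be a power of
 * two of at least 4.  For 256 entries, fls(256) = 9, so MaxPStreams is
 * 7 and the field encodes 2^(7 + 1) = 256 entries; for 4 entries,
 * fls(4) = 3, MaxPStreams is 1, and 2^(1 + 1) = 4.
 */
static unsigned int max_pstreams(unsigned int num_stream_ctxs)
{
	unsigned int msb = 0;

	while (num_stream_ctxs) {	/* fls(): 1-based index of highest set bit */
		msb++;
		num_stream_ctxs >>= 1;
	}
	return msb - 2;
}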
+ */ +void xhci_free_stream_info(struct xhci_hcd *xhci, + struct xhci_stream_info *stream_info) +{ + int cur_stream; + struct xhci_ring *cur_ring; + dma_addr_t addr; + + if (!stream_info) + return; + + for (cur_stream = 1; cur_stream < stream_info->num_streams; + cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + addr = cur_ring->first_seg->dma; + radix_tree_delete(&stream_info->trb_address_map, + addr >> SEGMENT_SHIFT); + xhci_ring_free(xhci, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + xhci_free_command(xhci, stream_info->free_streams_command); + xhci->cmd_ring_reserved_trbs--; + if (stream_info->stream_ctx_array) + xhci_free_stream_ctx(xhci, + stream_info->num_stream_ctxs, + stream_info->stream_ctx_array, + stream_info->ctx_array_dma); + + if (stream_info) + kfree(stream_info->stream_rings); + kfree(stream_info); +} + + +/***************** Device context manipulation *************************/ + +static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep) +{ + init_timer(&ep->stop_cmd_timer); + ep->stop_cmd_timer.data = (unsigned long) ep; + ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; + ep->xhci = xhci; +} + /* All the xhci_tds in the ring's TD list should be freed at this point */ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) { @@ -263,9 +744,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (!dev) return; - for (i = 0; i < 31; ++i) + for (i = 0; i < 31; ++i) { if (dev->eps[i].ring) xhci_ring_free(xhci, dev->eps[i].ring); + if (dev->eps[i].stream_info) + xhci_free_stream_info(xhci, + dev->eps[i].stream_info); + } + + if (dev->ring_cache) { + for (i = 0; i < dev->num_rings_cached; i++) + xhci_ring_free(xhci, dev->ring_cache[i]); + kfree(dev->ring_cache); + } if (dev->in_ctx) xhci_free_container_ctx(xhci, dev->in_ctx); @@ -273,7 +764,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) xhci_free_container_ctx(xhci, dev->out_ctx); kfree(xhci->devs[slot_id]); - xhci->devs[slot_id] = 0; + xhci->devs[slot_id] = NULL; } int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, @@ -309,15 +800,25 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, (unsigned long long)dev->in_ctx->dma); - /* Initialize the cancellation list for each endpoint */ - for (i = 0; i < 31; i++) + /* Initialize the cancellation list and watchdog timers for each ep */ + for (i = 0; i < 31; i++) { + xhci_init_endpoint_timer(xhci, &dev->eps[i]); INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); + } /* Allocate endpoint 0 ring */ dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags); if (!dev->eps[0].ring) goto fail; + /* Allocate pointers to the ring cache */ + dev->ring_cache = kzalloc( + sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED, + flags); + if (!dev->ring_cache) + goto fail; + dev->num_rings_cached = 0; + init_completion(&dev->cmd_completion); INIT_LIST_HEAD(&dev->cmd_list); @@ -334,6 +835,27 @@ fail: return 0; } +void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, + struct usb_device *udev) +{ + struct xhci_virt_device *virt_dev; + struct xhci_ep_ctx *ep0_ctx; + struct xhci_ring *ep_ring; + + virt_dev = xhci->devs[udev->slot_id]; + ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); + ep_ring = virt_dev->eps[0].ring; + /* + * FIXME we don't keep track of the dequeue pointer very well after a + * Set TR dequeue pointer, so we're setting the dequeue 
pointer of the + * host to our enqueue pointer. This should only be called after a + * configured device has reset, so all control transfers should have + * been completed or cancelled before the reset. + */ + ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue); + ep0_ctx->deq |= ep_ring->cycle_state; +} + /* Setup an xHCI virtual device for a Set Address command */ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) { @@ -374,7 +896,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud case USB_SPEED_LOW: slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; break; - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); return -EINVAL; break; @@ -418,7 +940,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud case USB_SPEED_LOW: ep0_ctx->ep_info2 |= MAX_PACKET(8); break; - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); return -EINVAL; break; @@ -486,8 +1008,13 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, if (interval < 3) interval = 3; if ((1 << interval) != 8*ep->desc.bInterval) - dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", - ep->desc.bEndpointAddress, 1 << interval); + dev_warn(&udev->dev, + "ep %#x - rounding interval" + " to %d microframes, " + "ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + 8*ep->desc.bInterval); } break; default: @@ -496,6 +1023,20 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, return EP_INTERVAL(interval); } +/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. + * High speed endpoint descriptors can define "the number of additional + * transaction opportunities per microframe", but that goes in the Max Burst + * endpoint context field. + */ +static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + if (udev->speed != USB_SPEED_SUPER || + !usb_endpoint_xfer_isoc(&ep->desc)) + return 0; + return ep->ss_ep_comp.bmAttributes; +} + static inline u32 xhci_get_endpoint_type(struct usb_device *udev, struct usb_host_endpoint *ep) { @@ -526,6 +1067,34 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev, return type; } +/* Return the maximum endpoint service interval time (ESIT) payload. + * Basically, this is the maxpacket size, multiplied by the burst size + * and mult size. + */ +static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + int max_burst; + int max_packet; + + /* Only applies for interrupt or isochronous endpoints */ + if (usb_endpoint_xfer_control(&ep->desc) || + usb_endpoint_xfer_bulk(&ep->desc)) + return 0; + + if (udev->speed == USB_SPEED_SUPER) + return ep->ss_ep_comp.wBytesPerInterval; + + max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; + /* A 0 in max burst means 1 transfer per ESIT */ + return max_packet * (max_burst + 1); +} + +/* Set up an endpoint with one ring segment. Do not allocate stream rings. + * Drivers will have to call usb_alloc_streams() to do that. 
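/*
 * A worked example of xhci_get_max_esit_payload() above for a
 * hypothetical high-speed, high-bandwidth endpoint: a wMaxPacketSize of
 * 0x1200 encodes a 512-byte max packet in the low bits and 2 additional
 * transactions per microframe in bits 12:11, so the maximum ESIT
 * payload is 512 * (2 + 1) = 1536 bytes.
 */
static unsigned int hs_max_esit_payload(unsigned int wMaxPacketSize)
{
	unsigned int max_packet = wMaxPacketSize & 0x3ff;
	unsigned int max_burst = (wMaxPacketSize & 0x1800) >> 11;

	return max_packet * (max_burst + 1);
}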
+ */ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, @@ -537,6 +1106,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_ring *ep_ring; unsigned int max_packet; unsigned int max_burst; + u32 max_esit_payload; ep_index = xhci_get_endpoint_index(&ep->desc); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); @@ -544,12 +1114,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Set up the endpoint ring */ virt_dev->eps[ep_index].new_ring = xhci_ring_alloc(xhci, 1, true, mem_flags); - if (!virt_dev->eps[ep_index].new_ring) - return -ENOMEM; + if (!virt_dev->eps[ep_index].new_ring) { + /* Attempt to use the ring cache */ + if (virt_dev->num_rings_cached == 0) + return -ENOMEM; + virt_dev->eps[ep_index].new_ring = + virt_dev->ring_cache[virt_dev->num_rings_cached]; + virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; + virt_dev->num_rings_cached--; + xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring); + } ep_ring = virt_dev->eps[ep_index].new_ring; ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); + ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep)); /* FIXME dig Mult and streams info out of ep companion desc */ @@ -569,12 +1148,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, max_packet = ep->desc.wMaxPacketSize; ep_ctx->ep_info2 |= MAX_PACKET(max_packet); /* dig out max burst from ep companion desc */ - if (!ep->ss_ep_comp) { - xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); - max_packet = 0; - } else { - max_packet = ep->ss_ep_comp->desc.bMaxBurst; - } + max_packet = ep->ss_ep_comp.bMaxBurst; + if (!max_packet) + xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n"); ep_ctx->ep_info2 |= MAX_BURST(max_packet); break; case USB_SPEED_HIGH: @@ -595,6 +1171,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, default: BUG(); } + max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); + ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload); + + /* + * XXX no idea how to calculate the average TRB buffer length for bulk + * endpoints, as the driver gives us no clue how big each scatter gather + * list entry (or buffer) is going to be. + * + * For isochronous and interrupt endpoints, we set it to the max + * available, until we have new API in the USB core to allow drivers to + * declare how much bandwidth they actually need. + * + * Normally, it would be calculated by taking the total of the buffer + * lengths in the TD and then dividing by the number of TRBs in a TD, + * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't + * use Event Data TRBs, and we don't chain in a link TRB on short + * transfers, we're basically dividing by 1. 
+ */ + ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload); + /* FIXME Debug endpoint context */ return 0; } @@ -758,7 +1354,8 @@ static void scratchpad_free(struct xhci_hcd *xhci) } struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, - bool allocate_completion, gfp_t mem_flags) + bool allocate_in_ctx, bool allocate_completion, + gfp_t mem_flags) { struct xhci_command *command; @@ -766,16 +1363,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, if (!command) return NULL; - command->in_ctx = - xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); - if (!command->in_ctx) - return NULL; + if (allocate_in_ctx) { + command->in_ctx = + xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, + mem_flags); + if (!command->in_ctx) { + kfree(command); + return NULL; + } + } if (allocate_completion) { command->completion = kzalloc(sizeof(struct completion), mem_flags); if (!command->completion) { xhci_free_container_ctx(xhci, command->in_ctx); + kfree(command); return NULL; } init_completion(command->completion); @@ -802,9 +1405,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) int i; /* Free the Event Ring Segment Table and the actual Event Ring */ - xhci_writel(xhci, 0, &xhci->ir_set->erst_size); - xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); - xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); + if (xhci->ir_set) { + xhci_writel(xhci, 0, &xhci->ir_set->erst_size); + xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); + xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); + } size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); if (xhci->erst.entries) pci_free_consistent(pdev, size, @@ -835,17 +1440,184 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->device_pool = NULL; xhci_dbg(xhci, "Freed device context pool\n"); + if (xhci->small_streams_pool) + dma_pool_destroy(xhci->small_streams_pool); + xhci->small_streams_pool = NULL; + xhci_dbg(xhci, "Freed small stream array pool\n"); + + if (xhci->medium_streams_pool) + dma_pool_destroy(xhci->medium_streams_pool); + xhci->medium_streams_pool = NULL; + xhci_dbg(xhci, "Freed medium stream array pool\n"); + xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); if (xhci->dcbaa) pci_free_consistent(pdev, sizeof(*xhci->dcbaa), xhci->dcbaa, xhci->dcbaa->dma); xhci->dcbaa = NULL; + scratchpad_free(xhci); xhci->page_size = 0; xhci->page_shift = 0; - scratchpad_free(xhci); } +static int xhci_test_trb_in_td(struct xhci_hcd *xhci, + struct xhci_segment *input_seg, + union xhci_trb *start_trb, + union xhci_trb *end_trb, + dma_addr_t input_dma, + struct xhci_segment *result_seg, + char *test_name, int test_number) +{ + unsigned long long start_dma; + unsigned long long end_dma; + struct xhci_segment *seg; + + start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); + end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); + + seg = trb_in_td(input_seg, start_trb, end_trb, input_dma); + if (seg != result_seg) { + xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", + test_name, test_number); + xhci_warn(xhci, "Tested TRB math w/ seg %p and " + "input DMA 0x%llx\n", + input_seg, + (unsigned long long) input_dma); + xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " + "ending TRB %p (0x%llx DMA)\n", + start_trb, start_dma, + end_trb, end_dma); + xhci_warn(xhci, "Expected seg %p, got seg %p\n", + result_seg, seg); + return -1; + } + return 0; +} + +/* TRB math checks for xhci_trb_in_td(), using the command and event rings. 
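/*
 * A minimal sketch of the TRB address arithmetic the test vectors below
 * rely on: each TRB is 16 bytes, so a TRB's DMA address is its
 * segment's base DMA address plus 16 times its index within the
 * segment.  Plain integer types are used here for illustration.
 */
static unsigned long long trb_index_to_dma(unsigned long long seg_dma,
					   unsigned int trb_index)
{
	return seg_dma + 16ULL * trb_index;
}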
*/ +static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) +{ + struct { + dma_addr_t input_dma; + struct xhci_segment *result_seg; + } simple_test_vector [] = { + /* A zeroed DMA field should fail */ + { 0, NULL }, + /* One TRB before the ring start should fail */ + { xhci->event_ring->first_seg->dma - 16, NULL }, + /* One byte before the ring start should fail */ + { xhci->event_ring->first_seg->dma - 1, NULL }, + /* Starting TRB should succeed */ + { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, + /* Ending TRB should succeed */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, + xhci->event_ring->first_seg }, + /* One byte after the ring end should fail */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, + /* One TRB after the ring end should fail */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, + /* An address of all ones should fail */ + { (dma_addr_t) (~0), NULL }, + }; + struct { + struct xhci_segment *input_seg; + union xhci_trb *start_trb; + union xhci_trb *end_trb; + dma_addr_t input_dma; + struct xhci_segment *result_seg; + } complex_test_vector [] = { + /* Test feeding a valid DMA address from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->event_ring->first_seg->trbs, + .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* Test feeding a valid end TRB from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->event_ring->first_seg->trbs, + .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* Test feeding a valid start and end TRB from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->cmd_ring->first_seg->trbs, + .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* TRB in this ring, but after this TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[0], + .end_trb = &xhci->event_ring->first_seg->trbs[3], + .input_dma = xhci->event_ring->first_seg->dma + 4*16, + .result_seg = NULL, + }, + /* TRB in this ring, but before this TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[3], + .end_trb = &xhci->event_ring->first_seg->trbs[6], + .input_dma = xhci->event_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + /* TRB in this ring, but after this wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->event_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + /* TRB in this ring, but before this wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, + .result_seg = NULL, + }, + /* TRB not in this ring, and we have a wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = 
xhci->cmd_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + }; + + unsigned int num_tests; + int i, ret; + + num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]); + for (i = 0; i < num_tests; i++) { + ret = xhci_test_trb_in_td(xhci, + xhci->event_ring->first_seg, + xhci->event_ring->first_seg->trbs, + &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + simple_test_vector[i].input_dma, + simple_test_vector[i].result_seg, + "Simple", i); + if (ret < 0) + return ret; + } + + num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]); + for (i = 0; i < num_tests; i++) { + ret = xhci_test_trb_in_td(xhci, + complex_test_vector[i].input_seg, + complex_test_vector[i].start_trb, + complex_test_vector[i].end_trb, + complex_test_vector[i].input_dma, + complex_test_vector[i].result_seg, + "Complex", i); + if (ret < 0) + return ret; + } + xhci_dbg(xhci, "TRB math tests passed.\n"); + return 0; +} + + int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { dma_addr_t dma; @@ -914,6 +1686,22 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->segment_pool || !xhci->device_pool) goto fail; + /* Linear stream context arrays don't have any boundary restrictions, + * and only need to be 16-byte aligned. + */ + xhci->small_streams_pool = + dma_pool_create("xHCI 256 byte stream ctx arrays", + dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); + xhci->medium_streams_pool = + dma_pool_create("xHCI 1KB stream ctx arrays", + dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); + /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE + * will be allocated with pci_alloc_consistent() + */ + + if (!xhci->small_streams_pool || !xhci->medium_streams_pool) + goto fail; + /* Set up the command ring to have one segments for now. */ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); if (!xhci->cmd_ring) @@ -949,6 +1737,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); if (!xhci->event_ring) goto fail; + if (xhci_check_trb_in_td_math(xhci, flags) < 0) + goto fail; xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); @@ -1003,7 +1793,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) */ init_completion(&xhci->addr_dev); for (i = 0; i < MAX_HC_SLOTS; ++i) - xhci->devs[i] = 0; + xhci->devs[i] = NULL; if (scratchpad_alloc(xhci, flags)) goto fail; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 06595ec27bb..11482b6b938 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -54,6 +54,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; + hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2; + xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); @@ -76,6 +78,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint\n"); } + if (pdev->vendor == PCI_VENDOR_ID_NEC) + xhci->quirks |= XHCI_NEC_HOST; /* Make sure the HC is halted. 
*/ retval = xhci_halt(xhci); @@ -130,6 +134,8 @@ static const struct hc_driver xhci_pci_hc_driver = { .urb_dequeue = xhci_urb_dequeue, .alloc_dev = xhci_alloc_dev, .free_dev = xhci_free_dev, + .alloc_streams = xhci_alloc_streams, + .free_streams = xhci_free_streams, .add_endpoint = xhci_add_endpoint, .drop_endpoint = xhci_drop_endpoint, .endpoint_reset = xhci_endpoint_reset, @@ -137,6 +143,7 @@ static const struct hc_driver xhci_pci_hc_driver = { .reset_bandwidth = xhci_reset_bandwidth, .address_device = xhci_address_device, .update_hub_device = xhci_update_hub_device, + .reset_device = xhci_reset_device, /* * scheduling support @@ -172,12 +179,12 @@ static struct pci_driver xhci_pci_driver = { .shutdown = usb_hcd_pci_shutdown, }; -int xhci_register_pci() +int xhci_register_pci(void) { return pci_register_driver(&xhci_pci_driver); } -void xhci_unregister_pci() +void xhci_unregister_pci(void) { pci_unregister_driver(&xhci_pci_driver); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 173c39c7648..bfc99a93945 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -65,6 +65,7 @@ */ #include <linux/scatterlist.h> +#include <linux/slab.h> #include "xhci.h" /* @@ -111,6 +112,12 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); } +static inline int enqueue_is_link_trb(struct xhci_ring *ring) +{ + struct xhci_link_trb *link = &ring->enqueue->link; + return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); +} + /* Updates trb to point to the next TRB in the ring, and updates seg if the next * TRB is in a new segment. This does not skip over link TRBs, and it does not * effect the ring dequeue or enqueue pointers. @@ -175,8 +182,12 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer * set, but other sections talk about dealing with the chain bit set. This was * fixed in the 0.96 specification errata, but we have to assume that all 0.95 * xHCI hardware can't handle the chain bit being cleared on a link TRB. + * + * @more_trbs_coming: Will you enqueue more TRBs before calling + * prepare_transfer()? */ -static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) +static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, + bool consumer, bool more_trbs_coming) { u32 chain; union xhci_trb *next; @@ -192,6 +203,17 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer while (last_trb(xhci, ring, ring->enq_seg, next)) { if (!consumer) { if (ring != xhci->event_ring) { + /* + * If the caller doesn't plan on enqueueing more + * TDs before ringing the doorbell, then we + * don't want to give the link TRB to the + * hardware just yet. We'll give the link TRB + * back in prepare_ring() just before we enqueue + * the TD at the top of the ring. + */ + if (!chain && !more_trbs_coming) + break; + /* If we're not dealing with 0.95 hardware, * carry over the chain bit of the previous TRB * (which may mean the chain bit is cleared). @@ -202,10 +224,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer } /* Give this link TRB to the hardware */ wmb(); - if (next->link.control & TRB_CYCLE) - next->link.control &= (u32) ~TRB_CYCLE; - else - next->link.control |= (u32) TRB_CYCLE; + next->link.control ^= TRB_CYCLE; } /* Toggle the cycle bit after the last ring segment. 
*/ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { @@ -241,10 +260,34 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, int i; union xhci_trb *enq = ring->enqueue; struct xhci_segment *enq_seg = ring->enq_seg; + struct xhci_segment *cur_seg; + unsigned int left_on_ring; + + /* If we are currently pointing to a link TRB, advance the + * enqueue pointer before checking for space */ + while (last_trb(xhci, ring, enq_seg, enq)) { + enq_seg = enq_seg->next; + enq = enq_seg->trbs; + } /* Check if ring is empty */ - if (enq == ring->dequeue) + if (enq == ring->dequeue) { + /* Can't use link trbs */ + left_on_ring = TRBS_PER_SEGMENT - 1; + for (cur_seg = enq_seg->next; cur_seg != enq_seg; + cur_seg = cur_seg->next) + left_on_ring += TRBS_PER_SEGMENT - 1; + + /* Always need one TRB free in the ring. */ + left_on_ring -= 1; + if (num_trbs > left_on_ring) { + xhci_warn(xhci, "Not enough room on ring; " + "need %u TRBs, %u TRBs left\n", + num_trbs, left_on_ring); + return 0; + } return 1; + } /* Make sure there's an extra empty TRB available */ for (i = 0; i <= num_trbs; ++i) { if (enq == ring->dequeue) @@ -294,7 +337,8 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci) static void ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, - unsigned int ep_index) + unsigned int ep_index, + unsigned int stream_id) { struct xhci_virt_ep *ep; unsigned int ep_state; @@ -305,11 +349,16 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, ep_state = ep->ep_state; /* Don't ring the doorbell for this endpoint if there are pending * cancellations because the we don't want to interrupt processing. + * We don't want to restart any stream rings if there's a set dequeue + * pointer command pending because the device can choose to start any + * stream once the endpoint is on the HW schedule. + * FIXME - check all the stream rings for pending cancellations. */ - if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING) + if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING) && !(ep_state & EP_HALTED)) { field = xhci_readl(xhci, db_addr) & DB_MASK; - xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); + field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id); + xhci_writel(xhci, field, db_addr); /* Flush PCI posted writes - FIXME Matthew Wilcox says this * isn't time-critical and we shouldn't make the CPU wait for * the flush. @@ -318,6 +367,31 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, } } +/* Ring the doorbell for any rings with pending URBs */ +static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index) +{ + unsigned int stream_id; + struct xhci_virt_ep *ep; + + ep = &xhci->devs[slot_id]->eps[ep_index]; + + /* A ring has pending URBs if its TD list is not empty */ + if (!(ep->ep_state & EP_HAS_STREAMS)) { + if (!(list_empty(&ep->ring->td_list))) + ring_ep_doorbell(xhci, slot_id, ep_index, 0); + return; + } + + for (stream_id = 1; stream_id < ep->stream_info->num_streams; + stream_id++) { + struct xhci_stream_info *stream_info = ep->stream_info; + if (!list_empty(&stream_info->stream_rings[stream_id]->td_list)) + ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); + } +} + /* * Find the segment that trb is in. Start searching in start_seg. 
* If we must move past a segment that has a link TRB with a toggle cycle state @@ -333,13 +407,14 @@ static struct xhci_segment *find_trb_seg( while (cur_seg->trbs > trb || &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; - if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK && + if ((generic_trb->field[3] & TRB_TYPE_BITMASK) == + TRB_TYPE(TRB_LINK) && (generic_trb->field[3] & LINK_TOGGLE)) *cycle_state = ~(*cycle_state) & 0x1; cur_seg = cur_seg->next; if (cur_seg == start_seg) /* Looped over the entire list. Oops! */ - return 0; + return NULL; } return cur_seg; } @@ -360,14 +435,23 @@ static struct xhci_segment *find_trb_seg( */ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, - struct xhci_td *cur_td, struct xhci_dequeue_state *state) + unsigned int stream_id, struct xhci_td *cur_td, + struct xhci_dequeue_state *state) { struct xhci_virt_device *dev = xhci->devs[slot_id]; - struct xhci_ring *ep_ring = dev->eps[ep_index].ring; + struct xhci_ring *ep_ring; struct xhci_generic_trb *trb; struct xhci_ep_ctx *ep_ctx; dma_addr_t addr; + ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, + ep_index, stream_id); + if (!ep_ring) { + xhci_warn(xhci, "WARN can't find new dequeue state " + "for invalid stream ID %u.\n", + stream_id); + return; + } state->new_cycle_state = 0; xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); state->new_deq_seg = find_trb_seg(cur_td->start_seg, @@ -389,7 +473,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, BUG(); trb = &state->new_deq_ptr->generic; - if (TRB_TYPE(trb->field[3]) == TRB_LINK && + if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && (trb->field[3] & LINK_TOGGLE)) state->new_cycle_state = ~(state->new_cycle_state) & 0x1; next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); @@ -447,11 +531,13 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, } static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index, struct xhci_segment *deq_seg, + unsigned int ep_index, unsigned int stream_id, + struct xhci_segment *deq_seg, union xhci_trb *deq_ptr, u32 cycle_state); void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id, struct xhci_dequeue_state *deq_state) { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; @@ -463,7 +549,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, deq_state->new_deq_ptr, (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), deq_state->new_cycle_state); - queue_set_tr_deq(xhci, slot_id, ep_index, + queue_set_tr_deq(xhci, slot_id, ep_index, stream_id, deq_state->new_deq_seg, deq_state->new_deq_ptr, (u32) deq_state->new_cycle_state); @@ -475,6 +561,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, ep->ep_state |= SET_DEQ_PENDING; } +static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep) +{ + ep->ep_state &= ~EP_HALT_PENDING; + /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the + * timer is running on another CPU, we don't decrement stop_cmds_pending + * (since we didn't successfully stop the watchdog timer). 
+ */ + if (del_timer(&ep->stop_cmd_timer)) + ep->stop_cmds_pending--; +} + +/* Must be called with xhci->lock held in interrupt context */ +static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, + struct xhci_td *cur_td, int status, char *adjective) +{ + struct usb_hcd *hcd = xhci_to_hcd(xhci); + + cur_td->urb->hcpriv = NULL; + usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb); + xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb); + + spin_unlock(&xhci->lock); + usb_hcd_giveback_urb(hcd, cur_td->urb, status); + kfree(cur_td); + spin_lock(&xhci->lock); + xhci_dbg(xhci, "%s URB given back\n", adjective); +} + /* * When we get a command completion for a Stop Endpoint Command, we need to * unlink any cancelled TDs from the ring. There are two ways to do that: @@ -493,22 +608,21 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, struct xhci_ring *ep_ring; struct xhci_virt_ep *ep; struct list_head *entry; - struct xhci_td *cur_td = 0; + struct xhci_td *cur_td = NULL; struct xhci_td *last_unlinked_td; struct xhci_dequeue_state deq_state; -#ifdef CONFIG_USB_HCD_STAT - ktime_t stop_time = ktime_get(); -#endif memset(&deq_state, 0, sizeof(deq_state)); slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); ep = &xhci->devs[slot_id]->eps[ep_index]; - ep_ring = ep->ring; - if (list_empty(&ep->cancelled_td_list)) + if (list_empty(&ep->cancelled_td_list)) { + xhci_stop_watchdog_timer_in_irq(xhci, ep); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); return; + } /* Fix up the ep ring first, so HW stops executing cancelled TDs. * We have the xHCI lock, so nothing can modify this list until we drop @@ -520,34 +634,59 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", cur_td->first_trb, (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); + ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); + if (!ep_ring) { + /* This shouldn't happen unless a driver is mucking + * with the stream ID after submission. This will + * leave the TD on the hardware ring, and the hardware + * will try to execute it, and may access a buffer + * that has already been freed. In the best case, the + * hardware will execute it, and the event handler will + * ignore the completion event for that TD, since it was + * removed from the td_list for that endpoint. In + * short, don't muck with the stream ID after + * submission. + */ + xhci_warn(xhci, "WARN Cancelled URB %p " + "has invalid stream ID %u.\n", + cur_td->urb, + cur_td->urb->stream_id); + goto remove_finished_td; + } /* * If we stopped on the TD we need to cancel, then we have to * move the xHC endpoint ring dequeue pointer past this TD. */ if (cur_td == ep->stopped_td) - xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, - &deq_state); + xhci_find_new_dequeue_state(xhci, slot_id, ep_index, + cur_td->urb->stream_id, + cur_td, &deq_state); else td_to_noop(xhci, ep_ring, cur_td); +remove_finished_td: /* * The event handler won't see a completion for this TD anymore, * so remove it from the endpoint ring's TD list. Keep it in * the cancelled TD list for URB completion later. 
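/*
 * Note on xhci_giveback_urb_in_irq() above: usb_hcd_giveback_urb() may
 * invoke the URB's completion handler, which is allowed to resubmit URBs
 * and so re-enter the driver; that is why xhci->lock is dropped around
 * the giveback and re-taken afterwards.
 */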
*/ list_del(&cur_td->td_list); - ep->cancels_pending--; } last_unlinked_td = cur_td; + xhci_stop_watchdog_timer_in_irq(xhci, ep); /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { xhci_queue_new_dequeue_state(xhci, - slot_id, ep_index, &deq_state); + slot_id, ep_index, + ep->stopped_td->urb->stream_id, + &deq_state); xhci_ring_cmd_db(xhci); } else { - /* Otherwise just ring the doorbell to restart the ring */ - ring_ep_doorbell(xhci, slot_id, ep_index); + /* Otherwise ring the doorbell(s) to restart queued transfers */ + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } + ep->stopped_td = NULL; + ep->stopped_trb = NULL; /* * Drop the lock and complete the URBs in the cancelled TD list. @@ -561,27 +700,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, list_del(&cur_td->cancelled_td_list); /* Clean up the cancelled URB */ -#ifdef CONFIG_USB_HCD_STAT - hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length, - ktime_sub(stop_time, cur_td->start_time)); -#endif - cur_td->urb->hcpriv = NULL; - usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb); - - xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb); - spin_unlock(&xhci->lock); /* Doesn't matter what we pass for status, since the core will * just overwrite it (because the URB has been unlinked). */ - usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0); - kfree(cur_td); + xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled"); - spin_lock(&xhci->lock); + /* Stop processing the cancelled list if the watchdog timer is + * running. + */ + if (xhci->xhc_state & XHCI_STATE_DYING) + return; } while (cur_td != last_unlinked_td); /* Return to the event handler with xhci->lock re-acquired */ } +/* Watchdog timer function for when a stop endpoint command fails to complete. + * In this case, we assume the host controller is broken or dying or dead. The + * host may still be completing some other events, so we have to be careful to + * let the event ring handler and the URB dequeueing/enqueueing functions know + * through xhci->state. + * + * The timer may also fire if the host takes a very long time to respond to the + * command, and the stop endpoint command completion handler cannot delete the + * timer before the timer function is called. Another endpoint cancellation may + * sneak in before the timer function can grab the lock, and that may queue + * another stop endpoint command and add the timer back. So we cannot use a + * simple flag to say whether there is a pending stop endpoint command for a + * particular endpoint. + * + * Instead we use a combination of that flag and a counter for the number of + * pending stop endpoint commands. If the timer is the tail end of the last + * stop endpoint command, and the endpoint's command is still pending, we assume + * the host is dying. 
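/*
 * A minimal sketch of the "host is dying" test used by the watchdog
 * handler that follows, evaluated after the handler has dropped its own
 * pending count.  The helper name is illustrative; the fields are the
 * ones this patch adds to struct xhci_virt_ep.
 */
static bool sketch_stop_cmd_watchdog_should_fire(struct xhci_virt_ep *ep)
{
	/*
	 * Only the timer for the last queued stop endpoint command gets to
	 * declare the host dead: if another stop command is still pending,
	 * its own timer will re-evaluate the endpoint later.
	 */
	return ep->stop_cmds_pending == 0 &&
	       (ep->ep_state & EP_HALT_PENDING);
}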
+ */ +void xhci_stop_endpoint_command_watchdog(unsigned long arg) +{ + struct xhci_hcd *xhci; + struct xhci_virt_ep *ep; + struct xhci_virt_ep *temp_ep; + struct xhci_ring *ring; + struct xhci_td *cur_td; + int ret, i, j; + + ep = (struct xhci_virt_ep *) arg; + xhci = ep->xhci; + + spin_lock(&xhci->lock); + + ep->stop_cmds_pending--; + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " + "xHCI as DYING, exiting.\n"); + spin_unlock(&xhci->lock); + return; + } + if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { + xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " + "exiting.\n"); + spin_unlock(&xhci->lock); + return; + } + + xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); + xhci_warn(xhci, "Assuming host is dying, halting host.\n"); + /* Oops, HC is dead or dying or at least not responding to the stop + * endpoint command. + */ + xhci->xhc_state |= XHCI_STATE_DYING; + /* Disable interrupts from the host controller and start halting it */ + xhci_quiesce(xhci); + spin_unlock(&xhci->lock); + + ret = xhci_halt(xhci); + + spin_lock(&xhci->lock); + if (ret < 0) { + /* This is bad; the host is not responding to commands and it's + * not allowing itself to be halted. At least interrupts are + * disabled, so we can set HC_STATE_HALT and notify the + * USB core. But if we call usb_hc_died(), it will attempt to + * disconnect all device drivers under this host. Those + * disconnect() methods will wait for all URBs to be unlinked, + * so we must complete them. + */ + xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n"); + xhci_warn(xhci, "Completing active URBs anyway.\n"); + /* We could turn all TDs on the rings to no-ops. This won't + * help if the host has cached part of the ring, and is slow if + * we want to preserve the cycle bit. Skip it and hope the host + * doesn't touch the memory. 
+ */ + } + for (i = 0; i < MAX_HC_SLOTS; i++) { + if (!xhci->devs[i]) + continue; + for (j = 0; j < 31; j++) { + temp_ep = &xhci->devs[i]->eps[j]; + ring = temp_ep->ring; + if (!ring) + continue; + xhci_dbg(xhci, "Killing URBs for slot ID %u, " + "ep index %u\n", i, j); + while (!list_empty(&ring->td_list)) { + cur_td = list_first_entry(&ring->td_list, + struct xhci_td, + td_list); + list_del(&cur_td->td_list); + if (!list_empty(&cur_td->cancelled_td_list)) + list_del(&cur_td->cancelled_td_list); + xhci_giveback_urb_in_irq(xhci, cur_td, + -ESHUTDOWN, "killed"); + } + while (!list_empty(&temp_ep->cancelled_td_list)) { + cur_td = list_first_entry( + &temp_ep->cancelled_td_list, + struct xhci_td, + cancelled_td_list); + list_del(&cur_td->cancelled_td_list); + xhci_giveback_urb_in_irq(xhci, cur_td, + -ESHUTDOWN, "killed"); + } + } + } + spin_unlock(&xhci->lock); + xhci_to_hcd(xhci)->state = HC_STATE_HALT; + xhci_dbg(xhci, "Calling usb_hc_died()\n"); + usb_hc_died(xhci_to_hcd(xhci)); + xhci_dbg(xhci, "xHCI host controller is dead.\n"); +} + /* * When we get a completion for a Set Transfer Ring Dequeue Pointer command, * we need to clear the set deq pending flag in the endpoint ring state, so that @@ -595,6 +843,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, { unsigned int slot_id; unsigned int ep_index; + unsigned int stream_id; struct xhci_ring *ep_ring; struct xhci_virt_device *dev; struct xhci_ep_ctx *ep_ctx; @@ -602,8 +851,19 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); + stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]); dev = xhci->devs[slot_id]; - ep_ring = dev->eps[ep_index].ring; + + ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); + if (!ep_ring) { + xhci_warn(xhci, "WARN Set TR deq ptr command for " + "freed stream ID %u\n", + stream_id); + /* XXX: Harmless??? */ + dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; + return; + } + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); @@ -648,7 +908,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, } dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; - ring_ep_doorbell(xhci, slot_id, ep_index); + /* Restart any rings with pending URBs */ + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } static void handle_reset_ep_completion(struct xhci_hcd *xhci, @@ -657,11 +918,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, { int slot_id; unsigned int ep_index; - struct xhci_ring *ep_ring; slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; /* This command will only fail if the endpoint wasn't halted, * but we don't care. */ @@ -679,9 +938,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, false); xhci_ring_cmd_db(xhci); } else { - /* Clear our internal halted state and restart the ring */ + /* Clear our internal halted state and restart the ring(s) */ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; - ring_ep_doorbell(xhci, slot_id, ep_index); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } } @@ -758,35 +1017,42 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, * Configure endpoint commands can come from the USB core * configuration or alt setting changes, or because the HW * needed an extra configure endpoint command after a reset - * endpoint command. 
In the latter case, the xHCI driver is - * not waiting on the configure endpoint command. + * endpoint command or streams were being configured. + * If the command was for a halted endpoint, the xHCI driver + * is not waiting on the configure endpoint command. */ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); /* Input ctx add_flags are the endpoint index plus one */ ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; - if (!ep_ring) { - /* This must have been an initial configure endpoint */ - xhci->devs[slot_id]->cmd_status = - GET_COMP_CODE(event->status); - complete(&xhci->devs[slot_id]->cmd_completion); - break; - } - ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; - xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, " - "state = %d\n", ep_index, ep_state); + /* A usb_set_interface() call directly after clearing a halted + * condition may race on this quirky hardware. Not worth + * worrying about, since this is prototype hardware. Not sure + * if this will work for streams, but streams support was + * untested on this prototype. + */ if (xhci->quirks & XHCI_RESET_EP_QUIRK && - ep_state & EP_HALTED) { - /* Clear our internal halted state and restart ring */ + ep_index != (unsigned int) -1 && + ctrl_ctx->add_flags - SLOT_FLAG == + ctrl_ctx->drop_flags) { + ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; + ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; + if (!(ep_state & EP_HALTED)) + goto bandwidth_change; + xhci_dbg(xhci, "Completed config ep cmd - " + "last ep index = %d, state = %d\n", + ep_index, ep_state); + /* Clear internal halted state and restart ring(s) */ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; - ring_ep_doorbell(xhci, slot_id, ep_index); - } else { - xhci->devs[slot_id]->cmd_status = - GET_COMP_CODE(event->status); - complete(&xhci->devs[slot_id]->cmd_completion); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + break; } +bandwidth_change: + xhci_dbg(xhci, "Completed config ep cmd\n"); + xhci->devs[slot_id]->cmd_status = + GET_COMP_CODE(event->status); + complete(&xhci->devs[slot_id]->cmd_completion); break; case TRB_TYPE(TRB_EVAL_CONTEXT): virt_dev = xhci->devs[slot_id]; @@ -811,6 +1077,26 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_TYPE(TRB_RESET_EP): handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); break; + case TRB_TYPE(TRB_RESET_DEV): + xhci_dbg(xhci, "Completed reset device command.\n"); + slot_id = TRB_TO_SLOT_ID( + xhci->cmd_ring->dequeue->generic.field[3]); + virt_dev = xhci->devs[slot_id]; + if (virt_dev) + handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); + else + xhci_warn(xhci, "Reset device command completion " + "for disabled slot %u\n", slot_id); + break; + case TRB_TYPE(TRB_NEC_GET_FW): + if (!(xhci->quirks & XHCI_NEC_HOST)) { + xhci->error_bitmask |= 1 << 6; + break; + } + xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", + NEC_FW_MAJOR(event->status), + NEC_FW_MINOR(event->status)); + break; default: /* Skip over unknown commands on the event ring */ xhci->error_bitmask |= 1 << 6; @@ -819,6 +1105,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, inc_deq(xhci, xhci->cmd_ring, false); } +static void handle_vendor_event(struct xhci_hcd *xhci, + union xhci_trb *event) +{ + u32 trb_type; + + trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]); + xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); + if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & 
XHCI_NEC_HOST)) + handle_cmd_completion(xhci, &event->event_cmd); +} + static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event) { @@ -849,8 +1146,7 @@ static void handle_port_status(struct xhci_hcd *xhci, * TRB in this TD, this function returns that TRB's segment. Otherwise it * returns 0. */ -static struct xhci_segment *trb_in_td( - struct xhci_segment *start_seg, +struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, union xhci_trb *start_trb, union xhci_trb *end_trb, dma_addr_t suspect_dma) @@ -864,9 +1160,11 @@ static struct xhci_segment *trb_in_td( cur_seg = start_seg; do { + if (start_dma == 0) + return NULL; /* We may get an event for a Link TRB in the middle of a TD */ end_seg_dma = xhci_trb_virt_to_dma(cur_seg, - &start_seg->trbs[TRBS_PER_SEGMENT - 1]); + &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); /* If the end TRB isn't in this segment, this is set to 0 */ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); @@ -885,7 +1183,7 @@ static struct xhci_segment *trb_in_td( suspect_dma <= end_trb_dma)) return cur_seg; } - return 0; + return NULL; } else { /* Might still be somewhere in this segment */ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) @@ -893,8 +1191,70 @@ static struct xhci_segment *trb_in_td( } cur_seg = cur_seg->next; start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); - } while (1); + } while (cur_seg != start_seg); + return NULL; +} + +static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id, + struct xhci_td *td, union xhci_trb *event_trb) +{ + struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; + ep->ep_state |= EP_HALTED; + ep->stopped_td = td; + ep->stopped_trb = event_trb; + ep->stopped_stream = stream_id; + + xhci_queue_reset_ep(xhci, slot_id, ep_index); + xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); + + ep->stopped_td = NULL; + ep->stopped_trb = NULL; + ep->stopped_stream = 0; + + xhci_ring_cmd_db(xhci); +} + +/* Check if an error has halted the endpoint ring. The class driver will + * cleanup the halt for a non-default control endpoint if we indicate a stall. + * However, a babble and other errors also halt the endpoint ring, and the class + * driver won't clear the halt in that case, so we need to issue a Set Transfer + * Ring Dequeue Pointer command manually. + */ +static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + unsigned int trb_comp_code) +{ + /* TRB completion codes that may require a manual halt cleanup */ + if (trb_comp_code == COMP_TX_ERR || + trb_comp_code == COMP_BABBLE || + trb_comp_code == COMP_SPLIT_ERR) + /* The 0.96 spec says a babbling control endpoint + * is not halted. The 0.96 spec says it is. Some HW + * claims to be 0.95 compliant, but it halts the control + * endpoint anyway. Check if a babble halted the + * endpoint. + */ + if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED) + return 1; + + return 0; +} + +int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) +{ + if (trb_comp_code >= 224 && trb_comp_code <= 255) { + /* Vendor defined "informational" completion code, + * treat as not-an-error. 
+ */ + xhci_dbg(xhci, "Vendor defined info completion code %u\n", + trb_comp_code); + xhci_dbg(xhci, "Treating code as success.\n"); + return 1; + } + return 0; } /* @@ -910,11 +1270,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, struct xhci_ring *ep_ring; unsigned int slot_id; int ep_index; - struct xhci_td *td = 0; + struct xhci_td *td = NULL; dma_addr_t event_dma; struct xhci_segment *event_seg; union xhci_trb *event_trb; - struct urb *urb = 0; + struct urb *urb = NULL; int status = -EINPROGRESS; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; @@ -931,10 +1291,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep_index = TRB_TO_EP_ID(event->flags) - 1; xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); ep = &xdev->eps[ep_index]; - ep_ring = ep->ring; + ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { - xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); + xhci_err(xhci, "ERROR Transfer event for disabled endpoint " + "or incorrect stream ring\n"); return -ENODEV; } @@ -999,6 +1360,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_warn(xhci, "WARN: TRB error on endpoint\n"); status = -EILSEQ; break; + case COMP_SPLIT_ERR: case COMP_TX_ERR: xhci_warn(xhci, "WARN: transfer error on endpoint\n"); status = -EPROTO; @@ -1012,6 +1374,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, status = -ENOSR; break; default: + if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { + status = 0; + break; + } xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n"); urb = NULL; goto cleanup; @@ -1040,15 +1406,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, else status = 0; break; - case COMP_BABBLE: - /* The 0.96 spec says a babbling control endpoint - * is not halted. The 0.96 spec says it is. Some HW - * claims to be 0.95 compliant, but it halts the control - * endpoint anyway. Check if a babble halted the - * endpoint. - */ - if (ep_ctx->ep_info != EP_STATE_HALTED) + + default: + if (!xhci_requires_manual_halt_cleanup(xhci, + ep_ctx, trb_comp_code)) break; + xhci_dbg(xhci, "TRB error code %u, " + "halted endpoint index = %u\n", + trb_comp_code, ep_index); /* else fall through */ case COMP_STALL: /* Did we transfer part of the data (middle) phase? 
*/ @@ -1060,15 +1425,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, else td->urb->actual_length = 0; - ep->stopped_td = td; - ep->stopped_trb = event_trb; - xhci_queue_reset_ep(xhci, slot_id, ep_index); - xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); - xhci_ring_cmd_db(xhci); + xhci_cleanup_halted_endpoint(xhci, + slot_id, ep_index, 0, td, event_trb); goto td_cleanup; - default: - /* Others already handled above */ - break; } /* * Did we transfer any data, despite the errors that might have @@ -1183,8 +1542,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; cur_trb != event_trb; next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP && - TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK) + if ((cur_trb->generic.field[3] & + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && + (cur_trb->generic.field[3] & + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) td->urb->actual_length += TRB_LEN(cur_trb->generic.field[2]); } @@ -1206,16 +1567,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->stopped_td = td; ep->stopped_trb = event_trb; } else { - if (trb_comp_code == COMP_STALL || - trb_comp_code == COMP_BABBLE) { + if (trb_comp_code == COMP_STALL) { /* The transfer is completed from the driver's * perspective, but we need to issue a set dequeue * command for this stalled endpoint to move the dequeue * pointer past the TD. We can't do that here because - * the halt condition must be cleared first. + * the halt condition must be cleared first. Let the + * USB class driver clear the stall later. */ ep->stopped_td = td; ep->stopped_trb = event_trb; + ep->stopped_stream = ep_ring->stream_id; + } else if (xhci_requires_manual_halt_cleanup(xhci, + ep_ctx, trb_comp_code)) { + /* Other types of errors halt the endpoint, but the + * class driver doesn't call usb_reset_endpoint() unless + * the error is -EPIPE. Clear the halted status in the + * xHCI hardware manually. + */ + xhci_cleanup_halted_endpoint(xhci, + slot_id, ep_index, ep_ring->stream_id, td, event_trb); } else { /* Update ring dequeue pointer */ while (ep_ring->dequeue != td->last_trb) @@ -1246,10 +1617,9 @@ td_cleanup: } list_del(&td->td_list); /* Was this TD slated to be cancelled but completed anyway? */ - if (!list_empty(&td->cancelled_td_list)) { + if (!list_empty(&td->cancelled_td_list)) list_del(&td->cancelled_td_list); - ep->cancels_pending--; - } + /* Leave the TD around for the reset endpoint function to use * (but only if it's not a control endpoint, since we already * queued the Set TR dequeue pointer command for stalled @@ -1326,7 +1696,18 @@ void xhci_handle_event(struct xhci_hcd *xhci) update_ptrs = 0; break; default: - xhci->error_bitmask |= 1 << 3; + if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48)) + handle_vendor_event(xhci, event); + else + xhci->error_bitmask |= 1 << 3; + } + /* Any of the above functions may drop and re-acquire the lock, so check + * to make sure a watchdog timer didn't mark the host as non-responsive. + */ + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "xHCI host dying, returning from " + "event handler.\n"); + return; } if (update_ptrs) { @@ -1343,9 +1724,12 @@ void xhci_handle_event(struct xhci_hcd *xhci) /* * Generic function for queueing a TRB on a ring. * The caller must have checked to make sure there's room on the ring. + * + * @more_trbs_coming: Will you enqueue more TRBs before calling + * prepare_transfer()? 
*/ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, - bool consumer, + bool consumer, bool more_trbs_coming, u32 field1, u32 field2, u32 field3, u32 field4) { struct xhci_generic_trb *trb; @@ -1355,7 +1739,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, trb->field[1] = field2; trb->field[2] = field3; trb->field[3] = field4; - inc_enq(xhci, ring, consumer); + inc_enq(xhci, ring, consumer, more_trbs_coming); } /* @@ -1398,20 +1782,66 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, xhci_err(xhci, "ERROR no room on ep ring\n"); return -ENOMEM; } + + if (enqueue_is_link_trb(ep_ring)) { + struct xhci_ring *ring = ep_ring; + union xhci_trb *next; + + xhci_dbg(xhci, "prepare_ring: pointing to link trb\n"); + next = ring->enqueue; + + while (last_trb(xhci, ring, ring->enq_seg, next)) { + + /* If we're not dealing with 0.95 hardware, + * clear the chain bit. + */ + if (!xhci_link_trb_quirk(xhci)) + next->link.control &= ~TRB_CHAIN; + else + next->link.control |= TRB_CHAIN; + + wmb(); + next->link.control ^= (u32) TRB_CYCLE; + + /* Toggle the cycle bit after the last ring segment. */ + if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { + ring->cycle_state = (ring->cycle_state ? 0 : 1); + if (!in_interrupt()) { + xhci_dbg(xhci, "queue_trb: Toggle cycle " + "state for ring %p = %i\n", + ring, (unsigned int)ring->cycle_state); + } + } + ring->enq_seg = ring->enq_seg->next; + ring->enqueue = ring->enq_seg->trbs; + next = ring->enqueue; + } + } + return 0; } static int prepare_transfer(struct xhci_hcd *xhci, struct xhci_virt_device *xdev, unsigned int ep_index, + unsigned int stream_id, unsigned int num_trbs, struct urb *urb, struct xhci_td **td, gfp_t mem_flags) { int ret; + struct xhci_ring *ep_ring; struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); - ret = prepare_ring(xhci, xdev->eps[ep_index].ring, + + ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); + if (!ep_ring) { + xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", + stream_id); + return -EINVAL; + } + + ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK, num_trbs, mem_flags); if (ret) @@ -1431,9 +1861,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, (*td)->urb = urb; urb->hcpriv = (void *) (*td); /* Add this TD to the tail of the endpoint ring's TD list */ - list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list); - (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg; - (*td)->first_trb = xdev->eps[ep_index].ring->enqueue; + list_add_tail(&(*td)->td_list, &ep_ring->td_list); + (*td)->start_seg = ep_ring->enq_seg; + (*td)->first_trb = ep_ring->enqueue; return 0; } @@ -1449,7 +1879,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) xhci_dbg(xhci, "count sg list trbs: \n"); num_trbs = 0; - for_each_sg(urb->sg->sg, sg, num_sgs, i) { + for_each_sg(urb->sg, sg, num_sgs, i) { unsigned int previous_total_trbs = num_trbs; unsigned int len = sg_dma_len(sg); @@ -1499,7 +1929,7 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total) } static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index, int start_cycle, + unsigned int ep_index, unsigned int stream_id, int start_cycle, struct xhci_generic_trb *start_trb, struct xhci_td *td) { /* @@ -1508,7 +1938,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, */ wmb(); start_trb->field[3] |= start_cycle; - ring_ep_doorbell(xhci, slot_id, 
ep_index); + ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); } /* @@ -1552,6 +1982,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); } +/* + * The TD size is the number of bytes remaining in the TD (including this TRB), + * right shifted by 10. + * It must fit in bits 21:17, so it can't be bigger than 31. + */ +static u32 xhci_td_remainder(unsigned int remainder) +{ + u32 max = (1 << (21 - 17 + 1)) - 1; + + if ((remainder >> 10) >= max) + return max << 17; + else + return (remainder >> 10) << 17; +} + static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index) { @@ -1563,16 +2008,21 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, int trb_buff_len, this_sg_len, running_total; bool first_trb; u64 addr; + bool more_trbs_coming; struct xhci_generic_trb *start_trb; int start_cycle; - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ep_ring) + return -EINVAL; + num_trbs = count_sg_trbs_needed(xhci, urb); num_sgs = urb->num_sgs; trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], - ep_index, num_trbs, urb, &td, mem_flags); + ep_index, urb->stream_id, + num_trbs, urb, &td, mem_flags); if (trb_buff_len < 0) return trb_buff_len; /* @@ -1593,7 +2043,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, * the amount of memory allocated for this scatter-gather list. * 3. TRBs buffers can't cross 64KB boundaries. */ - sg = urb->sg->sg; + sg = urb->sg; addr = (u64) sg_dma_address(sg); this_sg_len = sg_dma_len(sg); trb_buff_len = TRB_MAX_BUFF_SIZE - @@ -1609,6 +2059,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, do { u32 field = 0; u32 length_field = 0; + u32 remainder = 0; /* Don't change the cycle bit of the first TRB until later */ if (first_trb) @@ -1638,10 +2089,16 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); } + remainder = xhci_td_remainder(urb->transfer_buffer_length - + running_total) ; length_field = TRB_LEN(trb_buff_len) | - TD_REMAINDER(urb->transfer_buffer_length - running_total) | + remainder | TRB_INTR_TARGET(0); - queue_trb(xhci, ep_ring, false, + if (num_trbs > 1) + more_trbs_coming = true; + else + more_trbs_coming = false; + queue_trb(xhci, ep_ring, false, more_trbs_coming, lower_32_bits(addr), upper_32_bits(addr), length_field, @@ -1678,7 +2135,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } while (running_total < urb->transfer_buffer_length); check_trb_math(urb, num_trbs, running_total); - giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, + start_cycle, start_trb, td); return 0; } @@ -1691,16 +2149,19 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, int num_trbs; struct xhci_generic_trb *start_trb; bool first_trb; + bool more_trbs_coming; int start_cycle; u32 field, length_field; int running_total, trb_buff_len, ret; u64 addr; - if (urb->sg) + if (urb->num_sgs) return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ep_ring) + return -EINVAL; num_trbs = 0; /* How much data is (potentially) left before the 64KB boundary? 
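/*
 * A standalone sketch of the field packing done by xhci_td_remainder()
 * above: the count of bytes still to be queued is divided by 1024 (right
 * shift by 10) and clamped to 31 so it fits in bits 21:17 of the TRB
 * length field.  The figures in the example are illustrative only.
 */
static u32 sketch_td_remainder(unsigned int remaining_bytes)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;	/* 31, the largest 5-bit value */
	u32 quot = remaining_bytes >> 10;	/* units of 1024 bytes */

	return (quot >= max ? max : quot) << 17;
}

/* Example: 64512 bytes left gives 64512 >> 10 = 63, clamped to 31, so the
 * field is 31 << 17; 4096 bytes left gives 4 << 17. */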
*/ @@ -1727,7 +2188,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned long long)urb->transfer_dma, num_trbs); - ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, + ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, num_trbs, urb, &td, mem_flags); if (ret < 0) return ret; @@ -1752,6 +2214,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Queue the first TRB, even if it's zero-length */ do { + u32 remainder = 0; field = 0; /* Don't change the cycle bit of the first TRB until later */ @@ -1770,10 +2233,16 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td->last_trb = ep_ring->enqueue; field |= TRB_IOC; } + remainder = xhci_td_remainder(urb->transfer_buffer_length - + running_total); length_field = TRB_LEN(trb_buff_len) | - TD_REMAINDER(urb->transfer_buffer_length - running_total) | + remainder | TRB_INTR_TARGET(0); - queue_trb(xhci, ep_ring, false, + if (num_trbs > 1) + more_trbs_coming = true; + else + more_trbs_coming = false; + queue_trb(xhci, ep_ring, false, more_trbs_coming, lower_32_bits(addr), upper_32_bits(addr), length_field, @@ -1794,7 +2263,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } while (running_total < urb->transfer_buffer_length); check_trb_math(urb, num_trbs, running_total); - giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, + start_cycle, start_trb, td); return 0; } @@ -1811,7 +2281,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, u32 field, length_field; struct xhci_td *td; - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ep_ring) + return -EINVAL; /* * Need to copy setup packet into setup TRB, so we can't use the setup @@ -1832,8 +2304,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, */ if (urb->transfer_buffer_length > 0) num_trbs++; - ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs, - urb, &td, mem_flags); + ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, + num_trbs, urb, &td, mem_flags); if (ret < 0) return ret; @@ -1848,7 +2321,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Queue setup TRB - see section 6.4.1.2.1 */ /* FIXME better way to translate setup_packet into two u32 fields? */ setup = (struct usb_ctrlrequest *) urb->setup_packet; - queue_trb(xhci, ep_ring, false, + queue_trb(xhci, ep_ring, false, true, /* FIXME endianness is probably going to bite my ass here. 
*/ setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, setup->wIndex | setup->wLength << 16, @@ -1859,12 +2332,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* If there's data, queue data TRBs */ field = 0; length_field = TRB_LEN(urb->transfer_buffer_length) | - TD_REMAINDER(urb->transfer_buffer_length) | + xhci_td_remainder(urb->transfer_buffer_length) | TRB_INTR_TARGET(0); if (urb->transfer_buffer_length > 0) { if (setup->bRequestType & USB_DIR_IN) field |= TRB_DIR_IN; - queue_trb(xhci, ep_ring, false, + queue_trb(xhci, ep_ring, false, true, lower_32_bits(urb->transfer_dma), upper_32_bits(urb->transfer_dma), length_field, @@ -1881,14 +2354,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field = 0; else field = TRB_DIR_IN; - queue_trb(xhci, ep_ring, false, + queue_trb(xhci, ep_ring, false, false, 0, 0, TRB_INTR_TARGET(0), /* Event on completion */ field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); - giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); + giveback_first_trb(xhci, slot_id, ep_index, 0, + start_cycle, start_trb, td); return 0; } @@ -1906,18 +2380,21 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4, bool command_must_succeed) { int reserved_trbs = xhci->cmd_ring_reserved_trbs; + int ret; + if (!command_must_succeed) reserved_trbs++; - if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) { - if (!in_interrupt()) - xhci_err(xhci, "ERR: No room for command on command ring\n"); + ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, + reserved_trbs, GFP_ATOMIC); + if (ret < 0) { + xhci_err(xhci, "ERR: No room for command on command ring\n"); if (command_must_succeed) xhci_err(xhci, "ERR: Reserved TRB counting for " "unfailable commands failed.\n"); - return -ENOMEM; + return ret; } - queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, + queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, field4 | xhci->cmd_ring->cycle_state); return 0; } @@ -1957,6 +2434,20 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, false); } +int xhci_queue_vendor_command(struct xhci_hcd *xhci, + u32 field1, u32 field2, u32 field3, u32 field4) +{ + return queue_command(xhci, field1, field2, field3, field4, false); +} + +/* Queue a reset device command TRB */ +int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id) +{ + return queue_command(xhci, 0, 0, 0, + TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), + false); +} + /* Queue a configure endpoint command TRB */ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) @@ -1992,12 +2483,14 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, * This should not be used for endpoints that have streams enabled. 
*/ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index, struct xhci_segment *deq_seg, + unsigned int ep_index, unsigned int stream_id, + struct xhci_segment *deq_seg, union xhci_trb *deq_ptr, u32 cycle_state) { dma_addr_t addr; u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); + u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); u32 type = TRB_TYPE(TRB_SET_DEQ); addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); @@ -2008,7 +2501,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, return 0; } return queue_command(xhci, lower_32_bits(addr) | cycle_state, - upper_32_bits(addr), 0, + upper_32_bits(addr), trb_stream_id, trb_slot_id | trb_ep_index | type, false); } diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci.c index 99911e727e0..3998f72cd0c 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci.c @@ -21,8 +21,10 @@ */ #include <linux/irq.h> +#include <linux/log2.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/slab.h> #include "xhci.h" @@ -67,22 +69,14 @@ static int handshake(struct xhci_hcd *xhci, void __iomem *ptr, } /* - * Force HC into halt state. - * - * Disable any IRQs and clear the run/stop bit. - * HC will complete any current and actively pipelined transactions, and - * should halt within 16 microframes of the run/stop bit being cleared. - * Read HC Halted bit in the status register to see when the HC is finished. - * XXX: shouldn't we set HC_STATE_HALT here somewhere? + * Disable interrupts and begin the xHCI halting process. */ -int xhci_halt(struct xhci_hcd *xhci) +void xhci_quiesce(struct xhci_hcd *xhci) { u32 halted; u32 cmd; u32 mask; - xhci_dbg(xhci, "// Halt the HC\n"); - /* Disable all interrupts from the host controller */ mask = ~(XHCI_IRQS); halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT; if (!halted) @@ -91,12 +85,54 @@ int xhci_halt(struct xhci_hcd *xhci) cmd = xhci_readl(xhci, &xhci->op_regs->command); cmd &= mask; xhci_writel(xhci, cmd, &xhci->op_regs->command); +} + +/* + * Force HC into halt state. + * + * Disable any IRQs and clear the run/stop bit. + * HC will complete any current and actively pipelined transactions, and + * should halt within 16 microframes of the run/stop bit being cleared. + * Read HC Halted bit in the status register to see when the HC is finished. + * XXX: shouldn't we set HC_STATE_HALT here somewhere? + */ +int xhci_halt(struct xhci_hcd *xhci) +{ + xhci_dbg(xhci, "// Halt the HC\n"); + xhci_quiesce(xhci); return handshake(xhci, &xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); } /* + * Set the run bit and wait for the host to be running. + */ +int xhci_start(struct xhci_hcd *xhci) +{ + u32 temp; + int ret; + + temp = xhci_readl(xhci, &xhci->op_regs->command); + temp |= (CMD_RUN); + xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", + temp); + xhci_writel(xhci, temp, &xhci->op_regs->command); + + /* + * Wait for the HCHalted Status bit to be 0 to indicate the host is + * running. + */ + ret = handshake(xhci, &xhci->op_regs->status, + STS_HALT, 0, XHCI_MAX_HALT_USEC); + if (ret == -ETIMEDOUT) + xhci_err(xhci, "Host took too long to start, " + "waited %u microseconds.\n", + XHCI_MAX_HALT_USEC); + return ret; +} + +/* * Reset a halted HC, and set the internal HC state to HC_STATE_HALT. * * This resets pipelines, timers, counters, state machines, etc. 
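/*
 * xhci_halt(), xhci_start() and xhci_reset() above all rely on the
 * driver's handshake() helper, whose body lies outside these hunks.  A
 * minimal sketch of that poll-with-timeout pattern, not necessarily the
 * driver's exact code: spin on a register until the masked bits reach the
 * expected value, bail out early if the device has vanished, and give up
 * once the microsecond budget runs out.
 */
static int sketch_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* device removed */
			return -ENODEV;
		if ((result & mask) == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}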
@@ -107,6 +143,7 @@ int xhci_reset(struct xhci_hcd *xhci) { u32 command; u32 state; + int ret; state = xhci_readl(xhci, &xhci->op_regs->status); if ((state & STS_HALT) == 0) { @@ -121,32 +158,20 @@ int xhci_reset(struct xhci_hcd *xhci) /* XXX: Why does EHCI set this here? Shouldn't other code do this? */ xhci_to_hcd(xhci)->state = HC_STATE_HALT; - return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); -} + ret = handshake(xhci, &xhci->op_regs->command, + CMD_RESET, 0, 250 * 1000); + if (ret) + return ret; -/* - * Stop the HC from processing the endpoint queues. - */ -static void xhci_quiesce(struct xhci_hcd *xhci) -{ + xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n"); /* - * Queues are per endpoint, so we need to disable an endpoint or slot. - * - * To disable a slot, we need to insert a disable slot command on the - * command ring and ring the doorbell. This will also free any internal - * resources associated with the slot (which might not be what we want). - * - * A Release Endpoint command sounds better - doesn't free internal HC - * memory, but removes the endpoints from the schedule and releases the - * bandwidth, disables the doorbells, and clears the endpoint enable - * flag. Usually used prior to a set interface command. - * - * TODO: Implement after command ring code is done. + * xHCI cannot write to any doorbells or operational registers other + * than status until the "Controller Not Ready" flag is cleared. */ - BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state)); - xhci_dbg(xhci, "Finished quiescing -- code not written yet\n"); + return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); } + #if 0 /* Set up MSI-X table for entry 0 (may claim other entries later) */ static int xhci_setup_msix(struct xhci_hcd *xhci) @@ -261,8 +286,14 @@ static void xhci_work(struct xhci_hcd *xhci) /* Flush posted writes */ xhci_readl(xhci, &xhci->ir_set->irq_pending); - /* FIXME this should be a delayed service routine that clears the EHB */ - xhci_handle_event(xhci); + if (xhci->xhc_state & XHCI_STATE_DYING) + xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " + "Shouldn't IRQs be disabled?\n"); + else + /* FIXME this should be a delayed service routine + * that clears the EHB. + */ + xhci_handle_event(xhci); /* Clear the event handler busy flag (RW1C); the event ring should be empty. 
*/ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); @@ -335,6 +366,12 @@ void xhci_event_ring_work(unsigned long arg) spin_lock_irqsave(&xhci->lock, flags); temp = xhci_readl(xhci, &xhci->op_regs->status); xhci_dbg(xhci, "op reg status = 0x%x\n", temp); + if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { + xhci_dbg(xhci, "HW died, polling stopped.\n"); + spin_unlock_irqrestore(&xhci->lock, flags); + return; + } + temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp); xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled); @@ -354,11 +391,7 @@ void xhci_event_ring_work(unsigned long arg) if (!xhci->devs[i]) continue; for (j = 0; j < 31; ++j) { - struct xhci_ring *ring = xhci->devs[i]->eps[j].ring; - if (!ring) - continue; - xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); - xhci_debug_segment(xhci, ring->deq_seg); + xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]); } } @@ -453,17 +486,20 @@ int xhci_run(struct usb_hcd *hcd) if (NUM_TEST_NOOPS > 0) doorbell = xhci_setup_one_noop(xhci); + if (xhci->quirks & XHCI_NEC_HOST) + xhci_queue_vendor_command(xhci, 0, 0, 0, + TRB_TYPE(TRB_NEC_GET_FW)); + + if (xhci_start(xhci)) { + xhci_halt(xhci); + return -ENODEV; + } - temp = xhci_readl(xhci, &xhci->op_regs->command); - temp |= (CMD_RUN); - xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", - temp); - xhci_writel(xhci, temp, &xhci->op_regs->command); - /* Flush PCI posted writes */ - temp = xhci_readl(xhci, &xhci->op_regs->command); xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp); if (doorbell) (*doorbell)(xhci); + if (xhci->quirks & XHCI_NEC_HOST) + xhci_ring_cmd_db(xhci); xhci_dbg(xhci, "Finished xhci_run\n"); return 0; @@ -484,8 +520,6 @@ void xhci_stop(struct usb_hcd *hcd) struct xhci_hcd *xhci = hcd_to_xhci(hcd); spin_lock_irq(&xhci->lock); - if (HC_IS_RUNNING(hcd->state)) - xhci_quiesce(xhci); xhci_halt(xhci); xhci_reset(xhci); spin_unlock_irq(&xhci->lock); @@ -721,16 +755,35 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) * atomic context to this function, which may allocate memory. 
*/ spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); spin_unlock_irqrestore(&xhci->lock, flags); } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, - slot_id, ep_index); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; + if (xhci->devs[slot_id]->eps[ep_index].ep_state & + EP_GETTING_STREAMS) { + xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " + "is transitioning to using streams.\n"); + ret = -EINVAL; + } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & + EP_GETTING_NO_STREAMS) { + xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " + "is transitioning to " + "not having streams.\n"); + ret = -EINVAL; + } else { + ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + } spin_unlock_irqrestore(&xhci->lock, flags); } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); spin_unlock_irqrestore(&xhci->lock, flags); @@ -739,6 +792,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) } exit: return ret; +dying: + xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " + "non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + spin_unlock_irqrestore(&xhci->lock, flags); + return -ESHUTDOWN; } /* @@ -776,6 +835,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { unsigned long flags; int ret; + u32 temp; struct xhci_hcd *xhci; struct xhci_td *td; unsigned int ep_index; @@ -788,23 +848,54 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ret = usb_hcd_check_unlink_urb(hcd, urb, status); if (ret || !urb->hcpriv) goto done; + temp = xhci_readl(xhci, &xhci->op_regs->status); + if (temp == 0xffffffff) { + xhci_dbg(xhci, "HW died, freeing TD.\n"); + td = (struct xhci_td *) urb->hcpriv; + + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&xhci->lock, flags); + usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN); + kfree(td); + return ret; + } + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " + "non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + /* Let the stop endpoint command watchdog timer (which set this + * state) finish cleaning up the endpoint TD lists. We must + * have caught it in the middle of dropping a lock and giving + * back an URB. + */ + goto done; + } xhci_dbg(xhci, "Cancel URB %p\n", urb); xhci_dbg(xhci, "Event ring:\n"); xhci_debug_ring(xhci, xhci->event_ring); ep_index = xhci_get_endpoint_index(&urb->ep->desc); ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; - ep_ring = ep->ring; + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ep_ring) { + ret = -EINVAL; + goto done; + } + xhci_dbg(xhci, "Endpoint ring:\n"); xhci_debug_ring(xhci, ep_ring); td = (struct xhci_td *) urb->hcpriv; - ep->cancels_pending++; list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); /* Queue a stop endpoint command, but only if this is * the first cancellation to be handled. 
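/*
 * The temp == 0xffffffff test in xhci_urb_dequeue() above relies on the
 * fact that MMIO reads from a surprise-removed or powered-off PCI device
 * return all ones; a minimal sketch of that check, with an illustrative
 * helper name:
 */
static bool sketch_xhci_hw_gone(struct xhci_hcd *xhci)
{
	return xhci_readl(xhci, &xhci->op_regs->status) == 0xffffffff;
}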
*/ - if (ep->cancels_pending == 1) { + if (!(ep->ep_state & EP_HALT_PENDING)) { + ep->ep_state |= EP_HALT_PENDING; + ep->stop_cmds_pending++; + ep->stop_cmd_timer.expires = jiffies + + XHCI_STOP_EP_CMD_TIMEOUT * HZ; + add_timer(&ep->stop_cmd_timer); xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); xhci_ring_cmd_db(xhci); } @@ -877,7 +968,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, ctrl_ctx->drop_flags |= drop_flag; new_drop_flags = ctrl_ctx->drop_flags; - ctrl_ctx->add_flags = ~drop_flag; + ctrl_ctx->add_flags &= ~drop_flag; new_add_flags = ctrl_ctx->add_flags; last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); @@ -973,7 +1064,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, * for usb_set_interface() and usb_set_configuration() claim). */ if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], - udev, ep, GFP_KERNEL) < 0) { + udev, ep, GFP_NOIO) < 0) { dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", __func__, ep->desc.bEndpointAddress); return -ENOMEM; @@ -1139,6 +1230,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, cmd_completion = &virt_dev->cmd_completion; cmd_status = &virt_dev->cmd_status; } + init_completion(cmd_completion); if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, @@ -1147,6 +1239,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, udev->slot_id); if (ret < 0) { + if (command) + list_del(&command->cmd_list); spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); return -ENOMEM; @@ -1228,13 +1322,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); xhci_zero_in_ctx(xhci, virt_dev); - /* Free any old rings */ + /* Install new rings and free or cache any old rings */ for (i = 1; i < 31; ++i) { - if (virt_dev->eps[i].new_ring) { - xhci_ring_free(xhci, virt_dev->eps[i].ring); - virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; - virt_dev->eps[i].new_ring = NULL; + if (!virt_dev->eps[i].new_ring) + continue; + /* Only cache or free the old ring if it exists. + * It may not if this is the first add of an endpoint. + */ + if (virt_dev->eps[i].ring) { + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); } + virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; + virt_dev->eps[i].new_ring = NULL; } return ret; @@ -1326,7 +1425,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, * or it will attempt to resend it on the next doorbell ring. */ xhci_find_new_dequeue_state(xhci, udev->slot_id, - ep_index, ep->stopped_td, + ep_index, ep->stopped_stream, ep->stopped_td, &deq_state); /* HW with the reset endpoint quirk will use the saved dequeue state to @@ -1335,10 +1434,12 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { xhci_dbg(xhci, "Queueing new dequeue state\n"); xhci_queue_new_dequeue_state(xhci, udev->slot_id, - ep_index, &deq_state); + ep_index, ep->stopped_stream, &deq_state); } else { /* Better hope no one uses the input context between now and the * reset endpoint completion! + * XXX: No idea how this hardware will react when stream rings + * are enabled. 
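/*
 * A worked example (values illustrative) for the one-character fix in
 * xhci_drop_endpoint() above: assigning ~drop_flag would switch on every
 * other add flag, whereas &= ~drop_flag clears only the bit belonging to
 * the endpoint being dropped.
 */
static u32 sketch_clear_add_flag(u32 add_flags, u32 drop_flag)
{
	return add_flags & ~drop_flag;	/* e.g. 0x18 & ~0x08 == 0x10 */
}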
*/ xhci_dbg(xhci, "Setting up input context for " "configure endpoint command\n"); @@ -1395,12 +1496,516 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, kfree(virt_ep->stopped_td); xhci_ring_cmd_db(xhci); } + virt_ep->stopped_td = NULL; + virt_ep->stopped_trb = NULL; + virt_ep->stopped_stream = 0; spin_unlock_irqrestore(&xhci->lock, flags); if (ret) xhci_warn(xhci, "FIXME allocate a new ring segment\n"); } +static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, struct usb_host_endpoint *ep, + unsigned int slot_id) +{ + int ret; + unsigned int ep_index; + unsigned int ep_state; + + if (!ep) + return -EINVAL; + ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__); + if (ret <= 0) + return -EINVAL; + if (ep->ss_ep_comp.bmAttributes == 0) { + xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" + " descriptor for ep 0x%x does not support streams\n", + ep->desc.bEndpointAddress); + return -EINVAL; + } + + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; + if (ep_state & EP_HAS_STREAMS || + ep_state & EP_GETTING_STREAMS) { + xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " + "already has streams set up.\n", + ep->desc.bEndpointAddress); + xhci_warn(xhci, "Send email to xHCI maintainer and ask for " + "dynamic stream context array reallocation.\n"); + return -EINVAL; + } + if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { + xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " + "endpoint 0x%x; URBs are pending.\n", + ep->desc.bEndpointAddress); + return -EINVAL; + } + return 0; +} + +static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, + unsigned int *num_streams, unsigned int *num_stream_ctxs) +{ + unsigned int max_streams; + + /* The stream context array size must be a power of two */ + *num_stream_ctxs = roundup_pow_of_two(*num_streams); + /* + * Find out how many primary stream array entries the host controller + * supports. Later we may use secondary stream arrays (similar to 2nd + * level page entries), but that's an optional feature for xHCI host + * controllers. xHCs must support at least 4 stream IDs. + */ + max_streams = HCC_MAX_PSA(xhci->hcc_params); + if (*num_stream_ctxs > max_streams) { + xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", + max_streams); + *num_stream_ctxs = max_streams; + *num_streams = max_streams; + } +} + +/* Returns an error code if one of the endpoint already has streams. + * This does not change any data structures, it only checks and gathers + * information. 
+ */ +static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int *num_streams, u32 *changed_ep_bitmask) +{ + unsigned int max_streams; + unsigned int endpoint_flag; + int i; + int ret; + + for (i = 0; i < num_eps; i++) { + ret = xhci_check_streams_endpoint(xhci, udev, + eps[i], udev->slot_id); + if (ret < 0) + return ret; + + max_streams = USB_SS_MAX_STREAMS( + eps[i]->ss_ep_comp.bmAttributes); + if (max_streams < (*num_streams - 1)) { + xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", + eps[i]->desc.bEndpointAddress, + max_streams); + *num_streams = max_streams+1; + } + + endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); + if (*changed_ep_bitmask & endpoint_flag) + return -EINVAL; + *changed_ep_bitmask |= endpoint_flag; + } + return 0; +} + +static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps) +{ + u32 changed_ep_bitmask = 0; + unsigned int slot_id; + unsigned int ep_index; + unsigned int ep_state; + int i; + + slot_id = udev->slot_id; + if (!xhci->devs[slot_id]) + return 0; + + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; + /* Are streams already being freed for the endpoint? */ + if (ep_state & EP_GETTING_NO_STREAMS) { + xhci_warn(xhci, "WARN Can't disable streams for " + "endpoint 0x%x\n, " + "streams are being disabled already.", + eps[i]->desc.bEndpointAddress); + return 0; + } + /* Are there actually any streams to free? */ + if (!(ep_state & EP_HAS_STREAMS) && + !(ep_state & EP_GETTING_STREAMS)) { + xhci_warn(xhci, "WARN Can't disable streams for " + "endpoint 0x%x\n, " + "streams are already disabled!", + eps[i]->desc.bEndpointAddress); + xhci_warn(xhci, "WARN xhci_free_streams() called " + "with non-streams endpoint\n"); + return 0; + } + changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); + } + return changed_ep_bitmask; +} + +/* + * The USB device drivers use this function (though the HCD interface in USB + * core) to prepare a set of bulk endpoints to use streams. Streams are used to + * coordinate mass storage command queueing across multiple endpoints (basically + * a stream ID == a task ID). + * + * Setting up streams involves allocating the same size stream context array + * for each endpoint and issuing a configure endpoint command for all endpoints. + * + * Don't allow the call to succeed if one endpoint only supports one stream + * (which means it doesn't support streams at all). + * + * Drivers may get less stream IDs than they asked for, if the host controller + * hardware or endpoints claim they can't support the number of requested + * stream IDs. + */ +int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int num_streams, gfp_t mem_flags) +{ + int i, ret; + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_command *config_cmd; + unsigned int ep_index; + unsigned int num_stream_ctxs; + unsigned long flags; + u32 changed_ep_bitmask = 0; + + if (!eps) + return -EINVAL; + + /* Add one to the number of streams requested to account for + * stream 0 that is reserved for xHCI usage. 
+ */ + num_streams += 1; + xhci = hcd_to_xhci(hcd); + xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", + num_streams); + + config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); + if (!config_cmd) { + xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); + return -ENOMEM; + } + + /* Check to make sure all endpoints are not already configured for + * streams. While we're at it, find the maximum number of streams that + * all the endpoints will support and check for duplicate endpoints. + */ + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, + num_eps, &num_streams, &changed_ep_bitmask); + if (ret < 0) { + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; + } + if (num_streams <= 1) { + xhci_warn(xhci, "WARN: endpoints can't handle " + "more than one stream.\n"); + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + return -EINVAL; + } + vdev = xhci->devs[udev->slot_id]; + /* Mark each endpoint as being in transistion, so + * xhci_urb_enqueue() will reject all URBs. + */ + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; + } + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Setup internal data structures and allocate HW data structures for + * streams (but don't install the HW structures in the input context + * until we're sure all memory allocation succeeded). + */ + xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); + xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", + num_stream_ctxs, num_streams); + + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, + num_stream_ctxs, + num_streams, mem_flags); + if (!vdev->eps[ep_index].stream_info) + goto cleanup; + /* Set maxPstreams in endpoint context and update deq ptr to + * point to stream context array. FIXME + */ + } + + /* Set up the input context for a configure endpoint command. */ + for (i = 0; i < num_eps; i++) { + struct xhci_ep_ctx *ep_ctx; + + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); + + xhci_endpoint_copy(xhci, config_cmd->in_ctx, + vdev->out_ctx, ep_index); + xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, + vdev->eps[ep_index].stream_info); + } + /* Tell the HW to drop its old copy of the endpoint context info + * and add the updated copy from the input context. + */ + xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, + vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); + + /* Issue and wait for the configure endpoint command */ + ret = xhci_configure_endpoint(xhci, udev, config_cmd, + false, false); + + /* xHC rejected the configure endpoint command for some reason, so we + * leave the old ring intact and free our internal streams data + * structure. 
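While the Configure Endpoint command is being prepared and issued, xhci_alloc_streams() flags every endpoint with EP_GETTING_STREAMS so xhci_urb_enqueue() rejects new URBs; on success the flag is traded for EP_HAS_STREAMS, on failure the cleanup path clears both. A compressed sketch of just that flag handling (the real function interleaves locking and allocation between these steps; the bit values match the new defines in xhci.h):

#include <stdio.h>

#define EP_GETTING_STREAMS      (1 << 3)
#define EP_HAS_STREAMS          (1 << 4)

/* Sketch of the ep_state handling in xhci_alloc_streams(): mark the
 * endpoint as transitioning, then either commit or roll back. */
static void alloc_streams_transition(unsigned int *ep_state, int command_ok)
{
        *ep_state |= EP_GETTING_STREAMS;   /* URB enqueue refused from here on */
        /* ... allocate stream contexts, issue Configure Endpoint ... */
        *ep_state &= ~EP_GETTING_STREAMS;
        if (command_ok)
                *ep_state |= EP_HAS_STREAMS;   /* rings are per-stream from now on */
        else
                *ep_state &= ~EP_HAS_STREAMS;  /* cleanup: back to a normal ring */
}

int main(void)
{
        unsigned int ok = 0, failed = 0;

        alloc_streams_transition(&ok, 1);
        alloc_streams_transition(&failed, 0);
        printf("after success: 0x%x, after failure: 0x%x\n", ok, failed);
        return 0;
}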
+ */ + if (ret < 0) + goto cleanup; + + spin_lock_irqsave(&xhci->lock, flags); + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; + xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", + udev->slot_id, ep_index); + vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; + } + xhci_free_command(xhci, config_cmd); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Subtract 1 for stream 0, which drivers can't use */ + return num_streams - 1; + +cleanup: + /* If it didn't work, free the streams! */ + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); + vdev->eps[ep_index].stream_info = NULL; + /* FIXME Unset maxPstreams in endpoint context and + * update deq ptr to point to normal string ring. + */ + vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; + vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; + xhci_endpoint_zero(xhci, vdev, eps[i]); + } + xhci_free_command(xhci, config_cmd); + return -ENOMEM; +} + +/* Transition the endpoint from using streams to being a "normal" endpoint + * without streams. + * + * Modify the endpoint context state, submit a configure endpoint command, + * and free all endpoint rings for streams if that completes successfully. + */ +int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + gfp_t mem_flags) +{ + int i, ret; + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_command *command; + unsigned int ep_index; + unsigned long flags; + u32 changed_ep_bitmask; + + xhci = hcd_to_xhci(hcd); + vdev = xhci->devs[udev->slot_id]; + + /* Set up a configure endpoint command to remove the streams rings */ + spin_lock_irqsave(&xhci->lock, flags); + changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, + udev, eps, num_eps); + if (changed_ep_bitmask == 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + return -EINVAL; + } + + /* Use the xhci_command structure from the first endpoint. We may have + * allocated too many, but the driver may call xhci_free_streams() for + * each endpoint it grouped into one call to xhci_alloc_streams(). + */ + ep_index = xhci_get_endpoint_index(&eps[0]->desc); + command = vdev->eps[ep_index].stream_info->free_streams_command; + for (i = 0; i < num_eps; i++) { + struct xhci_ep_ctx *ep_ctx; + + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= + EP_GETTING_NO_STREAMS; + + xhci_endpoint_copy(xhci, command->in_ctx, + vdev->out_ctx, ep_index); + xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, + &vdev->eps[ep_index]); + } + xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, + vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Issue and wait for the configure endpoint command, + * which must succeed. + */ + ret = xhci_configure_endpoint(xhci, udev, command, + false, true); + + /* xHC rejected the configure endpoint command for some reason, so we + * leave the streams rings intact. 
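Freeing streams is the mirror image: xhci_calculate_no_streams_bitmask() earlier in this hunk refuses endpoints that are already being torn down or never had streams, and xhci_free_streams() marks each remaining endpoint with EP_GETTING_NO_STREAMS until the Configure Endpoint command completes, after which both stream flags are cleared. A sketch of that guard and transition, reusing the same flag values:

#include <stdio.h>

#define EP_GETTING_STREAMS      (1 << 3)
#define EP_HAS_STREAMS          (1 << 4)
#define EP_GETTING_NO_STREAMS   (1 << 5)

/* Guard from xhci_calculate_no_streams_bitmask(): only endpoints that have
 * (or are getting) streams, and are not already being torn down, qualify. */
static int may_free_streams(unsigned int ep_state)
{
        if (ep_state & EP_GETTING_NO_STREAMS)
                return 0;       /* teardown already in progress */
        if (!(ep_state & (EP_HAS_STREAMS | EP_GETTING_STREAMS)))
                return 0;       /* streams were never set up */
        return 1;
}

/* Transition driven by xhci_free_streams() once the command succeeds. */
static void free_streams_transition(unsigned int *ep_state)
{
        *ep_state |= EP_GETTING_NO_STREAMS;
        /* ... Configure Endpoint command removes the stream contexts ... */
        *ep_state &= ~(EP_GETTING_NO_STREAMS | EP_HAS_STREAMS);
}

int main(void)
{
        unsigned int ep_state = EP_HAS_STREAMS;

        if (may_free_streams(ep_state))
                free_streams_transition(&ep_state);
        printf("final ep_state: 0x%x\n", ep_state);
        return 0;
}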
+ */ + if (ret < 0) + return ret; + + spin_lock_irqsave(&xhci->lock, flags); + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); + vdev->eps[ep_index].stream_info = NULL; + /* FIXME Unset maxPstreams in endpoint context and + * update deq ptr to point to normal string ring. + */ + vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; + vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; + } + spin_unlock_irqrestore(&xhci->lock, flags); + + return 0; +} + +/* + * This submits a Reset Device Command, which will set the device state to 0, + * set the device address to 0, and disable all the endpoints except the default + * control endpoint. The USB core should come back and call + * xhci_address_device(), and then re-set up the configuration. If this is + * called because of a usb_reset_and_verify_device(), then the old alternate + * settings will be re-installed through the normal bandwidth allocation + * functions. + * + * Wait for the Reset Device command to finish. Remove all structures + * associated with the endpoints that were disabled. Clear the input device + * structure? Cache the rings? Reset the control endpoint 0 max packet size? + */ +int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + int ret, i; + unsigned long flags; + struct xhci_hcd *xhci; + unsigned int slot_id; + struct xhci_virt_device *virt_dev; + struct xhci_command *reset_device_cmd; + int timeleft; + int last_freed_endpoint; + + ret = xhci_check_args(hcd, udev, NULL, 0, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + slot_id = udev->slot_id; + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) { + xhci_dbg(xhci, "%s called with invalid slot ID %u\n", + __func__, slot_id); + return -EINVAL; + } + + xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); + /* Allocate the command structure that holds the struct completion. + * Assume we're in process context, since the normal device reset + * process has to wait for the device anyway. Storage devices are + * reset as part of error handling, so use GFP_NOIO instead of + * GFP_KERNEL. + */ + reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); + if (!reset_device_cmd) { + xhci_dbg(xhci, "Couldn't allocate command structure.\n"); + return -ENOMEM; + } + + /* Attempt to submit the Reset Device command to the command ring */ + spin_lock_irqsave(&xhci->lock, flags); + reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; + list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); + ret = xhci_queue_reset_device(xhci, slot_id); + if (ret) { + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + list_del(&reset_device_cmd->cmd_list); + spin_unlock_irqrestore(&xhci->lock, flags); + goto command_cleanup; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Wait for the Reset Device command to finish */ + timeleft = wait_for_completion_interruptible_timeout( + reset_device_cmd->completion, + USB_CTRL_SET_TIMEOUT); + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for reset device command\n", + timeleft == 0 ? "Timeout" : "Signal"); + spin_lock_irqsave(&xhci->lock, flags); + /* The timeout might have raced with the event ring handler, so + * only delete from the list if the item isn't poisoned. 
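xhci_reset_device() waits for the command with wait_for_completion_interruptible_timeout(), whose return value encodes three outcomes: positive means the completion fired in time, zero means timeout, and negative means a signal interrupted the wait; the patch folds the last two into -ETIME after logging which one happened. A tiny userspace illustration of that convention (ETIME comes from <errno.h>):

#include <errno.h>
#include <stdio.h>

/* Mirrors the timeleft handling in xhci_reset_device(): >0 completed,
 * 0 timed out, <0 interrupted by a signal; the last two become -ETIME. */
static int interpret_wait_result(long timeleft)
{
        if (timeleft > 0)
                return 0;
        fprintf(stderr, "%s while waiting for reset device command\n",
                timeleft == 0 ? "Timeout" : "Signal");
        return -ETIME;
}

int main(void)
{
        printf("%d %d %d\n", interpret_wait_result(5),
               interpret_wait_result(0), interpret_wait_result(-4));
        return 0;
}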
+ */ + if (reset_device_cmd->cmd_list.next != LIST_POISON1) + list_del(&reset_device_cmd->cmd_list); + spin_unlock_irqrestore(&xhci->lock, flags); + ret = -ETIME; + goto command_cleanup; + } + + /* The Reset Device command can't fail, according to the 0.95/0.96 spec, + * unless we tried to reset a slot ID that wasn't enabled, + * or the device wasn't in the addressed or configured state. + */ + ret = reset_device_cmd->status; + switch (ret) { + case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ + case COMP_CTX_STATE: /* 0.96 completion code for same thing */ + xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", + slot_id, + xhci_get_slot_state(xhci, virt_dev->out_ctx)); + xhci_info(xhci, "Not freeing device rings.\n"); + /* Don't treat this as an error. May change my mind later. */ + ret = 0; + goto command_cleanup; + case COMP_SUCCESS: + xhci_dbg(xhci, "Successful reset device command.\n"); + break; + default: + if (xhci_is_vendor_info_code(xhci, ret)) + break; + xhci_warn(xhci, "Unknown completion code %u for " + "reset device command.\n", ret); + ret = -EINVAL; + goto command_cleanup; + } + + /* Everything but endpoint 0 is disabled, so free or cache the rings. */ + last_freed_endpoint = 1; + for (i = 1; i < 31; ++i) { + if (!virt_dev->eps[i].ring) + continue; + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + last_freed_endpoint = i; + } + xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); + ret = 0; + +command_cleanup: + xhci_free_command(xhci, reset_device_cmd); + return ret; +} + /* * At this point, the struct usb_device is about to go away, the device has * disconnected, and all traffic has been stopped and the endpoints have been @@ -1409,12 +2014,32 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *virt_dev; unsigned long flags; + u32 state; + int i; if (udev->slot_id == 0) return; + virt_dev = xhci->devs[udev->slot_id]; + if (!virt_dev) + return; + + /* Stop any wayward timer functions (which may grab the lock) */ + for (i = 0; i < 31; ++i) { + virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; + del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); + } spin_lock_irqsave(&xhci->lock, flags); + /* Don't disable the slot if the host controller is dead. 
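Before queueing a Disable Slot command, xhci_free_dev() now stops the per-endpoint watchdog timers and checks whether the host is reachable at all: an all-ones status register (typical of a surprise-removed controller) or the new XHCI_STATE_DYING flag means the hardware must not be touched again. A sketch of that test, with the flag value taken from the xhci.h addition further down:

#include <stdio.h>

#define XHCI_STATE_DYING        (1 << 0)        /* as added to xhci.h */

/* The reachability test xhci_free_dev() performs before queueing a
 * Disable Slot command. */
static int host_is_dead(unsigned int status_reg, unsigned int xhc_state)
{
        return status_reg == 0xffffffff || (xhc_state & XHCI_STATE_DYING);
}

int main(void)
{
        printf("%d %d %d\n",
               host_is_dead(0xffffffff, 0),         /* controller gone from the bus */
               host_is_dead(0x0, XHCI_STATE_DYING), /* watchdog marked it dying */
               host_is_dead(0x0, 0));               /* healthy */
        return 0;
}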
*/ + state = xhci_readl(xhci, &xhci->op_regs->status); + if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { + xhci_free_virt_device(xhci, udev->slot_id); + spin_unlock_irqrestore(&xhci->lock, flags); + return; + } + if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); @@ -1509,6 +2134,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) /* If this is a Set Address to an unconfigured device, setup ep 0 */ if (!udev->config) xhci_setup_addressable_virt_dev(xhci, udev); + else + xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); /* Otherwise, assume the core has the device configured how it wants */ xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); @@ -1618,7 +2245,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); return -EINVAL; } - config_cmd = xhci_alloc_command(xhci, true, mem_flags); + config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); if (!config_cmd) { xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); return -ENOMEM; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 4b254b6fa24..6c7e3430ec9 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -26,8 +26,8 @@ #include <linux/usb.h> #include <linux/timer.h> #include <linux/kernel.h> +#include <linux/usb/hcd.h> -#include "../core/hcd.h" /* Code sharing between pci-quirks and xhci hcd */ #include "xhci-ext-caps.h" @@ -117,7 +117,7 @@ struct xhci_cap_regs { /* true: no secondary Stream ID Support */ #define HCC_NSS(p) ((p) & (1 << 7)) /* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */ -#define HCC_MAX_PSA (1 << ((((p) >> 12) & 0xf) + 1)) +#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1)) /* Extended Capabilities pointer from PCI base - section 5.3.6 */ #define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p) @@ -444,6 +444,7 @@ struct xhci_doorbell_array { /* Endpoint Target - bits 0:7 */ #define EPI_TO_DB(p) (((p) + 1) & 0xff) +#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16) /** @@ -585,6 +586,10 @@ struct xhci_ep_ctx { /* Interval - period between requests to an endpoint - 125u increments. */ #define EP_INTERVAL(p) ((p & 0xff) << 16) #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) +#define EP_MAXPSTREAMS_MASK (0x1f << 10) +#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) +/* Endpoint is set up with a Linear Stream Array (vs. 
Secondary Stream Array) */ +#define EP_HAS_LSA (1 << 15) /* ep_info2 bitmasks */ /* @@ -609,6 +614,10 @@ struct xhci_ep_ctx { #define MAX_PACKET_MASK (0xffff << 16) #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) +/* tx_info bitmasks */ +#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) +#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) + /** * struct xhci_input_control_context @@ -644,21 +653,73 @@ struct xhci_command { /* add context bitmasks */ #define ADD_EP(x) (0x1 << x) +struct xhci_stream_ctx { + /* 64-bit stream ring address, cycle state, and stream type */ + u64 stream_ring; + /* offset 0x14 - 0x1f reserved for HC internal use */ + u32 reserved[2]; +}; + +/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */ +#define SCT_FOR_CTX(p) (((p) << 1) & 0x7) +/* Secondary stream array type, dequeue pointer is to a transfer ring */ +#define SCT_SEC_TR 0 +/* Primary stream array type, dequeue pointer is to a transfer ring */ +#define SCT_PRI_TR 1 +/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */ +#define SCT_SSA_8 2 +#define SCT_SSA_16 3 +#define SCT_SSA_32 4 +#define SCT_SSA_64 5 +#define SCT_SSA_128 6 +#define SCT_SSA_256 7 + +/* Assume no secondary streams for now */ +struct xhci_stream_info { + struct xhci_ring **stream_rings; + /* Number of streams, including stream 0 (which drivers can't use) */ + unsigned int num_streams; + /* The stream context array may be bigger than + * the number of streams the driver asked for + */ + struct xhci_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + /* For mapping physical TRB addresses to segments in stream rings */ + struct radix_tree_root trb_address_map; + struct xhci_command *free_streams_command; +}; + +#define SMALL_STREAM_ARRAY_SIZE 256 +#define MEDIUM_STREAM_ARRAY_SIZE 1024 + struct xhci_virt_ep { struct xhci_ring *ring; + /* Related to endpoints that are configured to use stream IDs only */ + struct xhci_stream_info *stream_info; /* Temporary storage in case the configure endpoint command fails and we * have to restore the device state to the previous state */ struct xhci_ring *new_ring; unsigned int ep_state; #define SET_DEQ_PENDING (1 << 0) -#define EP_HALTED (1 << 1) +#define EP_HALTED (1 << 1) /* For stall handling */ +#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */ +/* Transitioning the endpoint to using streams, don't enqueue URBs */ +#define EP_GETTING_STREAMS (1 << 3) +#define EP_HAS_STREAMS (1 << 4) +/* Transitioning the endpoint to not using streams, don't enqueue URBs */ +#define EP_GETTING_NO_STREAMS (1 << 5) /* ---- Related to URB cancellation ---- */ struct list_head cancelled_td_list; - unsigned int cancels_pending; /* The TRB that was last reported in a stopped endpoint ring */ union xhci_trb *stopped_trb; struct xhci_td *stopped_td; + unsigned int stopped_stream; + /* Watchdog timer for stop endpoint command to cancel URBs */ + struct timer_list stop_cmd_timer; + int stop_cmds_pending; + struct xhci_hcd *xhci; }; struct xhci_virt_device { @@ -673,6 +734,10 @@ struct xhci_virt_device { struct xhci_container_ctx *out_ctx; /* Used for addressing devices and configuration changes */ struct xhci_container_ctx *in_ctx; + /* Rings saved to ensure old alt settings can be re-instated */ + struct xhci_ring **ring_cache; + int num_rings_cached; +#define XHCI_MAX_RINGS_CACHED 31 struct xhci_virt_ep eps[31]; struct completion cmd_completion; /* Status of the last command issued for this device */ @@ -698,14 +763,6 @@ struct 
xhci_device_context_array { */ -struct xhci_stream_ctx { - /* 64-bit stream ring address, cycle state, and stream type */ - u64 stream_ring; - /* offset 0x14 - 0x1f reserved for HC internal use */ - u32 reserved[2]; -}; - - struct xhci_transfer_event { /* 64-bit buffer address, or immediate data */ u64 buffer; @@ -816,6 +873,10 @@ struct xhci_event_cmd { #define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1) #define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16) +/* Set TR Dequeue Pointer command TRB fields */ +#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16)) +#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16) + /* Port Status Change Event TRB fields */ /* Port ID - bits 31:24 */ @@ -824,9 +885,6 @@ struct xhci_event_cmd { /* Normal TRB fields */ /* transfer_len bitmasks - bits 0:16 */ #define TRB_LEN(p) ((p) & 0x1ffff) -/* TD size - number of bytes remaining in the TD (including this TRB): - * bits 17 - 21. Shift the number of bytes by 10. */ -#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17) /* Interrupter Target - which MSI-X vector to target the completion event at */ #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) @@ -867,6 +925,7 @@ union xhci_trb { /* TRB bit mask */ #define TRB_TYPE_BITMASK (0xfc00) #define TRB_TYPE(p) ((p) << 10) +#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10) /* TRB type IDs */ /* bulk, interrupt, isoc scatter/gather, and control data stage */ #define TRB_NORMAL 1 @@ -934,6 +993,14 @@ union xhci_trb { #define TRB_MFINDEX_WRAP 39 /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */ +/* Nec vendor-specific command completion event. */ +#define TRB_NEC_CMD_COMP 48 +/* Get NEC firmware revision. */ +#define TRB_NEC_GET_FW 49 + +#define NEC_FW_MINOR(p) (((p) >> 0) & 0xff) +#define NEC_FW_MAJOR(p) (((p) >> 8) & 0xff) + /* * TRBS_PER_SEGMENT must be a multiple of 4, * since the command ring is 64-byte aligned. @@ -943,6 +1010,10 @@ union xhci_trb { /* Allow two commands + a link TRB, along with any reserved command TRBs */ #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) #define SEGMENT_SIZE (TRBS_PER_SEGMENT*16) +/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE). + * Change this if you change TRBS_PER_SEGMENT! + */ +#define SEGMENT_SHIFT 10 /* TRB buffer pointers can't cross 64KB boundaries */ #define TRB_MAX_BUFF_SHIFT 16 #define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT) @@ -984,6 +1055,7 @@ struct xhci_ring { * if we own the TRB (if we are the consumer). See section 4.9.1. */ u32 cycle_state; + unsigned int stream_id; }; struct xhci_erst_entry { @@ -1022,6 +1094,8 @@ struct xhci_scratchpad { #define ERST_ENTRIES 1 /* Poll every 60 seconds */ #define POLL_TIMEOUT 60 +/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */ +#define XHCI_STOP_EP_CMD_TIMEOUT 5 /* XXX: Make these module parameters */ @@ -1077,12 +1151,29 @@ struct xhci_hcd { /* DMA pools */ struct dma_pool *device_pool; struct dma_pool *segment_pool; + struct dma_pool *small_streams_pool; + struct dma_pool *medium_streams_pool; #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING /* Poll the rings - for debugging */ struct timer_list event_ring_timer; int zombie; #endif + /* Host controller watchdog timer structures */ + unsigned int xhc_state; +/* Host controller is dying - not responding to commands. "I'm not dead yet!" + * + * xHC interrupts have been disabled and a watchdog timer will (or has already) + * halt the xHCI host, and complete all URBs with an -ESHUTDOWN code. 
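The new TRB field macros are plain shift-and-mask helpers and can be exercised in isolation. The defines below are adapted from the additions to xhci.h (with a u suffix on the mask so the shifts stay well defined for a userspace int); the sample values are invented:

#include <stdio.h>

#define STREAM_ID_FOR_TRB(p)    (((p) & 0xffffu) << 16)
#define TRB_TO_STREAM_ID(p)     (((p) & (0xffffu << 16)) >> 16)
#define NEC_FW_MAJOR(p)         (((p) >> 8) & 0xff)
#define NEC_FW_MINOR(p)         (((p) >> 0) & 0xff)

int main(void)
{
        unsigned int field = STREAM_ID_FOR_TRB(7u);
        unsigned int fw = 0x3021;       /* invented TRB_NEC_GET_FW payload */

        printf("stream id packed: 0x%08x, unpacked: %u\n",
               field, TRB_TO_STREAM_ID(field));
        printf("NEC firmware %u.%u\n", NEC_FW_MAJOR(fw), NEC_FW_MINOR(fw));
        return 0;
}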
Any code + * that sees this status (other than the timer that set it) should stop touching + * hardware immediately. Interrupt handlers should return immediately when + * they see this status (any time they drop and re-acquire xhci->lock). + * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without + * putting the TD on the canceled list, etc. + * + * There are no reports of xHCI host controllers that display this issue. + */ +#define XHCI_STATE_DYING (1 << 0) /* Statistics */ int noops_submitted; int noops_handled; @@ -1090,6 +1181,7 @@ struct xhci_hcd { unsigned int quirks; #define XHCI_LINK_TRB_QUIRK (1 << 0) #define XHCI_RESET_EP_QUIRK (1 << 1) +#define XHCI_NEC_HOST (1 << 2) }; /* For testing purposes */ @@ -1188,6 +1280,11 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); +char *xhci_get_slot_state(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx); +void xhci_dbg_ep_rings(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + struct xhci_virt_ep *ep); /* xHCI memory management */ void xhci_mem_cleanup(struct xhci_hcd *xhci); @@ -1195,6 +1292,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); +void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, + struct usb_device *udev); unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc); unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index); @@ -1211,8 +1310,35 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags); void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); +void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + unsigned int ep_index); +struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, + unsigned int num_stream_ctxs, + unsigned int num_streams, gfp_t flags); +void xhci_free_stream_info(struct xhci_hcd *xhci, + struct xhci_stream_info *stream_info); +void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + struct xhci_stream_info *stream_info); +void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, + struct xhci_ep_ctx *ep_ctx, + struct xhci_virt_ep *ep); +struct xhci_ring *xhci_dma_to_transfer_ring( + struct xhci_virt_ep *ep, + u64 address); +struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, + struct urb *urb); +struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id); +struct xhci_ring *xhci_stream_id_to_ring( + struct xhci_virt_device *dev, + unsigned int ep_index, + unsigned int stream_id); struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, - bool allocate_completion, gfp_t mem_flags); + bool allocate_in_ctx, bool allocate_completion, + gfp_t mem_flags); void xhci_free_command(struct xhci_hcd *xhci, struct xhci_command *command); @@ -1223,6 
+1349,7 @@ void xhci_unregister_pci(void); #endif /* xHCI host controller glue */ +void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); int xhci_init(struct usb_hcd *hcd); @@ -1233,6 +1360,12 @@ int xhci_get_frame(struct usb_hcd *hcd); irqreturn_t xhci_irq(struct usb_hcd *hcd); int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); +int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int num_streams, gfp_t mem_flags); +int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + gfp_t mem_flags); int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); @@ -1241,11 +1374,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); +int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev); int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); +struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, + union xhci_trb *start_trb, union xhci_trb *end_trb, + dma_addr_t suspect_dma); +int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code); void xhci_ring_cmd_db(struct xhci_hcd *xhci); void *xhci_setup_one_noop(struct xhci_hcd *xhci); void xhci_handle_event(struct xhci_hcd *xhci); @@ -1253,6 +1391,8 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci); int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id); int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); +int xhci_queue_vendor_command(struct xhci_hcd *xhci, + u32 field1, u32 field2, u32 field3, u32 field4); int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index); int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, @@ -1267,17 +1407,21 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index); +int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id); void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, - struct xhci_td *cur_td, struct xhci_dequeue_state *state); + unsigned int stream_id, struct xhci_td *cur_td, + struct xhci_dequeue_state *state); void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id, struct xhci_dequeue_state *deq_state); void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, struct usb_device *udev, unsigned int ep_index); void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); +void xhci_stop_endpoint_command_watchdog(unsigned 
long arg); /* xHCI roothub code */ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
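The prototypes added near the end of xhci.h (xhci_stream_id_to_ring(), xhci_triad_to_transfer_ring(), the stream_id parameters on the dequeue-state helpers) exist so the transfer ring for a TD can be picked by stream ID. A plausible sketch of that lookup, based only on the structures declared in this patch: stream 0 maps to the ordinary endpoint ring, other IDs index the per-endpoint stream_rings array after a bounds check. The trimmed-down structs here are stand-ins, not the kernel ones:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the structures this patch extends; only the fields the
 * lookup needs are kept. */
struct ring { int dummy; };

struct stream_info {
        struct ring **stream_rings;     /* entry 0 not handed to drivers */
        unsigned int num_streams;       /* includes stream 0 */
};

struct virt_ep {
        struct ring *ring;              /* the ordinary, non-stream ring */
        struct stream_info *stream_info;
};

/* Plausible shape of the stream-aware lookup behind the new prototypes:
 * stream 0 means the ordinary ring, other IDs index the per-stream array. */
static struct ring *stream_id_to_ring(struct virt_ep *ep, unsigned int stream_id)
{
        if (stream_id == 0)
                return ep->ring;
        if (!ep->stream_info || stream_id >= ep->stream_info->num_streams)
                return NULL;            /* caller treats this as a driver bug */
        return ep->stream_info->stream_rings[stream_id];
}

int main(void)
{
        struct ring normal_ring, per_stream_ring;
        struct ring *rings[2] = { NULL, &per_stream_ring };
        struct stream_info info = { rings, 2 };
        struct virt_ep ep = { &normal_ring, &info };

        printf("%d %d %d\n",
               stream_id_to_ring(&ep, 0) == &normal_ring,
               stream_id_to_ring(&ep, 1) == &per_stream_ring,
               stream_id_to_ring(&ep, 2) == NULL);
        return 0;
}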