Diffstat (limited to 'drivers/mmc/host')
-rw-r--r-- | drivers/mmc/host/Kconfig          |   33
-rw-r--r-- | drivers/mmc/host/Makefile         |    1
-rw-r--r-- | drivers/mmc/host/dw_mmc.c         |    6
-rw-r--r-- | drivers/mmc/host/mmci.c           |   25
-rw-r--r-- | drivers/mmc/host/omap_hsmmc.c     |    3
-rw-r--r-- | drivers/mmc/host/sdhci-pci.c      |   49
-rw-r--r-- | drivers/mmc/host/sdhci-pxa.c      |   48
-rw-r--r-- | drivers/mmc/host/sdhci-tegra.c    |    2
-rw-r--r-- | drivers/mmc/host/sdhci.c          |  854
-rw-r--r-- | drivers/mmc/host/sdhci.h          |   59
-rw-r--r-- | drivers/mmc/host/sdricoh_cs.c     |    2
-rw-r--r-- | drivers/mmc/host/sh_mmcif.c       |  126
-rw-r--r-- | drivers/mmc/host/sh_mobile_sdhi.c |   50
-rw-r--r-- | drivers/mmc/host/tmio_mmc.c       |   34
-rw-r--r-- | drivers/mmc/host/tmio_mmc.h       |   16
-rw-r--r-- | drivers/mmc/host/tmio_mmc_dma.c   |   21
-rw-r--r-- | drivers/mmc/host/tmio_mmc_pio.c   |  184
-rw-r--r-- | drivers/mmc/host/vub300.c         | 2506
18 files changed, 3854 insertions(+), 165 deletions(-)
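To make the new clock path easier to follow before reading the sdhci.c hunk below, here is a minimal, standalone sketch (not part of the patch) of the divisor selection that sdhci_set_clock() gains for v3.00 hosts: prefer Programmable Clock Mode when the controller advertises a clock multiplier, otherwise fall back to the even-divisor search. The helper name and the printf harness are invented for illustration; only the selection logic mirrors the hunk.

	/* Illustrative sketch of SDHCI 3.0 clock selection; not patch content. */
	#include <stdio.h>

	#define SDHCI_MAX_DIV_SPEC_300 2046

	/* Returns the resulting SDCLK frequency; reports the register divisor field. */
	static unsigned int pick_sdhci3_clock(unsigned int max_clk, unsigned int clk_mul,
					      unsigned int target, unsigned int *div_field,
					      int *prog_mode)
	{
		unsigned int div;

		if (clk_mul) {
			/* Programmable Clock Mode: f = (max_clk * clk_mul) / div, div = 1..1024 */
			for (div = 1; div <= 1024; div++)
				if ((max_clk * clk_mul) / div <= target)
					break;
			*prog_mode = 1;
			*div_field = div - 1;		/* register encodes div - 1 */
			return (max_clk * clk_mul) / div;
		}

		/* Divided Clock Mode: v3.00 divisors are even, up to 2046 */
		if (max_clk <= target) {
			div = 1;
		} else {
			for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2)
				if (max_clk / div <= target)
					break;
		}
		*prog_mode = 0;
		*div_field = div >> 1;			/* register encodes div / 2 */
		return max_clk / div;
	}

	int main(void)
	{
		unsigned int div_field;
		int prog;
		unsigned int f = pick_sdhci3_clock(200000000, 8, 50000000,
						   &div_field, &prog);

		printf("mode=%s div_field=%u SDCLK=%u Hz\n",
		       prog ? "programmable" : "divided", div_field, f);
		return 0;
	}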
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 94df40531c3..56dbf3f6ad0 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -154,7 +154,7 @@ config MMC_SDHCI_DOVE If unsure, say N. config MMC_SDHCI_TEGRA - tristate "SDHCI platform support for the Tegra SD/MMC Controller" + bool "SDHCI platform support for the Tegra SD/MMC Controller" depends on MMC_SDHCI_PLTFM && ARCH_TEGRA select MMC_SDHCI_IO_ACCESSORS help @@ -535,6 +535,37 @@ config MMC_JZ4740 If you have a board based on such a SoC and with a SD/MMC slot, say Y or M here. +config MMC_VUB300 + tristate "VUB300 USB to SDIO/SD/MMC Host Controller support" + depends on USB + help + This selects support for Elan Digital Systems' VUB300 chip. + + The VUB300 is a USB-SDIO Host Controller Interface chip + that enables the host computer to use SDIO/SD/MMC cards + via a USB 2.0 or USB 1.1 host. + + The VUB300 chip will be found in both physically separate + USB to SDIO/SD/MMC adapters and embedded on some motherboards. + + The VUB300 chip supports SD and MMC memory cards in addition + to single and multifunction SDIO cards. + + Some SDIO cards will need a firmware file to be loaded and + sent to VUB300 chip in order to achieve better data throughput. + Download these "Offload Pseudocode" from Elan Digital Systems' + web-site http://www.elandigitalsystems.com/support/downloads.php + and put them in /lib/firmware. Note that without these additional + firmware files the VUB300 chip will still function, but not at + the best obtainable data rate. + + To compile this mmc host controller driver as a module, + choose M here: the module will be called vub300. + + If you have a computer with an embedded VUB300 chip + or if you intend connecting a USB adapter based on a + VUB300 chip say Y or M here. 
+ config MMC_USHC tristate "USB SD Host Controller (USHC) support" depends on USB diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 4f1df0aae57..58a5cf73d6e 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o obj-$(CONFIG_MMC_DW) += dw_mmc.o obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o +obj-$(CONFIG_MMC_VUB300) += vub300.o obj-$(CONFIG_MMC_USHC) += ushc.o obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 87e1f57ec9b..66dcddb9c20 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1769,9 +1769,6 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg) int i, ret; struct dw_mci *host = platform_get_drvdata(pdev); - if (host->vmmc) - regulator_enable(host->vmmc); - for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; if (!slot) @@ -1798,6 +1795,9 @@ static int dw_mci_resume(struct platform_device *pdev) int i, ret; struct dw_mci *host = platform_get_drvdata(pdev); + if (host->vmmc) + regulator_enable(host->vmmc); + if (host->dma_ops->init) host->dma_ops->init(host); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 4941e06fe2e..5da5bea0f9f 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -51,6 +51,7 @@ static unsigned int fmax = 515633; * is asserted (likewise for RX) * @sdio: variant supports SDIO * @st_clkdiv: true if using a ST-specific clock divider algorithm + * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register */ struct variant_data { unsigned int clkreg; @@ -60,6 +61,7 @@ struct variant_data { unsigned int fifohalfsize; bool sdio; bool st_clkdiv; + bool blksz_datactrl16; }; static struct variant_data variant_arm = { @@ -92,6 +94,17 @@ static struct variant_data variant_ux500 = { .st_clkdiv = true, }; +static struct variant_data variant_ux500v2 = { + .fifosize = 30 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .clkreg_enable = MCI_ST_UX500_HWFCEN, + .datalength_bits = 24, + .sdio = true, + .st_clkdiv = true, + .blksz_datactrl16 = true, +}; + /* * This must be called with host->lock held */ @@ -465,7 +478,10 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) blksz_bits = ffs(data->blksz) - 1; BUG_ON(1 << blksz_bits != data->blksz); - datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; + if (variant->blksz_datactrl16) + datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); + else + datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; if (data->flags & MMC_DATA_READ) datactrl |= MCI_DPSM_DIRECTION; @@ -1311,9 +1327,14 @@ static struct amba_id mmci_ids[] = { }, { .id = 0x00480180, - .mask = 0x00ffffff, + .mask = 0xf0ffffff, .data = &variant_ux500, }, + { + .id = 0x10480180, + .mask = 0xf0ffffff, + .data = &variant_ux500v2, + }, { 0, 0 }, }; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 259ece047af..5b2e2155b41 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -435,6 +435,9 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) reg = regulator_get(host->dev, "vmmc_aux"); host->vcc_aux = IS_ERR(reg) ? NULL : reg; + /* For eMMC do not power off when not in sleep state */ + if (mmc_slot(host).no_regulator_off_init) + return 0; /* * UGLY HACK: workaround regulator framework bugs. 
* When the bootloader leaves a supply active, it's diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index f8b5f37007b..936bbca19c0 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -18,11 +18,9 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/device.h> - #include <linux/mmc/host.h> - -#include <asm/scatterlist.h> -#include <asm/io.h> +#include <linux/scatterlist.h> +#include <linux/io.h> #include "sdhci.h" @@ -46,14 +44,14 @@ struct sdhci_pci_slot; struct sdhci_pci_fixes { unsigned int quirks; - int (*probe)(struct sdhci_pci_chip*); + int (*probe) (struct sdhci_pci_chip *); - int (*probe_slot)(struct sdhci_pci_slot*); - void (*remove_slot)(struct sdhci_pci_slot*, int); + int (*probe_slot) (struct sdhci_pci_slot *); + void (*remove_slot) (struct sdhci_pci_slot *, int); - int (*suspend)(struct sdhci_pci_chip*, + int (*suspend) (struct sdhci_pci_chip *, pm_message_t); - int (*resume)(struct sdhci_pci_chip*); + int (*resume) (struct sdhci_pci_chip *); }; struct sdhci_pci_slot { @@ -329,6 +327,11 @@ static int jmicron_probe(struct sdhci_pci_chip *chip) return ret; } + /* quirk for unsable RO-detection on JM388 chips */ + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD || + chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) + chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT; + return 0; } @@ -402,7 +405,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { - for (i = 0;i < chip->num_slots;i++) + for (i = 0; i < chip->num_slots; i++) jmicron_enable_mmc(chip->slots[i]->host, 0); } @@ -415,7 +418,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { - for (i = 0;i < chip->num_slots;i++) + for (i = 0; i < chip->num_slots; i++) jmicron_enable_mmc(chip->slots[i]->host, 1); } @@ -798,7 +801,7 @@ static struct sdhci_ops sdhci_pci_ops = { #ifdef CONFIG_PM -static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) +static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; @@ -810,7 +813,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) if (!chip) return 0; - for (i = 0;i < chip->num_slots;i++) { + for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; @@ -818,7 +821,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) ret = sdhci_suspend_host(slot->host, state); if (ret) { - for (i--;i >= 0;i--) + for (i--; i >= 0; i--) sdhci_resume_host(chip->slots[i]->host); return ret; } @@ -833,7 +836,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) if (chip->fixes && chip->fixes->suspend) { ret = chip->fixes->suspend(chip, state); if (ret) { - for (i = chip->num_slots - 1;i >= 0;i--) + for (i = chip->num_slots - 1; i >= 0; i--) sdhci_resume_host(chip->slots[i]->host); return ret; } @@ -855,7 +858,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) return 0; } -static int sdhci_pci_resume (struct pci_dev *pdev) +static int sdhci_pci_resume(struct pci_dev *pdev) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; @@ -877,7 +880,7 @@ static int sdhci_pci_resume (struct pci_dev *pdev) return ret; } - for (i = 0;i < chip->num_slots;i++) { + 
for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; @@ -1059,7 +1062,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, } chip->pdev = pdev; - chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; + chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; if (chip->fixes) chip->quirks = chip->fixes->quirks; chip->num_slots = slots; @@ -1074,10 +1077,10 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, slots = chip->num_slots; /* Quirk may have changed this */ - for (i = 0;i < slots;i++) { + for (i = 0; i < slots; i++) { slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); if (IS_ERR(slot)) { - for (i--;i >= 0;i--) + for (i--; i >= 0; i--) sdhci_pci_remove_slot(chip->slots[i]); ret = PTR_ERR(slot); goto free; @@ -1105,7 +1108,7 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev) chip = pci_get_drvdata(pdev); if (chip) { - for (i = 0;i < chip->num_slots; i++) + for (i = 0; i < chip->num_slots; i++) sdhci_pci_remove_slot(chip->slots[i]); pci_set_drvdata(pdev, NULL); @@ -1116,9 +1119,9 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev) } static struct pci_driver sdhci_driver = { - .name = "sdhci-pci", + .name = "sdhci-pci", .id_table = pci_ids, - .probe = sdhci_pci_probe, + .probe = sdhci_pci_probe, .remove = __devexit_p(sdhci_pci_remove), .suspend = sdhci_pci_suspend, .resume = sdhci_pci_resume, diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c index 5a61208cbc6..089c9a68b7b 100644 --- a/drivers/mmc/host/sdhci-pxa.c +++ b/drivers/mmc/host/sdhci-pxa.c @@ -69,7 +69,45 @@ static void set_clock(struct sdhci_host *host, unsigned int clock) } } +static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +{ + u16 ctrl_2; + + /* + * Set V18_EN -- UHS modes do not work without this. + * does not change signaling voltage + */ + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (uhs) { + case MMC_TIMING_UHS_SDR12: + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_UHS_SDR25: + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_UHS_SDR104: + ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_UHS_DDR50: + ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; + break; + } + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n", + __func__, mmc_hostname(host->mmc), uhs, ctrl_2); + + return 0; +} + static struct sdhci_ops sdhci_pxa_ops = { + .set_uhs_signaling = set_uhs_signaling, .set_clock = set_clock, }; @@ -136,11 +174,19 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev) host->hw_name = "MMC"; host->ops = &sdhci_pxa_ops; host->irq = irq; - host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + host->quirks = SDHCI_QUIRK_BROKEN_ADMA + | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL + | SDHCI_QUIRK_32BIT_DMA_ADDR + | SDHCI_QUIRK_32BIT_DMA_SIZE + | SDHCI_QUIRK_32BIT_ADMA_SIZE + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; if (pdata->quirks) host->quirks |= pdata->quirks; + /* enable 1/8V DDR capable */ + host->mmc->caps |= MMC_CAP_1_8V_DDR; + /* If slot design supports 8 bit data, indicate this to MMC. 
*/ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) host->mmc->caps |= MMC_CAP_8_BIT_DATA; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index f7e1f964395..343c97edba3 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -184,6 +184,8 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, clk_enable(clk); pltfm_host->clk = clk; + host->mmc->pm_caps = plat->pm_flags; + if (plat->is_8bit) host->mmc->caps |= MMC_CAP_8_BIT_DATA; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 5d20661bc35..58d5436ff64 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -38,13 +38,16 @@ #define SDHCI_USE_LEDS_CLASS #endif +#define MAX_TUNING_LOOP 40 + static unsigned int debug_quirks = 0; -static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); static void sdhci_finish_data(struct sdhci_host *); static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); static void sdhci_finish_command(struct sdhci_host *); +static int sdhci_execute_tuning(struct mmc_host *mmc); +static void sdhci_tuning_timer(unsigned long data); static void sdhci_dumpregs(struct sdhci_host *host) { @@ -84,6 +87,8 @@ static void sdhci_dumpregs(struct sdhci_host *host) printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", sdhci_readw(host, SDHCI_COMMAND), sdhci_readl(host, SDHCI_MAX_CURRENT)); + printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_CONTROL2)); if (host->flags & SDHCI_USE_ADMA) printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", @@ -157,6 +162,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask) if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) ier = sdhci_readl(host, SDHCI_INT_ENABLE); + if (host->ops->platform_reset_enter) + host->ops->platform_reset_enter(host, mask); + sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); if (mask & SDHCI_RESET_ALL) @@ -177,6 +185,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask) mdelay(1); } + if (host->ops->platform_reset_exit) + host->ops->platform_reset_exit(host, mask); + if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); } @@ -591,9 +602,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host, data->sg_len, direction); } -static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) +static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) { u8 count; + struct mmc_data *data = cmd->data; unsigned target_timeout, current_timeout; /* @@ -605,9 +617,16 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) return 0xE; + /* Unspecified timeout, assume max */ + if (!data && !cmd->cmd_timeout_ms) + return 0xE; + /* timeout in us */ - target_timeout = data->timeout_ns / 1000 + - data->timeout_clks / host->clock; + if (!data) + target_timeout = cmd->cmd_timeout_ms * 1000; + else + target_timeout = data->timeout_ns / 1000 + + data->timeout_clks / host->clock; if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) host->timeout_clk = host->clock / 1000; @@ -622,6 +641,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) * => * (1) / (2) > 2^6 */ + BUG_ON(!host->timeout_clk); count = 0; current_timeout = (1 << 13) * 1000 / host->timeout_clk; while (current_timeout < target_timeout) { @@ -632,8 +652,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, 
struct mmc_data *data) } if (count >= 0xF) { - printk(KERN_WARNING "%s: Too large timeout requested!\n", - mmc_hostname(host->mmc)); + printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n", + mmc_hostname(host->mmc), cmd->opcode); count = 0xE; } @@ -651,15 +671,21 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); } -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) { u8 count; u8 ctrl; + struct mmc_data *data = cmd->data; int ret; WARN_ON(host->data); - if (data == NULL) + if (data || (cmd->flags & MMC_RSP_BUSY)) { + count = sdhci_calc_timeout(host, cmd); + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + } + + if (!data) return; /* Sanity checks */ @@ -669,9 +695,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) host->data = data; host->data_early = 0; - - count = sdhci_calc_timeout(host, data); - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + host->data->bytes_xfered = 0; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) host->flags |= SDHCI_REQ_USE_DMA; @@ -807,15 +831,17 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) sdhci_set_transfer_irqs(host); - /* We do not handle DMA boundaries, so set it to max (512 KiB) */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); + /* Set the DMA boundary value and block size */ + sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, + data->blksz), SDHCI_BLOCK_SIZE); sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); } static void sdhci_set_transfer_mode(struct sdhci_host *host, - struct mmc_data *data) + struct mmc_command *cmd) { u16 mode; + struct mmc_data *data = cmd->data; if (data == NULL) return; @@ -823,12 +849,20 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, WARN_ON(!host->data); mode = SDHCI_TRNS_BLK_CNT_EN; - if (data->blocks > 1) { - if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) - mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12; - else - mode |= SDHCI_TRNS_MULTI; + if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { + mode |= SDHCI_TRNS_MULTI; + /* + * If we are sending CMD23, CMD12 never gets sent + * on successful completion (so no Auto-CMD12). + */ + if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) + mode |= SDHCI_TRNS_AUTO_CMD12; + else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { + mode |= SDHCI_TRNS_AUTO_CMD23; + sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2); + } } + if (data->flags & MMC_DATA_READ) mode |= SDHCI_TRNS_READ; if (host->flags & SDHCI_REQ_USE_DMA) @@ -868,7 +902,15 @@ static void sdhci_finish_data(struct sdhci_host *host) else data->bytes_xfered = data->blksz * data->blocks; - if (data->stop) { + /* + * Need to send CMD12 if - + * a) open-ended multiblock transfer (no CMD23) + * b) error in multiblock transfer + */ + if (data->stop && + (data->error || + !host->mrq->sbc)) { + /* * The controller needs a reset of internal state machines * upon error conditions. 
@@ -920,11 +962,11 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) host->cmd = cmd; - sdhci_prepare_data(host, cmd->data); + sdhci_prepare_data(host, cmd); sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); - sdhci_set_transfer_mode(host, cmd->data); + sdhci_set_transfer_mode(host, cmd); if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { printk(KERN_ERR "%s: Unsupported response type!\n", @@ -947,7 +989,9 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) flags |= SDHCI_CMD_CRC; if (cmd->flags & MMC_RSP_OPCODE) flags |= SDHCI_CMD_INDEX; - if (cmd->data) + + /* CMD19 is special in that the Data Present Select should be set */ + if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK)) flags |= SDHCI_CMD_DATA; sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); @@ -977,19 +1021,27 @@ static void sdhci_finish_command(struct sdhci_host *host) host->cmd->error = 0; - if (host->data && host->data_early) - sdhci_finish_data(host); + /* Finished CMD23, now send actual command. */ + if (host->cmd == host->mrq->sbc) { + host->cmd = NULL; + sdhci_send_command(host, host->mrq->cmd); + } else { - if (!host->cmd->data) - tasklet_schedule(&host->finish_tasklet); + /* Processed actual command. */ + if (host->data && host->data_early) + sdhci_finish_data(host); - host->cmd = NULL; + if (!host->cmd->data) + tasklet_schedule(&host->finish_tasklet); + + host->cmd = NULL; + } } static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) { - int div; - u16 clk; + int div = 0; /* Initialized for compiler warning */ + u16 clk = 0; unsigned long timeout; if (clock == host->clock) @@ -1007,14 +1059,45 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) goto out; if (host->version >= SDHCI_SPEC_300) { - /* Version 3.00 divisors must be a multiple of 2. */ - if (host->max_clk <= clock) - div = 1; - else { - for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) { - if ((host->max_clk / div) <= clock) - break; + /* + * Check if the Host Controller supports Programmable Clock + * Mode. + */ + if (host->clk_mul) { + u16 ctrl; + + /* + * We need to figure out whether the Host Driver needs + * to select Programmable Clock Mode, or the value can + * be set automatically by the Host Controller based on + * the Preset Value registers. + */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + for (div = 1; div <= 1024; div++) { + if (((host->max_clk * host->clk_mul) / + div) <= clock) + break; + } + /* + * Set Programmable Clock Mode in the Clock + * Control register. + */ + clk = SDHCI_PROG_CLOCK_MODE; + div--; } + } else { + /* Version 3.00 divisors must be a multiple of 2. */ + if (host->max_clk <= clock) + div = 1; + else { + for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; + div += 2) { + if ((host->max_clk / div) <= clock) + break; + } + } + div >>= 1; } } else { /* Version 2.00 divisors must be a power of 2. 
*/ @@ -1022,10 +1105,10 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) if ((host->max_clk / div) <= clock) break; } + div >>= 1; } - div >>= 1; - clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; + clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) << SDHCI_DIVIDER_HI_SHIFT; clk |= SDHCI_CLOCK_INT_EN; @@ -1131,7 +1214,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) #ifndef SDHCI_USE_LEDS_CLASS sdhci_activate_led(host); #endif - if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) { + + /* + * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED + * requests if Auto-CMD12 is enabled. + */ + if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { if (mrq->stop) { mrq->data->stop = NULL; mrq->stop = NULL; @@ -1150,8 +1238,30 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) if (!present || host->flags & SDHCI_DEVICE_DEAD) { host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); - } else - sdhci_send_command(host, mrq->cmd); + } else { + u32 present_state; + + present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); + /* + * Check if the re-tuning timer has already expired and there + * is no on-going data transfer. If so, we need to execute + * tuning procedure before sending command. + */ + if ((host->flags & SDHCI_NEEDS_RETUNING) && + !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { + spin_unlock_irqrestore(&host->lock, flags); + sdhci_execute_tuning(mmc); + spin_lock_irqsave(&host->lock, flags); + + /* Restore original mmc_request structure */ + host->mrq = mrq; + } + + if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) + sdhci_send_command(host, mrq->sbc); + else + sdhci_send_command(host, mrq->cmd); + } mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -1222,7 +1332,84 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) else ctrl &= ~SDHCI_CTRL_HISPD; - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + if (host->version >= SDHCI_SPEC_300) { + u16 clk, ctrl_2; + unsigned int clock; + + /* In case of UHS-I modes, set High Speed Enable */ + if ((ios->timing == MMC_TIMING_UHS_SDR50) || + (ios->timing == MMC_TIMING_UHS_SDR104) || + (ios->timing == MMC_TIMING_UHS_DDR50) || + (ios->timing == MMC_TIMING_UHS_SDR25) || + (ios->timing == MMC_TIMING_UHS_SDR12)) + ctrl |= SDHCI_CTRL_HISPD; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + /* + * We only need to set Driver Strength if the + * preset value enable is not set. + */ + ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; + if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) + ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; + else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) + ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + } else { + /* + * According to SDHC Spec v3.00, if the Preset Value + * Enable in the Host Control 2 register is set, we + * need to reset SD Clock Enable before changing High + * Speed Enable to avoid generating clock gliches. 
+ */ + + /* Reset SD Clock Enable */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + + /* Re-enable SD Clock */ + clock = host->clock; + host->clock = 0; + sdhci_set_clock(host, clock); + } + + + /* Reset SD Clock Enable */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (host->ops->set_uhs_signaling) + host->ops->set_uhs_signaling(host, ios->timing); + else { + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if (ios->timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (ios->timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (ios->timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if (ios->timing == MMC_TIMING_UHS_SDR104) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (ios->timing == MMC_TIMING_UHS_DDR50) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + } + + /* Re-enable SD Clock */ + clock = host->clock; + host->clock = 0; + sdhci_set_clock(host, clock); + } else + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); /* * Some (ENE) controllers go apeshit on some ios operation, @@ -1237,14 +1424,11 @@ out: spin_unlock_irqrestore(&host->lock, flags); } -static int sdhci_get_ro(struct mmc_host *mmc) +static int check_ro(struct sdhci_host *host) { - struct sdhci_host *host; unsigned long flags; int is_readonly; - host = mmc_priv(mmc); - spin_lock_irqsave(&host->lock, flags); if (host->flags & SDHCI_DEVICE_DEAD) @@ -1262,6 +1446,29 @@ static int sdhci_get_ro(struct mmc_host *mmc) !is_readonly : is_readonly; } +#define SAMPLE_COUNT 5 + +static int sdhci_get_ro(struct mmc_host *mmc) +{ + struct sdhci_host *host; + int i, ro_count; + + host = mmc_priv(mmc); + + if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) + return check_ro(host); + + ro_count = 0; + for (i = 0; i < SAMPLE_COUNT; i++) { + if (check_ro(host)) { + if (++ro_count > SAMPLE_COUNT / 2) + return 1; + } + msleep(30); + } + return 0; +} + static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct sdhci_host *host; @@ -1284,11 +1491,322 @@ out: spin_unlock_irqrestore(&host->lock, flags); } +static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host; + u8 pwr; + u16 clk, ctrl; + u32 present_state; + + host = mmc_priv(mmc); + + /* + * Signal Voltage Switching is only applicable for Host Controllers + * v3.00 and above. + */ + if (host->version < SDHCI_SPEC_300) + return 0; + + /* + * We first check whether the request is to set signalling voltage + * to 3.3V. If so, we change the voltage to 3.3V and return quickly. 
+ */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ + ctrl &= ~SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + /* Wait for 5ms */ + usleep_range(5000, 5500); + + /* 3.3V regulator output should be stable within 5 ms */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl & SDHCI_CTRL_VDD_180)) + return 0; + else { + printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V " + "signalling voltage failed\n"); + return -EIO; + } + } else if (!(ctrl & SDHCI_CTRL_VDD_180) && + (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { + /* Stop SDCLK */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + /* Check whether DAT[3:0] is 0000 */ + present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); + if (!((present_state & SDHCI_DATA_LVL_MASK) >> + SDHCI_DATA_LVL_SHIFT)) { + /* + * Enable 1.8V Signal Enable in the Host Control2 + * register + */ + ctrl |= SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + /* Wait for 5ms */ + usleep_range(5000, 5500); + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ctrl & SDHCI_CTRL_VDD_180) { + /* Provide SDCLK again and wait for 1ms*/ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + usleep_range(1000, 1500); + + /* + * If DAT[3:0] level is 1111b, then the card + * was successfully switched to 1.8V signaling. + */ + present_state = sdhci_readl(host, + SDHCI_PRESENT_STATE); + if ((present_state & SDHCI_DATA_LVL_MASK) == + SDHCI_DATA_LVL_MASK) + return 0; + } + } + + /* + * If we are here, that means the switch to 1.8V signaling + * failed. We power cycle the card, and retry initialization + * sequence by setting S18R to 0. + */ + pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); + pwr &= ~SDHCI_POWER_ON; + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + + /* Wait for 1ms as per the spec */ + usleep_range(1000, 1500); + pwr |= SDHCI_POWER_ON; + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + + printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling " + "voltage failed, retrying with S18R set to 0\n"); + return -EAGAIN; + } else + /* No signal voltage switch required */ + return 0; +} + +static int sdhci_execute_tuning(struct mmc_host *mmc) +{ + struct sdhci_host *host; + u16 ctrl; + u32 ier; + int tuning_loop_counter = MAX_TUNING_LOOP; + unsigned long timeout; + int err = 0; + + host = mmc_priv(mmc); + + disable_irq(host->irq); + spin_lock(&host->lock); + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + /* + * Host Controller needs tuning only in case of SDR104 mode + * and for SDR50 mode when Use Tuning for SDR50 is set in + * Capabilities register. + */ + if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || + (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && + (host->flags & SDHCI_SDR50_NEEDS_TUNING))) + ctrl |= SDHCI_CTRL_EXEC_TUNING; + else { + spin_unlock(&host->lock); + enable_irq(host->irq); + return 0; + } + + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + /* + * As per the Host Controller spec v3.00, tuning command + * generates Buffer Read Ready interrupt, so enable that. + * + * Note: The spec clearly says that when tuning sequence + * is being performed, the controller does not generate + * interrupts other than Buffer Read Ready interrupt. 
But + * to make sure we don't hit a controller bug, we _only_ + * enable Buffer Read Ready interrupt here. + */ + ier = sdhci_readl(host, SDHCI_INT_ENABLE); + sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL); + + /* + * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number + * of loops reaches 40 times or a timeout of 150ms occurs. + */ + timeout = 150; + do { + struct mmc_command cmd = {0}; + struct mmc_request mrq = {0}; + + if (!tuning_loop_counter && !timeout) + break; + + cmd.opcode = MMC_SEND_TUNING_BLOCK; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + cmd.retries = 0; + cmd.data = NULL; + cmd.error = 0; + + mrq.cmd = &cmd; + host->mrq = &mrq; + + /* + * In response to CMD19, the card sends 64 bytes of tuning + * block to the Host Controller. So we set the block size + * to 64 here. + */ + sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE); + + /* + * The tuning block is sent by the card to the host controller. + * So we set the TRNS_READ bit in the Transfer Mode register. + * This also takes care of setting DMA Enable and Multi Block + * Select in the same register to 0. + */ + sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); + + sdhci_send_command(host, &cmd); + + host->cmd = NULL; + host->mrq = NULL; + + spin_unlock(&host->lock); + enable_irq(host->irq); + + /* Wait for Buffer Read Ready interrupt */ + wait_event_interruptible_timeout(host->buf_ready_int, + (host->tuning_done == 1), + msecs_to_jiffies(50)); + disable_irq(host->irq); + spin_lock(&host->lock); + + if (!host->tuning_done) { + printk(KERN_INFO DRIVER_NAME ": Timeout waiting for " + "Buffer Read Ready interrupt during tuning " + "procedure, falling back to fixed sampling " + "clock\n"); + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl &= ~SDHCI_CTRL_TUNED_CLK; + ctrl &= ~SDHCI_CTRL_EXEC_TUNING; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + err = -EIO; + goto out; + } + + host->tuning_done = 0; + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + tuning_loop_counter--; + timeout--; + mdelay(1); + } while (ctrl & SDHCI_CTRL_EXEC_TUNING); + + /* + * The Host Driver has exhausted the maximum number of loops allowed, + * so use fixed sampling frequency. + */ + if (!tuning_loop_counter || !timeout) { + ctrl &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + } else { + if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { + printk(KERN_INFO DRIVER_NAME ": Tuning procedure" + " failed, falling back to fixed sampling" + " clock\n"); + err = -EIO; + } + } + +out: + /* + * If this is the very first time we are here, we start the retuning + * timer. Since only during the first time, SDHCI_NEEDS_RETUNING + * flag won't be set, we check this condition before actually starting + * the timer. + */ + if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && + (host->tuning_mode == SDHCI_TUNING_MODE_1)) { + mod_timer(&host->tuning_timer, jiffies + + host->tuning_count * HZ); + /* Tuning mode 1 limits the maximum data length to 4MB */ + mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; + } else { + host->flags &= ~SDHCI_NEEDS_RETUNING; + /* Reload the new initial value for timer */ + if (host->tuning_mode == SDHCI_TUNING_MODE_1) + mod_timer(&host->tuning_timer, jiffies + + host->tuning_count * HZ); + } + + /* + * In case tuning fails, host controllers which support re-tuning can + * try tuning again at a later time, when the re-tuning timer expires. + * So for these controllers, we return 0. 
Since there might be other + * controllers who do not have this capability, we return error for + * them. + */ + if (err && host->tuning_count && + host->tuning_mode == SDHCI_TUNING_MODE_1) + err = 0; + + sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); + spin_unlock(&host->lock); + enable_irq(host->irq); + + return err; +} + +static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) +{ + struct sdhci_host *host; + u16 ctrl; + unsigned long flags; + + host = mmc_priv(mmc); + + /* Host Controller v3.00 defines preset value registers */ + if (host->version < SDHCI_SPEC_300) + return; + + spin_lock_irqsave(&host->lock, flags); + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + /* + * We only enable or disable Preset Value if they are not already + * enabled or disabled respectively. Otherwise, we bail out. + */ + if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + } + + spin_unlock_irqrestore(&host->lock, flags); +} + static const struct mmc_host_ops sdhci_ops = { .request = sdhci_request, .set_ios = sdhci_set_ios, .get_ro = sdhci_get_ro, .enable_sdio_irq = sdhci_enable_sdio_irq, + .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, + .execute_tuning = sdhci_execute_tuning, + .enable_preset_value = sdhci_enable_preset_value, }; /*****************************************************************************\ @@ -1345,6 +1863,9 @@ static void sdhci_tasklet_finish(unsigned long param) del_timer(&host->timer); + if (host->version >= SDHCI_SPEC_300) + del_timer(&host->tuning_timer); + mrq = host->mrq; /* @@ -1418,6 +1939,20 @@ static void sdhci_timeout_timer(unsigned long data) spin_unlock_irqrestore(&host->lock, flags); } +static void sdhci_tuning_timer(unsigned long data) +{ + struct sdhci_host *host; + unsigned long flags; + + host = (struct sdhci_host *)data; + + spin_lock_irqsave(&host->lock, flags); + + host->flags |= SDHCI_NEEDS_RETUNING; + + spin_unlock_irqrestore(&host->lock, flags); +} + /*****************************************************************************\ * * * Interrupt handling * @@ -1506,6 +2041,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) { BUG_ON(intmask == 0); + /* CMD19 generates _only_ Buffer Read Ready interrupt */ + if (intmask & SDHCI_INT_DATA_AVAIL) { + if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) == + MMC_SEND_TUNING_BLOCK) { + host->tuning_done = 1; + wake_up(&host->buf_ready_int); + return; + } + } + if (!host->data) { /* * The "data complete" interrupt is also used to @@ -1551,10 +2096,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) * We currently don't do anything fancy with DMA * boundaries, but as we can't disable the feature * we need to at least restart the transfer. + * + * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) + * should return a valid address to continue from, but as + * some controllers are faulty, don't trust them. */ - if (intmask & SDHCI_INT_DMA_END) - sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), - SDHCI_DMA_ADDRESS); + if (intmask & SDHCI_INT_DMA_END) { + u32 dmastart, dmanow; + dmastart = sg_dma_address(host->data->sg); + dmanow = dmastart + host->data->bytes_xfered; + /* + * Force update to the next DMA block boundary. 
+ */ + dmanow = (dmanow & + ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + + SDHCI_DEFAULT_BOUNDARY_SIZE; + host->data->bytes_xfered = dmanow - dmastart; + DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes," + " next 0x%08x\n", + mmc_hostname(host->mmc), dmastart, + host->data->bytes_xfered, dmanow); + sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); + } if (intmask & SDHCI_INT_DATA_END) { if (host->cmd) { @@ -1664,6 +2227,14 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) sdhci_disable_card_detection(host); + /* Disable tuning since we are suspending */ + if (host->version >= SDHCI_SPEC_300 && host->tuning_count && + host->tuning_mode == SDHCI_TUNING_MODE_1) { + host->flags &= ~SDHCI_NEEDS_RETUNING; + mod_timer(&host->tuning_timer, jiffies + + host->tuning_count * HZ); + } + ret = mmc_suspend_host(host->mmc); if (ret) return ret; @@ -1705,6 +2276,11 @@ int sdhci_resume_host(struct sdhci_host *host) ret = mmc_resume_host(host->mmc); sdhci_enable_card_detection(host); + /* Set the re-tuning expiration flag */ + if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && + (host->tuning_mode == SDHCI_TUNING_MODE_1)) + host->flags |= SDHCI_NEEDS_RETUNING; + return ret; } @@ -1751,7 +2327,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host); int sdhci_add_host(struct sdhci_host *host) { struct mmc_host *mmc; - unsigned int caps, ocr_avail; + u32 caps[2]; + u32 max_current_caps; + unsigned int ocr_avail; int ret; WARN_ON(host == NULL); @@ -1774,12 +2352,15 @@ int sdhci_add_host(struct sdhci_host *host) host->version); } - caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : + caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : sdhci_readl(host, SDHCI_CAPABILITIES); + caps[1] = (host->version >= SDHCI_SPEC_300) ? + sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0; + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; - else if (!(caps & SDHCI_CAN_DO_SDMA)) + else if (!(caps[0] & SDHCI_CAN_DO_SDMA)) DBG("Controller doesn't have SDMA capability\n"); else host->flags |= SDHCI_USE_SDMA; @@ -1790,7 +2371,8 @@ int sdhci_add_host(struct sdhci_host *host) host->flags &= ~SDHCI_USE_SDMA; } - if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) + if ((host->version >= SDHCI_SPEC_200) && + (caps[0] & SDHCI_CAN_DO_ADMA2)) host->flags |= SDHCI_USE_ADMA; if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && @@ -1840,10 +2422,10 @@ int sdhci_add_host(struct sdhci_host *host) } if (host->version >= SDHCI_SPEC_300) - host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) + host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; else - host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) + host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; host->max_clk *= 1000000; @@ -1859,7 +2441,7 @@ int sdhci_add_host(struct sdhci_host *host) } host->timeout_clk = - (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; + (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; if (host->timeout_clk == 0) { if (host->ops->get_timeout_clock) { host->timeout_clk = host->ops->get_timeout_clock(host); @@ -1871,22 +2453,55 @@ int sdhci_add_host(struct sdhci_host *host) return -ENODEV; } } - if (caps & SDHCI_TIMEOUT_CLK_UNIT) + if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) host->timeout_clk *= 1000; /* + * In case of Host Controller v3.00, find out whether clock + * multiplier is supported. 
+ */ + host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> + SDHCI_CLOCK_MUL_SHIFT; + + /* + * In case the value in Clock Multiplier is 0, then programmable + * clock mode is not supported, otherwise the actual clock + * multiplier is one more than the value of Clock Multiplier + * in the Capabilities Register. + */ + if (host->clk_mul) + host->clk_mul += 1; + + /* * Set host parameters. */ mmc->ops = &sdhci_ops; + mmc->f_max = host->max_clk; if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); - else if (host->version >= SDHCI_SPEC_300) - mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; - else + else if (host->version >= SDHCI_SPEC_300) { + if (host->clk_mul) { + mmc->f_min = (host->max_clk * host->clk_mul) / 1024; + mmc->f_max = host->max_clk * host->clk_mul; + } else + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; + } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; - mmc->f_max = host->max_clk; - mmc->caps |= MMC_CAP_SDIO_IRQ; + mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; + + if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) + host->flags |= SDHCI_AUTO_CMD12; + + /* Auto-CMD23 stuff only works in ADMA or PIO. */ + if ((host->version >= SDHCI_SPEC_300) && + ((host->flags & SDHCI_USE_ADMA) || + !(host->flags & SDHCI_USE_SDMA))) { + host->flags |= SDHCI_AUTO_CMD23; + DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); + } else { + DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc)); + } /* * A controller may support 8-bit width, but the board itself @@ -1898,21 +2513,113 @@ int sdhci_add_host(struct sdhci_host *host) if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) mmc->caps |= MMC_CAP_4_BIT_DATA; - if (caps & SDHCI_CAN_DO_HISPD) + if (caps[0] & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && mmc_card_is_removable(mmc)) mmc->caps |= MMC_CAP_NEEDS_POLL; + /* UHS-I mode(s) supported by the host controller. */ + if (host->version >= SDHCI_SPEC_300) + mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + + /* SDR104 supports also implies SDR50 support */ + if (caps[1] & SDHCI_SUPPORT_SDR104) + mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; + else if (caps[1] & SDHCI_SUPPORT_SDR50) + mmc->caps |= MMC_CAP_UHS_SDR50; + + if (caps[1] & SDHCI_SUPPORT_DDR50) + mmc->caps |= MMC_CAP_UHS_DDR50; + + /* Does the host needs tuning for SDR50? */ + if (caps[1] & SDHCI_USE_SDR50_TUNING) + host->flags |= SDHCI_SDR50_NEEDS_TUNING; + + /* Driver Type(s) (A, C, D) supported by the host */ + if (caps[1] & SDHCI_DRIVER_TYPE_A) + mmc->caps |= MMC_CAP_DRIVER_TYPE_A; + if (caps[1] & SDHCI_DRIVER_TYPE_C) + mmc->caps |= MMC_CAP_DRIVER_TYPE_C; + if (caps[1] & SDHCI_DRIVER_TYPE_D) + mmc->caps |= MMC_CAP_DRIVER_TYPE_D; + + /* Initial value for re-tuning timer count */ + host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> + SDHCI_RETUNING_TIMER_COUNT_SHIFT; + + /* + * In case Re-tuning Timer is not disabled, the actual value of + * re-tuning timer will be 2 ^ (n - 1). + */ + if (host->tuning_count) + host->tuning_count = 1 << (host->tuning_count - 1); + + /* Re-tuning mode supported by the Host Controller */ + host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> + SDHCI_RETUNING_MODE_SHIFT; + ocr_avail = 0; - if (caps & SDHCI_CAN_VDD_330) + /* + * According to SD Host Controller spec v3.00, if the Host System + * can afford more than 150mA, Host Driver should set XPC to 1. 
Also + * the value is meaningful only if Voltage Support in the Capabilities + * register is set. The actual current value is 4 times the register + * value. + */ + max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); + + if (caps[0] & SDHCI_CAN_VDD_330) { + int max_current_330; + ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; - if (caps & SDHCI_CAN_VDD_300) + + max_current_330 = ((max_current_caps & + SDHCI_MAX_CURRENT_330_MASK) >> + SDHCI_MAX_CURRENT_330_SHIFT) * + SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_330 > 150) + mmc->caps |= MMC_CAP_SET_XPC_330; + } + if (caps[0] & SDHCI_CAN_VDD_300) { + int max_current_300; + ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; - if (caps & SDHCI_CAN_VDD_180) + + max_current_300 = ((max_current_caps & + SDHCI_MAX_CURRENT_300_MASK) >> + SDHCI_MAX_CURRENT_300_SHIFT) * + SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_300 > 150) + mmc->caps |= MMC_CAP_SET_XPC_300; + } + if (caps[0] & SDHCI_CAN_VDD_180) { + int max_current_180; + ocr_avail |= MMC_VDD_165_195; + max_current_180 = ((max_current_caps & + SDHCI_MAX_CURRENT_180_MASK) >> + SDHCI_MAX_CURRENT_180_SHIFT) * + SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_180 > 150) + mmc->caps |= MMC_CAP_SET_XPC_180; + + /* Maximum current capabilities of the host at 1.8V */ + if (max_current_180 >= 800) + mmc->caps |= MMC_CAP_MAX_CURRENT_800; + else if (max_current_180 >= 600) + mmc->caps |= MMC_CAP_MAX_CURRENT_600; + else if (max_current_180 >= 400) + mmc->caps |= MMC_CAP_MAX_CURRENT_400; + else + mmc->caps |= MMC_CAP_MAX_CURRENT_200; + } + mmc->ocr_avail = ocr_avail; mmc->ocr_avail_sdio = ocr_avail; if (host->ocr_avail_sdio) @@ -1972,7 +2679,7 @@ int sdhci_add_host(struct sdhci_host *host) if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { mmc->max_blk_size = 2; } else { - mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> + mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT; if (mmc->max_blk_size >= 3) { printk(KERN_WARNING "%s: Invalid maximum block size, " @@ -1998,6 +2705,15 @@ int sdhci_add_host(struct sdhci_host *host) setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); + if (host->version >= SDHCI_SPEC_300) { + init_waitqueue_head(&host->buf_ready_int); + + /* Initialize re-tuning timer */ + init_timer(&host->tuning_timer); + host->tuning_timer.data = (unsigned long)host; + host->tuning_timer.function = sdhci_tuning_timer; + } + ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, mmc_hostname(mmc), host); if (ret) @@ -2091,6 +2807,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) free_irq(host->irq, host); del_timer_sync(&host->timer); + if (host->version >= SDHCI_SPEC_300) + del_timer_sync(&host->tuning_timer); tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 25e8bde600d..745c42fa41e 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -25,6 +25,7 @@ */ #define SDHCI_DMA_ADDRESS 0x00 +#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS #define SDHCI_BLOCK_SIZE 0x04 #define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) @@ -36,7 +37,8 @@ #define SDHCI_TRANSFER_MODE 0x0C #define SDHCI_TRNS_DMA 0x01 #define SDHCI_TRNS_BLK_CNT_EN 0x02 -#define SDHCI_TRNS_ACMD12 0x04 +#define SDHCI_TRNS_AUTO_CMD12 0x04 +#define SDHCI_TRNS_AUTO_CMD23 0x08 #define SDHCI_TRNS_READ 0x10 #define SDHCI_TRNS_MULTI 0x20 @@ -68,8 +70,10 @@ #define SDHCI_DATA_AVAILABLE 0x00000800 #define SDHCI_CARD_PRESENT 0x00010000 #define 
SDHCI_WRITE_PROTECT 0x00080000 +#define SDHCI_DATA_LVL_MASK 0x00F00000 +#define SDHCI_DATA_LVL_SHIFT 20 -#define SDHCI_HOST_CONTROL 0x28 +#define SDHCI_HOST_CONTROL 0x28 #define SDHCI_CTRL_LED 0x01 #define SDHCI_CTRL_4BITBUS 0x02 #define SDHCI_CTRL_HISPD 0x04 @@ -99,6 +103,7 @@ #define SDHCI_DIV_MASK 0xFF #define SDHCI_DIV_MASK_LEN 8 #define SDHCI_DIV_HI_MASK 0x300 +#define SDHCI_PROG_CLOCK_MODE 0x0020 #define SDHCI_CLOCK_CARD_EN 0x0004 #define SDHCI_CLOCK_INT_STABLE 0x0002 #define SDHCI_CLOCK_INT_EN 0x0001 @@ -146,7 +151,22 @@ #define SDHCI_ACMD12_ERR 0x3C -/* 3E-3F reserved */ +#define SDHCI_HOST_CONTROL2 0x3E +#define SDHCI_CTRL_UHS_MASK 0x0007 +#define SDHCI_CTRL_UHS_SDR12 0x0000 +#define SDHCI_CTRL_UHS_SDR25 0x0001 +#define SDHCI_CTRL_UHS_SDR50 0x0002 +#define SDHCI_CTRL_UHS_SDR104 0x0003 +#define SDHCI_CTRL_UHS_DDR50 0x0004 +#define SDHCI_CTRL_VDD_180 0x0008 +#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 +#define SDHCI_CTRL_DRV_TYPE_B 0x0000 +#define SDHCI_CTRL_DRV_TYPE_A 0x0010 +#define SDHCI_CTRL_DRV_TYPE_C 0x0020 +#define SDHCI_CTRL_DRV_TYPE_D 0x0030 +#define SDHCI_CTRL_EXEC_TUNING 0x0040 +#define SDHCI_CTRL_TUNED_CLK 0x0080 +#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000 #define SDHCI_CAPABILITIES 0x40 #define SDHCI_TIMEOUT_CLK_MASK 0x0000003F @@ -167,9 +187,30 @@ #define SDHCI_CAN_VDD_180 0x04000000 #define SDHCI_CAN_64BIT 0x10000000 +#define SDHCI_SUPPORT_SDR50 0x00000001 +#define SDHCI_SUPPORT_SDR104 0x00000002 +#define SDHCI_SUPPORT_DDR50 0x00000004 +#define SDHCI_DRIVER_TYPE_A 0x00000010 +#define SDHCI_DRIVER_TYPE_C 0x00000020 +#define SDHCI_DRIVER_TYPE_D 0x00000040 +#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00 +#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8 +#define SDHCI_USE_SDR50_TUNING 0x00002000 +#define SDHCI_RETUNING_MODE_MASK 0x0000C000 +#define SDHCI_RETUNING_MODE_SHIFT 14 +#define SDHCI_CLOCK_MUL_MASK 0x00FF0000 +#define SDHCI_CLOCK_MUL_SHIFT 16 + #define SDHCI_CAPABILITIES_1 0x44 -#define SDHCI_MAX_CURRENT 0x48 +#define SDHCI_MAX_CURRENT 0x48 +#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF +#define SDHCI_MAX_CURRENT_330_SHIFT 0 +#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00 +#define SDHCI_MAX_CURRENT_300_SHIFT 8 +#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000 +#define SDHCI_MAX_CURRENT_180_SHIFT 16 +#define SDHCI_MAX_CURRENT_MULTIPLIER 4 /* 4C-4F reserved for more max current */ @@ -202,6 +243,12 @@ #define SDHCI_MAX_DIV_SPEC_200 256 #define SDHCI_MAX_DIV_SPEC_300 2046 +/* + * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2. 
+ */ +#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) +#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) + struct sdhci_ops { #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS u32 (*read_l)(struct sdhci_host *host, int reg); @@ -223,6 +270,10 @@ struct sdhci_ops { void (*platform_send_init_74_clocks)(struct sdhci_host *host, u8 power_mode); unsigned int (*get_ro)(struct sdhci_host *host); + void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); + void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); + int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); + }; #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index bbc298fd2a1..496b7efbc6b 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -76,7 +76,7 @@ static unsigned int switchlocked; #define BUSY_TIMEOUT 32767 /* list of supported pcmcia devices */ -static struct pcmcia_device_id pcmcia_ids[] = { +static const struct pcmcia_device_id pcmcia_ids[] = { /* vendor and device strings followed by their crc32 hashes */ PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed, 0xc3901202), diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index af97015a2fc..14f8edbaa19 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -29,6 +29,8 @@ #include <linux/mmc/sh_mmcif.h> #include <linux/pagemap.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/spinlock.h> #define DRIVER_NAME "sh_mmcif" #define DRIVER_VERSION "2010-04-28" @@ -153,6 +155,12 @@ #define CLKDEV_MMC_DATA 20000000 /* 20MHz */ #define CLKDEV_INIT 400000 /* 400 KHz */ +enum mmcif_state { + STATE_IDLE, + STATE_REQUEST, + STATE_IOS, +}; + struct sh_mmcif_host { struct mmc_host *mmc; struct mmc_data *data; @@ -164,6 +172,9 @@ struct sh_mmcif_host { long timeout; void __iomem *addr; struct completion intr_wait; + enum mmcif_state state; + spinlock_t lock; + bool power; /* DMA support */ struct dma_chan *chan_rx; @@ -798,17 +809,31 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sh_mmcif_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->state != STATE_IDLE) { + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EAGAIN; + mmc_request_done(mmc, mrq); + return; + } + + host->state = STATE_REQUEST; + spin_unlock_irqrestore(&host->lock, flags); switch (mrq->cmd->opcode) { /* MMCIF does not support SD/SDIO command */ case SD_IO_SEND_OP_COND: case MMC_APP_CMD: + host->state = STATE_IDLE; mrq->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, mrq); return; case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ if (!mrq->data) { /* send_if_cond cmd (not support) */ + host->state = STATE_IDLE; mrq->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, mrq); return; @@ -830,12 +855,9 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) sh_mmcif_start_cmd(host, mrq, mrq->cmd); host->data = NULL; - if (mrq->cmd->error != 0) { - mmc_request_done(mmc, mrq); - return; - } - if (mrq->stop) + if (!mrq->cmd->error && mrq->stop) sh_mmcif_stop_cmd(host, mrq, mrq->stop); + host->state = STATE_IDLE; mmc_request_done(mmc, mrq); } @@ -843,15 +865,39 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sh_mmcif_host *host = mmc_priv(mmc); struct sh_mmcif_plat_data *p = 
host->pd->dev.platform_data; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->state != STATE_IDLE) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + host->state = STATE_IOS; + spin_unlock_irqrestore(&host->lock, flags); if (ios->power_mode == MMC_POWER_UP) { if (p->set_pwr) p->set_pwr(host->pd, ios->power_mode); + if (!host->power) { + /* See if we also get DMA */ + sh_mmcif_request_dma(host, host->pd->dev.platform_data); + pm_runtime_get_sync(&host->pd->dev); + host->power = true; + } } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* clock stop */ sh_mmcif_clock_control(host, 0); - if (ios->power_mode == MMC_POWER_OFF && p->down_pwr) - p->down_pwr(host->pd); + if (ios->power_mode == MMC_POWER_OFF) { + if (host->power) { + pm_runtime_put(&host->pd->dev); + sh_mmcif_release_dma(host); + host->power = false; + } + if (p->down_pwr) + p->down_pwr(host->pd); + } + host->state = STATE_IDLE; return; } @@ -859,6 +905,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sh_mmcif_clock_control(host, ios->clock); host->bus_width = ios->bus_width; + host->state = STATE_IDLE; } static int sh_mmcif_get_cd(struct mmc_host *mmc) @@ -925,7 +972,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); err = 1; } else { - dev_dbg(&host->pd->dev, "Not support int\n"); + dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state); sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); err = 1; @@ -996,6 +1043,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) host->pd = pdev; init_completion(&host->intr_wait); + spin_lock_init(&host->lock); mmc->ops = &sh_mmcif_ops; mmc->f_max = host->clk; @@ -1020,24 +1068,29 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) sh_mmcif_sync_reset(host); platform_set_drvdata(pdev, host); - /* See if we also get DMA */ - sh_mmcif_request_dma(host, pd); + pm_runtime_enable(&pdev->dev); + host->power = false; + + ret = pm_runtime_resume(&pdev->dev); + if (ret < 0) + goto clean_up2; mmc_add_host(mmc); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); if (ret) { dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); - goto clean_up2; + goto clean_up3; } ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); if (ret) { free_irq(irq[0], host); dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); - goto clean_up2; + goto clean_up3; } - sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); sh_mmcif_detect(host->mmc); dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); @@ -1045,7 +1098,11 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); return ret; +clean_up3: + mmc_remove_host(mmc); + pm_runtime_suspend(&pdev->dev); clean_up2: + pm_runtime_disable(&pdev->dev); clk_disable(host->hclk); clean_up1: mmc_free_host(mmc); @@ -1060,14 +1117,14 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) struct sh_mmcif_host *host = platform_get_drvdata(pdev); int irq[2]; + pm_runtime_get_sync(&pdev->dev); + mmc_remove_host(host->mmc); - sh_mmcif_release_dma(host); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); if (host->addr) iounmap(host->addr); - sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - irq[0] = platform_get_irq(pdev, 0); irq[1] = 
platform_get_irq(pdev, 1); @@ -1078,15 +1135,52 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) clk_disable(host->hclk); mmc_free_host(host->mmc); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return 0; } +#ifdef CONFIG_PM +static int sh_mmcif_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_mmcif_host *host = platform_get_drvdata(pdev); + int ret = mmc_suspend_host(host->mmc); + + if (!ret) { + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + clk_disable(host->hclk); + } + + return ret; +} + +static int sh_mmcif_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_mmcif_host *host = platform_get_drvdata(pdev); + + clk_enable(host->hclk); + + return mmc_resume_host(host->mmc); +} +#else +#define sh_mmcif_suspend NULL +#define sh_mmcif_resume NULL +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { + .suspend = sh_mmcif_suspend, + .resume = sh_mmcif_resume, +}; + static struct platform_driver sh_mmcif_driver = { .probe = sh_mmcif_probe, .remove = sh_mmcif_remove, .driver = { .name = DRIVER_NAME, + .pm = &sh_mmcif_dev_pm_ops, }, }; diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index cc701236d16..b3654293017 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -62,7 +62,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct tmio_mmc_host *host; char clk_name[8]; - int ret; + int i, irq, ret; priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); if (priv == NULL) { @@ -71,6 +71,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) } mmc_data = &priv->mmc_data; + p->pdata = mmc_data; snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); priv->clk = clk_get(&pdev->dev, clk_name); @@ -116,11 +117,36 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) if (ret < 0) goto eprobe; - pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), - (unsigned long)host->ctl, host->irq); + for (i = 0; i < 3; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + if (i) { + continue; + } else { + ret = irq; + goto eirq; + } + } + ret = request_irq(irq, tmio_mmc_irq, 0, + dev_name(&pdev->dev), host); + if (ret) { + while (i--) { + irq = platform_get_irq(pdev, i); + if (irq >= 0) + free_irq(irq, host); + } + goto eirq; + } + } + dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", + mmc_hostname(host->mmc), (unsigned long) + (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start), + mmc_data->hclk / 1000000); return ret; +eirq: + tmio_mmc_host_remove(host); eprobe: clk_disable(priv->clk); clk_put(priv->clk); @@ -134,6 +160,16 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) struct mmc_host *mmc = platform_get_drvdata(pdev); struct tmio_mmc_host *host = mmc_priv(mmc); struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); + struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; + int i, irq; + + p->pdata = NULL; + + for (i = 0; i < 3; i++) { + irq = platform_get_irq(pdev, i); + if (irq >= 0) + free_irq(irq, host); + } tmio_mmc_host_remove(host); clk_disable(priv->clk); @@ -143,10 +179,18 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { + .suspend = 
tmio_mmc_host_suspend, + .resume = tmio_mmc_host_resume, + .runtime_suspend = tmio_mmc_host_runtime_suspend, + .runtime_resume = tmio_mmc_host_runtime_resume, +}; + static struct platform_driver sh_mobile_sdhi_driver = { .driver = { .name = "sh_mobile_sdhi", .owner = THIS_MODULE, + .pm = &tmio_mmc_dev_pm_ops, }, .probe = sh_mobile_sdhi_probe, .remove = __devexit_p(sh_mobile_sdhi_remove), diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 79c568461d5..8d185de90d2 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -30,7 +30,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) struct mmc_host *mmc = platform_get_drvdata(dev); int ret; - ret = mmc_suspend_host(mmc); + ret = tmio_mmc_host_suspend(&dev->dev); /* Tell MFD core it can disable us now.*/ if (!ret && cell->disable) @@ -46,15 +46,12 @@ static int tmio_mmc_resume(struct platform_device *dev) int ret = 0; /* Tell the MFD core we are ready to be enabled */ - if (cell->resume) { + if (cell->resume) ret = cell->resume(dev); - if (ret) - goto out; - } - mmc_resume_host(mmc); + if (!ret) + ret = tmio_mmc_host_resume(&dev->dev); -out: return ret; } #else @@ -67,15 +64,21 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); struct tmio_mmc_data *pdata; struct tmio_mmc_host *host; - int ret = -EINVAL; + int ret = -EINVAL, irq; if (pdev->num_resources != 2) goto out; - pdata = mfd_get_data(pdev); + pdata = pdev->dev.platform_data; if (!pdata || !pdata->hclk) goto out; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out; + } + /* Tell the MFD core we are ready to be enabled */ if (cell->enable) { ret = cell->enable(pdev); @@ -87,11 +90,18 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev) if (ret) goto cell_disable; + ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED | + IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host); + if (ret) + goto host_remove; + pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), - (unsigned long)host->ctl, host->irq); + (unsigned long)host->ctl, irq); return 0; +host_remove: + tmio_mmc_host_remove(host); cell_disable: if (cell->disable) cell->disable(pdev); @@ -107,7 +117,9 @@ static int __devexit tmio_mmc_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); if (mmc) { - tmio_mmc_host_remove(mmc_priv(mmc)); + struct tmio_mmc_host *host = mmc_priv(mmc); + free_irq(platform_get_irq(pdev, 0), host); + tmio_mmc_host_remove(host); if (cell->disable) cell->disable(pdev); } diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 099ed49a259..8260bc2c34e 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -19,6 +19,7 @@ #include <linux/highmem.h> #include <linux/mmc/tmio.h> #include <linux/pagemap.h> +#include <linux/spinlock.h> /* Definitions for values the CTRL_SDIO_STATUS register can take. 
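
/*
 * Aside: an illustrative sketch (not part of this patch) of the dev_pm_ops
 * wiring that sh_mobile_sdhi and tmio_mmc now use instead of the legacy
 * platform_driver .suspend/.resume callbacks. All "my_*" names are
 * hypothetical placeholders.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev)         { /* quiesce the controller */ return 0; }
static int my_resume(struct device *dev)          { /* reprogram the controller */ return 0; }
static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	.suspend         = my_suspend,
	.resume          = my_resume,
	.runtime_suspend = my_runtime_suspend,
	.runtime_resume  = my_runtime_resume,
};

static struct platform_driver my_driver = {
	.driver = {
		.name  = "my-driver",
		.owner = THIS_MODULE,
		.pm    = &my_pm_ops,	/* system sleep and runtime PM entry points */
	},
	/* .probe and .remove as usual */
};
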
*/ #define TMIO_SDIO_STAT_IOIRQ 0x0001 @@ -44,13 +45,14 @@ struct tmio_mmc_host { struct mmc_request *mrq; struct mmc_data *data; struct mmc_host *mmc; - int irq; unsigned int sdio_irq_enabled; /* Callbacks for clock / power control */ void (*set_pwr)(struct platform_device *host, int state); void (*set_clk_div)(struct platform_device *host, int state); + int pm_error; + /* pio related stuff */ struct scatterlist *sg_ptr; struct scatterlist *sg_orig; @@ -83,6 +85,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); +irqreturn_t tmio_mmc_irq(int irq, void *devid); static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) @@ -120,4 +123,15 @@ static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) } #endif +#ifdef CONFIG_PM +int tmio_mmc_host_suspend(struct device *dev); +int tmio_mmc_host_resume(struct device *dev); +#else +#define tmio_mmc_host_suspend NULL +#define tmio_mmc_host_resume NULL +#endif + +int tmio_mmc_host_runtime_suspend(struct device *dev); +int tmio_mmc_host_runtime_resume(struct device *dev); + #endif diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index d3de74ab633..25f1ad6cbe0 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -256,7 +256,10 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) { /* We can only either use DMA for both Tx and Rx or not use it at all */ - if (pdata->dma) { + if (!pdata->dma) + return; + + if (!host->chan_tx && !host->chan_rx) { dma_cap_mask_t mask; dma_cap_zero(mask); @@ -284,18 +287,18 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); + } - tmio_mmc_enable_dma(host, true); + tmio_mmc_enable_dma(host, true); + + return; - return; ebouncebuf: - dma_release_channel(host->chan_rx); - host->chan_rx = NULL; + dma_release_channel(host->chan_rx); + host->chan_rx = NULL; ereqrx: - dma_release_channel(host->chan_tx); - host->chan_tx = NULL; - return; - } + dma_release_channel(host->chan_tx); + host->chan_tx = NULL; } void tmio_mmc_release_dma(struct tmio_mmc_host *host) diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 710339a85c8..ad6347bb02d 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -39,6 +39,7 @@ #include <linux/module.h> #include <linux/pagemap.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/scatterlist.h> #include <linux/workqueue.h> #include <linux/spinlock.h> @@ -243,8 +244,12 @@ static void tmio_mmc_reset_work(struct work_struct *work) spin_lock_irqsave(&host->lock, flags); mrq = host->mrq; - /* request already finished */ - if (!mrq + /* + * is request already finished? 
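
/*
 * Aside: the tmio_mmc.h hunk above exports tmio_mmc_irq() and declares the
 * suspend/resume helpers, falling back to NULL when CONFIG_PM is off so the
 * names can be plugged straight into a dev_pm_ops table. A minimal,
 * hypothetical version of that conditional-stub pattern ("my_*" names are
 * illustrative, not driver symbols):
 */
#include <linux/device.h>

#ifdef CONFIG_PM
int my_host_suspend(struct device *dev);
int my_host_resume(struct device *dev);
#else
#define my_host_suspend NULL
#define my_host_resume  NULL
#endif

/*
 * A driver can then initialize .suspend = my_host_suspend and
 * .resume = my_host_resume unconditionally; with CONFIG_PM disabled the
 * table simply carries NULL callbacks.
 */
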
Since we use a non-blocking + * cancel_delayed_work(), it can happen, that a .set_ios() call preempts + * us, so, have to check for IS_ERR(host->mrq) + */ + if (IS_ERR_OR_NULL(mrq) || time_is_after_jiffies(host->last_req_ts + msecs_to_jiffies(2000))) { spin_unlock_irqrestore(&host->lock, flags); @@ -264,16 +269,19 @@ static void tmio_mmc_reset_work(struct work_struct *work) host->cmd = NULL; host->data = NULL; - host->mrq = NULL; host->force_pio = false; spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_reset(host); + /* Ready for new calls */ + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); } +/* called with host->lock held, interrupts disabled */ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) { struct mmc_request *mrq = host->mrq; @@ -281,13 +289,15 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) if (!mrq) return; - host->mrq = NULL; host->cmd = NULL; host->data = NULL; host->force_pio = false; cancel_delayed_work(&host->delayed_reset_work); + host->mrq = NULL; + + /* FIXME: mmc_request_done() can schedule! */ mmc_request_done(host->mmc, mrq); } @@ -554,7 +564,7 @@ out: spin_unlock(&host->lock); } -static irqreturn_t tmio_mmc_irq(int irq, void *devid) +irqreturn_t tmio_mmc_irq(int irq, void *devid) { struct tmio_mmc_host *host = devid; struct tmio_mmc_data *pdata = host->pdata; @@ -649,6 +659,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid) out: return IRQ_HANDLED; } +EXPORT_SYMBOL(tmio_mmc_irq); static int tmio_mmc_start_data(struct tmio_mmc_host *host, struct mmc_data *data) @@ -685,15 +696,27 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct tmio_mmc_host *host = mmc_priv(mmc); + unsigned long flags; int ret; - if (host->mrq) + spin_lock_irqsave(&host->lock, flags); + + if (host->mrq) { pr_debug("request not null\n"); + if (IS_ERR(host->mrq)) { + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EAGAIN; + mmc_request_done(mmc, mrq); + return; + } + } host->last_req_ts = jiffies; wmb(); host->mrq = mrq; + spin_unlock_irqrestore(&host->lock, flags); + if (mrq->data) { ret = tmio_mmc_start_data(host, mrq->data); if (ret) @@ -708,8 +731,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) } fail: - host->mrq = NULL; host->force_pio = false; + host->mrq = NULL; mrq->cmd->error = ret; mmc_request_done(mmc, mrq); } @@ -723,19 +746,54 @@ fail: static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_data *pdata = host->pdata; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->mrq) { + if (IS_ERR(host->mrq)) { + dev_dbg(&host->pdev->dev, + "%s.%d: concurrent .set_ios(), clk %u, mode %u\n", + current->comm, task_pid_nr(current), + ios->clock, ios->power_mode); + host->mrq = ERR_PTR(-EINTR); + } else { + dev_dbg(&host->pdev->dev, + "%s.%d: CMD%u active since %lu, now %lu!\n", + current->comm, task_pid_nr(current), + host->mrq->cmd->opcode, host->last_req_ts, jiffies); + } + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + host->mrq = ERR_PTR(-EBUSY); + + spin_unlock_irqrestore(&host->lock, flags); if (ios->clock) tmio_mmc_set_clock(host, ios->clock); /* Power sequence - OFF -> UP -> ON */ if (ios->power_mode == MMC_POWER_UP) { + if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) { + pm_runtime_get_sync(&host->pdev->dev); + pdata->power = true; + } /* power up SD bus */ if 
(host->set_pwr) host->set_pwr(host->pdev, 1); } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* power down SD bus */ - if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) - host->set_pwr(host->pdev, 0); + if (ios->power_mode == MMC_POWER_OFF) { + if (host->set_pwr) + host->set_pwr(host->pdev, 0); + if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && + pdata->power) { + pdata->power = false; + pm_runtime_put(&host->pdev->dev); + } + } tmio_mmc_clk_stop(host); } else { /* start bus clock */ @@ -753,6 +811,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) /* Let things settle. delay taken from winCE driver */ udelay(140); + if (PTR_ERR(host->mrq) == -EINTR) + dev_dbg(&host->pdev->dev, + "%s.%d: IOS interrupted: clk %u, mode %u", + current->comm, task_pid_nr(current), + ios->clock, ios->power_mode); + host->mrq = NULL; } static int tmio_mmc_get_ro(struct mmc_host *mmc) @@ -801,6 +865,7 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, if (!mmc) return -ENOMEM; + pdata->dev = &pdev->dev; _host = mmc_priv(mmc); _host->pdata = pdata; _host->mmc = mmc; @@ -834,24 +899,19 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, else mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - tmio_mmc_clk_stop(_host); - tmio_mmc_reset(_host); - - ret = platform_get_irq(pdev, 0); + pdata->power = false; + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume(&pdev->dev); if (ret < 0) - goto unmap_ctl; + goto pm_disable; - _host->irq = ret; + tmio_mmc_clk_stop(_host); + tmio_mmc_reset(_host); tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); if (pdata->flags & TMIO_MMC_SDIO_IRQ) tmio_mmc_enable_sdio_irq(mmc, 0); - ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | - IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); - if (ret) - goto unmap_ctl; - spin_lock_init(&_host->lock); /* Init delayed work for request timeouts */ @@ -860,6 +920,10 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, /* See if we also get DMA */ tmio_mmc_request_dma(_host, pdata); + /* We have to keep the device powered for its card detection to work */ + if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) + pm_runtime_get_noresume(&pdev->dev); + mmc_add_host(mmc); /* Unmask the IRQs we want to know about */ @@ -874,7 +938,8 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, return 0; -unmap_ctl: +pm_disable: + pm_runtime_disable(&pdev->dev); iounmap(_host->ctl); host_free: mmc_free_host(mmc); @@ -885,13 +950,88 @@ EXPORT_SYMBOL(tmio_mmc_host_probe); void tmio_mmc_host_remove(struct tmio_mmc_host *host) { + struct platform_device *pdev = host->pdev; + + /* + * We don't have to manipulate pdata->power here: if there is a card in + * the slot, the runtime PM is active and our .runtime_resume() will not + * be run. If there is no card in the slot and the platform can suspend + * the controller, the runtime PM is suspended and pdata->power == false, + * so, our .runtime_resume() will not try to detect a card in the slot. 
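
/*
 * Aside: in the .request()/.set_ios() hunks above, host->mrq doubles as a
 * state word: NULL means idle, a real pointer means a request is in flight,
 * and ERR_PTR(-EBUSY)/ERR_PTR(-EINTR) mark a .set_ios() in progress, which
 * is why the watchdog now tests IS_ERR_OR_NULL(). A reduced, hypothetical
 * illustration of that encoding (not driver code):
 */
#include <linux/err.h>
#include <linux/errno.h>

struct mmc_request;

struct my_slot {
	struct mmc_request *mrq;	/* NULL, ERR_PTR(...), or a real request */
};

static bool my_slot_idle(struct my_slot *s)
{
	return s->mrq == NULL;
}

static bool my_slot_has_request(struct my_slot *s)
{
	/* ERR_PTR() values are markers only and must never be dereferenced */
	return !IS_ERR_OR_NULL(s->mrq);
}

static void my_slot_claim_for_ios(struct my_slot *s)
{
	s->mrq = ERR_PTR(-EBUSY);	/* block .request() until .set_ios() ends */
}
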
+ */ + if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD) + pm_runtime_get_sync(&pdev->dev); + mmc_remove_host(host->mmc); cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_release_dma(host); - free_irq(host->irq, host); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + iounmap(host->ctl); mmc_free_host(host->mmc); } EXPORT_SYMBOL(tmio_mmc_host_remove); +#ifdef CONFIG_PM +int tmio_mmc_host_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct tmio_mmc_host *host = mmc_priv(mmc); + int ret = mmc_suspend_host(mmc); + + if (!ret) + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + + host->pm_error = pm_runtime_put_sync(dev); + + return ret; +} +EXPORT_SYMBOL(tmio_mmc_host_suspend); + +int tmio_mmc_host_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct tmio_mmc_host *host = mmc_priv(mmc); + + /* The MMC core will perform the complete set up */ + host->pdata->power = false; + + if (!host->pm_error) + pm_runtime_get_sync(dev); + + tmio_mmc_reset(mmc_priv(mmc)); + tmio_mmc_request_dma(host, host->pdata); + + return mmc_resume_host(mmc); +} +EXPORT_SYMBOL(tmio_mmc_host_resume); + +#endif /* CONFIG_PM */ + +int tmio_mmc_host_runtime_suspend(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend); + +int tmio_mmc_host_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_data *pdata = host->pdata; + + tmio_mmc_reset(host); + + if (pdata->power) { + /* Only entered after a card-insert interrupt */ + tmio_mmc_set_ios(mmc, &mmc->ios); + mmc_detect_change(mmc, msecs_to_jiffies(100)); + } + + return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_runtime_resume); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c new file mode 100644 index 00000000000..cbb03305b77 --- /dev/null +++ b/drivers/mmc/host/vub300.c @@ -0,0 +1,2506 @@ +/* + * Remote VUB300 SDIO/SDmem Host Controller Driver + * + * Copyright (C) 2010 Elan Digital Systems Limited + * + * based on USB Skeleton driver - 2.2 + * + * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 + * + * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot + * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear, + * by virtue of this driver, to have been plugged into a local + * SDIO host controller, similar to, say, a PCI Ricoh controller + * This is because this kernel device driver is both a USB 2.0 + * client device driver AND an MMC host controller driver. Thus + * if there is an existing driver for the inserted SDIO/SDmem/MMC + * device then that driver will be used by the kernel to manage + * the device in exactly the same fashion as if it had been + * directly plugged into, say, a local pci bus Ricoh controller + * + * RANT: this driver was written using a display 128x48 - converting it + * to a line width of 80 makes it very difficult to support. In + * particular functions have been broken down into sub functions + * and the original meaningful names have been shortened into + * cryptic ones. + * The problem is that executing a fragment of code subject to + * two conditions means an indentation of 24, thus leaving only + * 56 characters for a C statement. 
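
/*
 * Aside: a hypothetical sketch of the runtime-PM idea behind
 * tmio_mmc_host_runtime_resume() above: once the controller has lost state
 * while suspended, reprogram it and nudge the MMC core to re-scan the slot.
 * "my_*" names are illustrative placeholders.
 */
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/mmc/host.h>

static void my_reinit_controller(struct mmc_host *mmc) { /* reset + reprogram */ }

static int my_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	my_reinit_controller(mmc);
	/* debounce briefly, then let the core re-detect the card */
	mmc_detect_change(mmc, msecs_to_jiffies(100));
	return 0;
}
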
And that is quite ridiculous! + * + * Data types: data passed to/from the VUB300 is fixed to a number of + * bits and driver data fields reflect that limit by using + * u8, u16, u32 + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/kref.h> +#include <linux/uaccess.h> +#include <linux/usb.h> +#include <linux/mutex.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/workqueue.h> +#include <linux/ctype.h> +#include <linux/firmware.h> +#include <linux/scatterlist.h> + +struct host_controller_info { + u8 info_size; + u16 firmware_version; + u8 number_of_ports; +} __packed; + +#define FIRMWARE_BLOCK_BOUNDARY 1024 +struct sd_command_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; /* Bit7 - Rd/Wr */ + u8 command_index; + u8 transfer_size[4]; /* ReadSize + ReadSize */ + u8 response_type; + u8 arguments[4]; + u8 block_count[2]; + u8 block_size[2]; + u8 block_boundary[2]; + u8 reserved[44]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_irqpoll_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; /* Bit7 - Rd/Wr */ + u8 padding[16]; /* don't ask why !! */ + u8 poll_timeout_msb; + u8 poll_timeout_lsb; + u8 reserved[42]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_common_header { + u8 header_size; + u8 header_type; + u8 port_number; +} __packed; + +struct sd_response_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; + u8 command_index; + u8 command_response[0]; +} __packed; + +struct sd_status_header { + u8 header_size; + u8 header_type; + u8 port_number; + u16 port_flags; + u32 sdio_clock; + u16 host_header_size; + u16 func_header_size; + u16 ctrl_header_size; +} __packed; + +struct sd_error_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 error_code; +} __packed; + +struct sd_interrupt_header { + u8 header_size; + u8 header_type; + u8 port_number; +} __packed; + +struct offload_registers_access { + u8 command_byte[4]; + u8 Respond_Byte[4]; +} __packed; + +#define INTERRUPT_REGISTER_ACCESSES 15 +struct sd_offloaded_interrupt { + u8 header_size; + u8 header_type; + u8 port_number; + struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES]; +} __packed; + +struct sd_register_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; + u8 command_index; + u8 command_response[6]; +} __packed; + +#define PIGGYBACK_REGISTER_ACCESSES 14 +struct sd_offloaded_piggyback { + struct sd_register_header sdio; + struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES]; +} __packed; + +union sd_response { + struct sd_common_header common; + struct sd_status_header status; + struct sd_error_header error; + struct sd_interrupt_header interrupt; + struct sd_response_header response; + struct sd_offloaded_interrupt irq; + struct sd_offloaded_piggyback pig; +} __packed; + +union sd_command { + struct sd_command_header head; + struct sd_irqpoll_header poll; +} __packed; + +enum SD_RESPONSE_TYPE { + SDRT_UNSPECIFIED = 0, + SDRT_NONE, + SDRT_1, + SDRT_1B, + SDRT_2, + SDRT_3, + SDRT_4, + SDRT_5, + SDRT_5B, + SDRT_6, + SDRT_7, +}; + +#define RESPONSE_INTERRUPT 0x01 +#define RESPONSE_ERROR 0x02 +#define RESPONSE_STATUS 0x03 +#define RESPONSE_IRQ_DISABLED 0x05 +#define RESPONSE_IRQ_ENABLED 0x06 +#define RESPONSE_PIGGYBACKED 0x07 +#define RESPONSE_NO_INTERRUPT 0x08 
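
/*
 * Aside: the wire structures above are declared __packed so that their
 * in-memory layout matches the fixed-size USB command/response packets byte
 * for byte. A reduced, hypothetical example with a compile-time size check
 * (same idea, not one of the driver's structures):
 */
#include <linux/bug.h>
#include <linux/types.h>

struct demo_status_header {
	u8  header_size;
	u8  header_type;
	u8  port_number;
	u16 port_flags;		/* unpacked, this would start at offset 4, not 3 */
	u32 sdio_clock;
	u16 host_header_size;
} __packed;

static inline void demo_check_layout(void)
{
	/* 1 + 1 + 1 + 2 + 4 + 2 bytes, no compiler-inserted padding */
	BUILD_BUG_ON(sizeof(struct demo_status_header) != 11);
}
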
+#define RESPONSE_PIG_DISABLED 0x09 +#define RESPONSE_PIG_ENABLED 0x0A +#define SD_ERROR_1BIT_TIMEOUT 0x01 +#define SD_ERROR_4BIT_TIMEOUT 0x02 +#define SD_ERROR_1BIT_CRC_WRONG 0x03 +#define SD_ERROR_4BIT_CRC_WRONG 0x04 +#define SD_ERROR_1BIT_CRC_ERROR 0x05 +#define SD_ERROR_4BIT_CRC_ERROR 0x06 +#define SD_ERROR_NO_CMD_ENDBIT 0x07 +#define SD_ERROR_NO_1BIT_DATEND 0x08 +#define SD_ERROR_NO_4BIT_DATEND 0x09 +#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A +#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B +#define SD_ERROR_ILLEGAL_COMMAND 0x0C +#define SD_ERROR_NO_DEVICE 0x0D +#define SD_ERROR_TRANSFER_LENGTH 0x0E +#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F +#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10 +#define SD_ERROR_ILLEGAL_STATE 0x11 +#define SD_ERROR_UNKNOWN_ERROR 0x12 +#define SD_ERROR_RESERVED_ERROR 0x13 +#define SD_ERROR_INVALID_FUNCTION 0x14 +#define SD_ERROR_OUT_OF_RANGE 0x15 +#define SD_ERROR_STAT_CMD 0x16 +#define SD_ERROR_STAT_DATA 0x17 +#define SD_ERROR_STAT_CMD_TIMEOUT 0x18 +#define SD_ERROR_SDCRDY_STUCK 0x19 +#define SD_ERROR_UNHANDLED 0x1A +#define SD_ERROR_OVERRUN 0x1B +#define SD_ERROR_PIO_TIMEOUT 0x1C + +#define FUN(c) (0x000007 & (c->arg>>28)) +#define REG(c) (0x01FFFF & (c->arg>>9)) + +static int limit_speed_to_24_MHz; +module_param(limit_speed_to_24_MHz, bool, 0644); +MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); + +static int pad_input_to_usb_pkt; +module_param(pad_input_to_usb_pkt, bool, 0644); +MODULE_PARM_DESC(pad_input_to_usb_pkt, + "Pad USB data input transfers to whole USB Packet"); + +static int disable_offload_processing; +module_param(disable_offload_processing, bool, 0644); +MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); + +static int force_1_bit_data_xfers; +module_param(force_1_bit_data_xfers, bool, 0644); +MODULE_PARM_DESC(force_1_bit_data_xfers, + "Force SDIO Data Transfers to 1-bit Mode"); + +static int force_polling_for_irqs; +module_param(force_polling_for_irqs, bool, 0644); +MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); + +static int firmware_irqpoll_timeout = 1024; +module_param(firmware_irqpoll_timeout, int, 0644); +MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout"); + +static int force_max_req_size = 128; +module_param(force_max_req_size, int, 0644); +MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes"); + +#ifdef SMSC_DEVELOPMENT_BOARD +static int firmware_rom_wait_states = 0x04; +#else +static int firmware_rom_wait_states = 0x1C; +#endif + +module_param(firmware_rom_wait_states, bool, 0644); +MODULE_PARM_DESC(firmware_rom_wait_states, + "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); + +#define ELAN_VENDOR_ID 0x2201 +#define VUB300_VENDOR_ID 0x0424 +#define VUB300_PRODUCT_ID 0x012C +static struct usb_device_id vub300_table[] = { + {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, + {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, + {} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(usb, vub300_table); + +static struct workqueue_struct *cmndworkqueue; +static struct workqueue_struct *pollworkqueue; +static struct workqueue_struct *deadworkqueue; + +static inline int interface_to_InterfaceNumber(struct usb_interface *interface) +{ + if (!interface) + return -1; + if (!interface->cur_altsetting) + return -1; + return interface->cur_altsetting->desc.bInterfaceNumber; +} + +struct sdio_register { + unsigned func_num:3; + unsigned sdio_reg:17; + unsigned activate:1; + unsigned prepared:1; + unsigned 
regvalue:8; + unsigned response:8; + unsigned sparebit:26; +}; + +struct vub300_mmc_host { + struct usb_device *udev; + struct usb_interface *interface; + struct kref kref; + struct mutex cmd_mutex; + struct mutex irq_mutex; + char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */ + u8 cmnd_out_ep; /* EndPoint for commands */ + u8 cmnd_res_ep; /* EndPoint for responses */ + u8 data_out_ep; /* EndPoint for out data */ + u8 data_inp_ep; /* EndPoint for inp data */ + bool card_powered; + bool card_present; + bool read_only; + bool large_usb_packets; + bool app_spec; /* ApplicationSpecific */ + bool irq_enabled; /* by the MMC CORE */ + bool irq_disabled; /* in the firmware */ + unsigned bus_width:4; + u8 total_offload_count; + u8 dynamic_register_count; + u8 resp_len; + u32 datasize; + int errors; + int usb_transport_fail; + int usb_timed_out; + int irqs_queued; + struct sdio_register sdio_register[16]; + struct offload_interrupt_function_register { +#define MAXREGBITS 4 +#define MAXREGS (1<<MAXREGBITS) +#define MAXREGMASK (MAXREGS-1) + u8 offload_count; + u32 offload_point; + struct offload_registers_access reg[MAXREGS]; + } fn[8]; + u16 fbs[8]; /* Function Block Size */ + struct mmc_command *cmd; + struct mmc_request *req; + struct mmc_data *data; + struct mmc_host *mmc; + struct urb *urb; + struct urb *command_out_urb; + struct urb *command_res_urb; + struct completion command_complete; + struct completion irqpoll_complete; + union sd_command cmnd; + union sd_response resp; + struct timer_list sg_transfer_timer; + struct usb_sg_request sg_request; + struct timer_list inactivity_timer; + struct work_struct deadwork; + struct work_struct cmndwork; + struct delayed_work pollwork; + struct host_controller_info hc_info; + struct sd_status_header system_port_status; + u8 padded_buffer[64]; +}; + +#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref) +#define SET_TRANSFER_PSEUDOCODE 21 +#define SET_INTERRUPT_PSEUDOCODE 20 +#define SET_FAILURE_MODE 18 +#define SET_ROM_WAIT_STATES 16 +#define SET_IRQ_ENABLE 13 +#define SET_CLOCK_SPEED 11 +#define SET_FUNCTION_BLOCK_SIZE 9 +#define SET_SD_DATA_MODE 6 +#define SET_SD_POWER 4 +#define ENTER_DFU_MODE 3 +#define GET_HC_INF0 1 +#define GET_SYSTEM_PORT_STATUS 0 + +static void vub300_delete(struct kref *kref) +{ /* kref callback - softirq */ + struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref); + struct mmc_host *mmc = vub300->mmc; + usb_free_urb(vub300->command_out_urb); + vub300->command_out_urb = NULL; + usb_free_urb(vub300->command_res_urb); + vub300->command_res_urb = NULL; + usb_put_dev(vub300->udev); + mmc_free_host(mmc); + /* + * and hence also frees vub300 + * which is contained at the end of struct mmc + */ +} + +static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300) +{ + kref_get(&vub300->kref); + if (queue_work(cmndworkqueue, &vub300->cmndwork)) { + /* + * then the cmndworkqueue was not previously + * running and the above get ref is obvious + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the cmndworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay) +{ + kref_get(&vub300->kref); + if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) { + /* + * then the pollworkqueue was not previously + * running and the above get ref 
is obvious + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the pollworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void vub300_queue_dead_work(struct vub300_mmc_host *vub300) +{ + kref_get(&vub300->kref); + if (queue_work(deadworkqueue, &vub300->deadwork)) { + /* + * then the deadworkqueue was not previously + * running and the above get ref is obvious + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the deadworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void irqpoll_res_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) + vub300->usb_transport_fail = urb->status; + complete(&vub300->irqpoll_complete); +} + +static void irqpoll_out_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + vub300->usb_transport_fail = urb->status; + complete(&vub300->irqpoll_complete); + return; + } else { + int ret; + unsigned int pipe = + usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); + usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, + &vub300->resp, sizeof(vub300->resp), + irqpoll_res_completed, vub300); + vub300->command_res_urb->actual_length = 0; + ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); + if (ret) { + vub300->usb_transport_fail = ret; + complete(&vub300->irqpoll_complete); + } + return; + } +} + +static void send_irqpoll(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + int retval; + int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout); + vub300->cmnd.poll.header_size = 22; + vub300->cmnd.poll.header_type = 1; + vub300->cmnd.poll.port_number = 0; + vub300->cmnd.poll.command_type = 2; + vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout; + vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8); + usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, + usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep) + , &vub300->cmnd, sizeof(vub300->cmnd) + , irqpoll_out_completed, vub300); + retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); + if (0 > retval) { + vub300->usb_transport_fail = retval; + vub300_queue_poll_work(vub300, 1); + complete(&vub300->irqpoll_complete); + return; + } else { + return; + } +} + +static void new_system_port_status(struct vub300_mmc_host *vub300) +{ + int old_card_present = vub300->card_present; + int new_card_present = + (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; + vub300->read_only = + (0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; + if (new_card_present && !old_card_present) { + dev_info(&vub300->udev->dev, "card just inserted\n"); + vub300->card_present = 1; + vub300->bus_width = 0; + if (disable_offload_processing) + strncpy(vub300->vub_name, "EMPTY Processing Disabled", + sizeof(vub300->vub_name)); + else + vub300->vub_name[0] = 0; + mmc_detect_change(vub300->mmc, 1); + } else if (!new_card_present && old_card_present) { + dev_info(&vub300->udev->dev, "card just ejected\n"); + vub300->card_present = 0; + mmc_detect_change(vub300->mmc, 0); + } else { + /* no change */ + } +} + +static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300, + struct offload_registers_access + *register_access, u8 func) +{ + u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count; + memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access, + sizeof(struct offload_registers_access)); + vub300->fn[func].offload_count += 1; + vub300->total_offload_count += 1; +} + +static void add_offloaded_reg(struct vub300_mmc_host *vub300, + struct offload_registers_access *register_access) +{ + u32 Register = ((0x03 & register_access->command_byte[0]) << 15) + | ((0xFF & register_access->command_byte[1]) << 7) + | ((0xFE & register_access->command_byte[2]) >> 1); + u8 func = ((0x70 & register_access->command_byte[0]) >> 4); + u8 regs = vub300->dynamic_register_count; + u8 i = 0; + while (0 < regs-- && 1 == vub300->sdio_register[i].activate) { + if (vub300->sdio_register[i].func_num == func && + vub300->sdio_register[i].sdio_reg == Register) { + if (vub300->sdio_register[i].prepared == 0) + vub300->sdio_register[i].prepared = 1; + vub300->sdio_register[i].response = + register_access->Respond_Byte[2]; + vub300->sdio_register[i].regvalue = + register_access->Respond_Byte[3]; + return; + } else { + i += 1; + continue; + } + }; + __add_offloaded_reg_to_fifo(vub300, register_access, func); +} + +static void check_vub300_port_status(struct vub300_mmc_host *vub300) +{ + /* + * cmd_mutex is held by vub300_pollwork_thread, + * vub300_deadwork_thread or vub300_cmndwork_thread + */ + int retval; + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + GET_SYSTEM_PORT_STATUS, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, &vub300->system_port_status, + sizeof(vub300->system_port_status), HZ); + if (sizeof(vub300->system_port_status) == retval) + new_system_port_status(vub300); +} + +static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + if (vub300->command_res_urb->actual_length == 0) + return; + + switch (vub300->resp.common.header_type) { + case RESPONSE_INTERRUPT: + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + mmc_signal_sdio_irq(vub300->mmc); + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + break; + case RESPONSE_ERROR: + if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE) + check_vub300_port_status(vub300); + break; + case RESPONSE_STATUS: + vub300->system_port_status = vub300->resp.status; + new_system_port_status(vub300); + if (!vub300->card_present) + vub300_queue_poll_work(vub300, HZ / 5); + break; + case RESPONSE_IRQ_DISABLED: + { + int offloaded_data_length = vub300->resp.common.header_size - 3; + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + 
mmc_signal_sdio_irq(vub300->mmc); + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + break; + } + case RESPONSE_IRQ_ENABLED: + { + int offloaded_data_length = vub300->resp.common.header_size - 3; + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + mmc_signal_sdio_irq(vub300->mmc); + else if (vub300->irqs_queued) + vub300->irqs_queued += 1; + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 0; + mutex_unlock(&vub300->irq_mutex); + break; + } + case RESPONSE_NO_INTERRUPT: + vub300_queue_poll_work(vub300, 1); + break; + default: + break; + } +} + +static void __do_poll(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + long commretval; + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + init_completion(&vub300->irqpoll_complete); + send_irqpoll(vub300); + commretval = wait_for_completion_timeout(&vub300->irqpoll_complete, + msecs_to_jiffies(500)); + if (vub300->usb_transport_fail) { + /* no need to do anything */ + } else if (commretval == 0) { + vub300->usb_timed_out = 1; + usb_kill_urb(vub300->command_out_urb); + usb_kill_urb(vub300->command_res_urb); + } else if (commretval < 0) { + vub300_queue_poll_work(vub300, 1); + } else { /* commretval > 0 */ + __vub300_irqpoll_response(vub300); + } +} + +/* this thread runs only when the driver + * is trying to poll the device for an IRQ + */ +static void vub300_pollwork_thread(struct work_struct *work) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = container_of(work, + struct vub300_mmc_host, pollwork.work); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } + mutex_lock(&vub300->cmd_mutex); + if (vub300->cmd) { + vub300_queue_poll_work(vub300, 1); + } else if (!vub300->card_present) { + /* no need to do anything */ + } else { /* vub300->card_present */ + mutex_lock(&vub300->irq_mutex); + if (!vub300->irq_enabled) { + mutex_unlock(&vub300->irq_mutex); + } else if (vub300->irqs_queued) { + vub300->irqs_queued -= 1; + mmc_signal_sdio_irq(vub300->mmc); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + mutex_unlock(&vub300->irq_mutex); + } else { /* NOT vub300->irqs_queued */ + mutex_unlock(&vub300->irq_mutex); + __do_poll(vub300); + } + } + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_deadwork_thread(struct work_struct *work) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = + container_of(work, struct vub300_mmc_host, deadwork); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } + mutex_lock(&vub300->cmd_mutex); + if (vub300->cmd) { + /* + * a command got in as the inactivity + * timer expired - so we just let the + * processing of the command show if + * the device is dead + */ + } else if (vub300->card_present) { + check_vub300_port_status(vub300); + } else if (vub300->mmc && vub300->mmc->card && + mmc_card_present(vub300->mmc->card)) { + /* + * the MMC core must not have responded + * to the previous indication - lets + * hope that it eventually does so we + * will just ignore this for now + */ + } else { + check_vub300_port_status(vub300); + } + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_inactivity_timer_expired(unsigned long data) 
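
/*
 * Aside: the vub300_queue_*_work() helpers above all follow the same kref
 * discipline: take a reference before queue_work(), and drop it again if the
 * work item was already pending, so the refcount always equals the number of
 * work executions still owed. A minimal, hypothetical sketch of that idiom:
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	struct kref kref;
	struct work_struct work;
};

static void my_dev_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_dev, kref));
}

static void my_dev_queue(struct my_dev *dev, struct workqueue_struct *wq)
{
	kref_get(&dev->kref);
	if (!queue_work(wq, &dev->work))
		kref_put(&dev->kref, my_dev_release);	/* already queued: undo */
	/* otherwise the work handler drops the reference when it finishes */
}
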
+{ /* softirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + } else if (vub300->cmd) { + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + } else { + vub300_queue_dead_work(vub300); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + } +} + +static int vub300_response_error(u8 error_code) +{ + switch (error_code) { + case SD_ERROR_PIO_TIMEOUT: + case SD_ERROR_1BIT_TIMEOUT: + case SD_ERROR_4BIT_TIMEOUT: + return -ETIMEDOUT; + case SD_ERROR_STAT_DATA: + case SD_ERROR_OVERRUN: + case SD_ERROR_STAT_CMD: + case SD_ERROR_STAT_CMD_TIMEOUT: + case SD_ERROR_SDCRDY_STUCK: + case SD_ERROR_UNHANDLED: + case SD_ERROR_1BIT_CRC_WRONG: + case SD_ERROR_4BIT_CRC_WRONG: + case SD_ERROR_1BIT_CRC_ERROR: + case SD_ERROR_4BIT_CRC_ERROR: + case SD_ERROR_NO_CMD_ENDBIT: + case SD_ERROR_NO_1BIT_DATEND: + case SD_ERROR_NO_4BIT_DATEND: + case SD_ERROR_1BIT_DATA_TIMEOUT: + case SD_ERROR_4BIT_DATA_TIMEOUT: + case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT: + case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT: + return -EILSEQ; + case 33: + return -EILSEQ; + case SD_ERROR_ILLEGAL_COMMAND: + return -EINVAL; + case SD_ERROR_NO_DEVICE: + return -ENOMEDIUM; + default: + return -ENODEV; + } +} + +static void command_res_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + /* we have to let the initiator handle the error */ + } else if (vub300->command_res_urb->actual_length == 0) { + /* + * we have seen this happen once or twice and + * we suspect a buggy USB host controller + */ + } else if (!vub300->data) { + /* this means that the command (typically CMD52) suceeded */ + } else if (vub300->resp.common.header_type != 0x02) { + /* + * this is an error response from the VUB300 chip + * and we let the initiator handle it + */ + } else if (vub300->urb) { + vub300->cmd->error = + vub300_response_error(vub300->resp.error.error_code); + usb_unlink_urb(vub300->urb); + } else { + vub300->cmd->error = + vub300_response_error(vub300->resp.error.error_code); + usb_sg_cancel(&vub300->sg_request); + } + complete(&vub300->command_complete); /* got_response_in */ +} + +static void command_out_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + complete(&vub300->command_complete); + } else { + int ret; + unsigned int pipe = + usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); + usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, + &vub300->resp, sizeof(vub300->resp), + command_res_completed, vub300); + vub300->command_res_urb->actual_length = 0; + ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); + if (ret == 0) { + /* + * the urb completion handler will call + * our completion handler + */ + } else { + /* + * and thus we only call it directly + * when it will not be called + */ + complete(&vub300->command_complete); + } + } +} + +/* + * the STUFF bits are masked out for the comparisons + */ +static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300, + u32 cmd_arg) +{ + if ((0xFBFFFE00 & cmd_arg) == 0x80022200) + vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80022000) + vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80042200) + vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]); + else 
if ((0xFBFFFE00 & cmd_arg) == 0x80042000) + vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80062200) + vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80062000) + vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80082200) + vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80082000) + vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200) + vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000) + vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200) + vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000) + vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200) + vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000) + vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]); + else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00) + vub300->bus_width = 1; + else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02) + vub300->bus_width = 4; +} + +static void send_command(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + struct mmc_command *cmd = vub300->cmd; + struct mmc_data *data = vub300->data; + int retval; + int i; + u8 response_type; + if (vub300->app_spec) { + switch (cmd->opcode) { + case 6: + response_type = SDRT_1; + vub300->resp_len = 6; + if (0x00000000 == (0x00000003 & cmd->arg)) + vub300->bus_width = 1; + else if (0x00000002 == (0x00000003 & cmd->arg)) + vub300->bus_width = 4; + else + dev_err(&vub300->udev->dev, + "unexpected ACMD6 bus_width=%d\n", + 0x00000003 & cmd->arg); + break; + case 13: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 22: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 23: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 41: + response_type = SDRT_3; + vub300->resp_len = 6; + break; + case 42: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 51: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 55: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + default: + vub300->resp_len = 0; + cmd->error = -EINVAL; + complete(&vub300->command_complete); + return; + } + vub300->app_spec = 0; + } else { + switch (cmd->opcode) { + case 0: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 1: + response_type = SDRT_3; + vub300->resp_len = 6; + break; + case 2: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 3: + response_type = SDRT_6; + vub300->resp_len = 6; + break; + case 4: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 5: + response_type = SDRT_4; + vub300->resp_len = 6; + break; + case 6: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 7: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 8: + response_type = SDRT_7; + vub300->resp_len = 6; + break; + case 9: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 10: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 12: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 13: + response_type = SDRT_1; + vub300->resp_len = 
6; + break; + case 15: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 16: + for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) + vub300->fbs[i] = 0xFFFF & cmd->arg; + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 17: + case 18: + case 24: + case 25: + case 27: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 28: + case 29: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 30: + case 32: + case 33: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 38: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 42: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 52: + response_type = SDRT_5; + vub300->resp_len = 6; + snoop_block_size_and_bus_width(vub300, cmd->arg); + break; + case 53: + response_type = SDRT_5; + vub300->resp_len = 6; + break; + case 55: + response_type = SDRT_1; + vub300->resp_len = 6; + vub300->app_spec = 1; + break; + case 56: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + default: + vub300->resp_len = 0; + cmd->error = -EINVAL; + complete(&vub300->command_complete); + return; + } + } + /* + * it is a shame that we can not use "sizeof(struct sd_command_header)" + * this is because the packet _must_ be padded to 64 bytes + */ + vub300->cmnd.head.header_size = 20; + vub300->cmnd.head.header_type = 0x00; + vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ + vub300->cmnd.head.command_type = 0x00; /* standard read command */ + vub300->cmnd.head.response_type = response_type; + vub300->cmnd.head.command_index = cmd->opcode; + vub300->cmnd.head.arguments[0] = cmd->arg >> 24; + vub300->cmnd.head.arguments[1] = cmd->arg >> 16; + vub300->cmnd.head.arguments[2] = cmd->arg >> 8; + vub300->cmnd.head.arguments[3] = cmd->arg >> 0; + if (cmd->opcode == 52) { + int fn = 0x7 & (cmd->arg >> 28); + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF; + vub300->cmnd.head.command_type = 0x00; + vub300->cmnd.head.transfer_size[0] = 0; + vub300->cmnd.head.transfer_size[1] = 0; + vub300->cmnd.head.transfer_size[2] = 0; + vub300->cmnd.head.transfer_size[3] = 0; + } else if (!data) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF; + vub300->cmnd.head.command_type = 0x00; + vub300->cmnd.head.transfer_size[0] = 0; + vub300->cmnd.head.transfer_size[1] = 0; + vub300->cmnd.head.transfer_size[2] = 0; + vub300->cmnd.head.transfer_size[3] = 0; + } else if (cmd->opcode == 53) { + int fn = 0x7 & (cmd->arg >> 28); + if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */ + vub300->cmnd.head.block_count[0] = + (data->blocks >> 8) & 0xFF; + vub300->cmnd.head.block_count[1] = + (data->blocks >> 0) & 0xFF; + vub300->cmnd.head.block_size[0] = + (data->blksz >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = + (data->blksz >> 0) & 0xFF; + } else { /* BYTE MODE */ + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = + (vub300->datasize >> 0) & 0xFF; + } + vub300->cmnd.head.command_type = + (MMC_DATA_READ & data->flags) ? 
0x00 : 0x80; + vub300->cmnd.head.transfer_size[0] = + (vub300->datasize >> 24) & 0xFF; + vub300->cmnd.head.transfer_size[1] = + (vub300->datasize >> 16) & 0xFF; + vub300->cmnd.head.transfer_size[2] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.transfer_size[3] = + (vub300->datasize >> 0) & 0xFF; + if (vub300->datasize < vub300->fbs[fn]) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + } + } else { + vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; + vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; + vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; + vub300->cmnd.head.command_type = + (MMC_DATA_READ & data->flags) ? 0x00 : 0x80; + vub300->cmnd.head.transfer_size[0] = + (vub300->datasize >> 24) & 0xFF; + vub300->cmnd.head.transfer_size[1] = + (vub300->datasize >> 16) & 0xFF; + vub300->cmnd.head.transfer_size[2] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.transfer_size[3] = + (vub300->datasize >> 0) & 0xFF; + if (vub300->datasize < vub300->fbs[0]) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + } + } + if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) { + u16 block_size = vub300->cmnd.head.block_size[1] | + (vub300->cmnd.head.block_size[0] << 8); + u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY - + (FIRMWARE_BLOCK_BOUNDARY % block_size); + vub300->cmnd.head.block_boundary[0] = + (block_boundary >> 8) & 0xFF; + vub300->cmnd.head.block_boundary[1] = + (block_boundary >> 0) & 0xFF; + } else { + vub300->cmnd.head.block_boundary[0] = 0; + vub300->cmnd.head.block_boundary[1] = 0; + } + usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, + usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep), + &vub300->cmnd, sizeof(vub300->cmnd), + command_out_completed, vub300); + retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); + if (retval < 0) { + cmd->error = retval; + complete(&vub300->command_complete); + return; + } else { + return; + } +} + +/* + * timer callback runs in atomic mode + * so it cannot call usb_kill_urb() + */ +static void vub300_sg_timed_out(unsigned long data) +{ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + vub300->usb_timed_out = 1; + usb_sg_cancel(&vub300->sg_request); + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); +} + +static u16 roundup_to_multiple_of_64(u16 number) +{ + return 0xFFC0 & (0x3F + number); +} + +/* + * this is a separate function to solve the 80 column width restriction + */ +static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, + const struct firmware *fw) +{ + u8 register_count = 0; + u16 ts = 0; + u16 interrupt_size = 0; + const u8 *data = fw->data; + int size = fw->size; + u8 c; + dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n", + vub300->vub_name); + do { + c = *data++; + } while (size-- && c); /* skip comment */ + dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data, + vub300->vub_name); + if (size < 4) { + dev_err(&vub300->udev->dev, + "corrupt offload pseudocode in firmware %s\n", + vub300->vub_name); + strncpy(vub300->vub_name, "corrupt offload pseudocode", + sizeof(vub300->vub_name)); + return; + } + interrupt_size += *data++; + size -= 1; + interrupt_size <<= 8; + interrupt_size += *data++; + size -= 1; + if (interrupt_size < size) { + u16 xfer_length = roundup_to_multiple_of_64(interrupt_size); + u8 
*xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); + if (xfer_buffer) { + int retval; + memcpy(xfer_buffer, data, interrupt_size); + memset(xfer_buffer + interrupt_size, 0, + xfer_length - interrupt_size); + size -= interrupt_size; + data += interrupt_size; + retval = + usb_control_msg(vub300->udev, + usb_sndctrlpipe(vub300->udev, 0), + SET_INTERRUPT_PSEUDOCODE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, 0x0000, 0x0000, + xfer_buffer, xfer_length, HZ); + kfree(xfer_buffer); + if (retval < 0) { + strncpy(vub300->vub_name, + "SDIO pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "not enough memory for xfer buffer to send" + " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data, + vub300->vub_name); + strncpy(vub300->vub_name, + "SDIO interrupt pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "corrupt interrupt pseudocode in firmware %s %s\n", + fw->data, vub300->vub_name); + strncpy(vub300->vub_name, "corrupt interrupt pseudocode", + sizeof(vub300->vub_name)); + return; + } + ts += *data++; + size -= 1; + ts <<= 8; + ts += *data++; + size -= 1; + if (ts < size) { + u16 xfer_length = roundup_to_multiple_of_64(ts); + u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); + if (xfer_buffer) { + int retval; + memcpy(xfer_buffer, data, ts); + memset(xfer_buffer + ts, 0, + xfer_length - ts); + size -= ts; + data += ts; + retval = + usb_control_msg(vub300->udev, + usb_sndctrlpipe(vub300->udev, 0), + SET_TRANSFER_PSEUDOCODE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, 0x0000, 0x0000, + xfer_buffer, xfer_length, HZ); + kfree(xfer_buffer); + if (retval < 0) { + strncpy(vub300->vub_name, + "SDIO pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "not enough memory for xfer buffer to send" + " TRANSFER_PSEUDOCODE for %s %s\n", fw->data, + vub300->vub_name); + strncpy(vub300->vub_name, + "SDIO transfer pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "corrupt transfer pseudocode in firmware %s %s\n", + fw->data, vub300->vub_name); + strncpy(vub300->vub_name, "corrupt transfer pseudocode", + sizeof(vub300->vub_name)); + return; + } + register_count += *data++; + size -= 1; + if (register_count * 4 == size) { + int I = vub300->dynamic_register_count = register_count; + int i = 0; + while (I--) { + unsigned int func_num = 0; + vub300->sdio_register[i].func_num = *data++; + size -= 1; + func_num += *data++; + size -= 1; + func_num <<= 8; + func_num += *data++; + size -= 1; + func_num <<= 8; + func_num += *data++; + size -= 1; + vub300->sdio_register[i].sdio_reg = func_num; + vub300->sdio_register[i].activate = 1; + vub300->sdio_register[i].prepared = 0; + i += 1; + } + dev_info(&vub300->udev->dev, + "initialized %d dynamic pseudocode registers\n", + vub300->dynamic_register_count); + return; + } else { + dev_err(&vub300->udev->dev, + "corrupt dynamic registers in firmware %s\n", + vub300->vub_name); + strncpy(vub300->vub_name, "corrupt dynamic registers", + sizeof(vub300->vub_name)); + return; + } +} + +/* + * if the binary containing the EMPTY PseudoCode can not be found + * vub300->vub_name is set anyway in order to prevent an automatic retry + */ +static void download_offload_pseudocode(struct vub300_mmc_host *vub300) +{ + struct mmc_card *card = vub300->mmc->card; + int sdio_funcs = card->sdio_funcs; + const struct firmware *fw = NULL; + 
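
/*
 * Aside: a self-contained worked example (not driver code) of the SDIO CMD52
 * argument layout behind the FUN()/REG() macros and the magic constants in
 * snoop_block_size_and_bus_width() above: R/W flag in bit 31, function
 * number in bits 30:28, register address in bits 25:9, data byte in bits
 * 7:0. The register decoded below, 0x111, is (per the SDIO spec) the high
 * byte of function 1's I/O block size in its FBR area, which is what the
 * snoop code mirrors into fbs[1].
 */
#include <stdint.h>
#include <stdio.h>

#define ARG_FUN(arg) (0x000007u & ((arg) >> 28))	/* like FUN(c) */
#define ARG_REG(arg) (0x01FFFFu & ((arg) >> 9))		/* like REG(c) */

int main(void)
{
	uint32_t arg = 0x80022200u;	/* one of the patterns matched above */

	printf("write flag = %u\n", (unsigned)((arg >> 31) & 1u));	/* 1       */
	printf("function   = %u\n", (unsigned)ARG_FUN(arg));		/* 0       */
	printf("register   = 0x%05X\n", (unsigned)ARG_REG(arg));	/* 0x00111 */
	printf("data byte  = 0x%02X\n", (unsigned)(arg & 0xFFu));	/* 0x00    */
	return 0;
}
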
int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name), + "vub_%04X%04X", card->cis.vendor, card->cis.device); + int n = 0; + int retval; + for (n = 0; n < sdio_funcs; n++) { + struct sdio_func *sf = card->sdio_func[n]; + l += snprintf(vub300->vub_name + l, + sizeof(vub300->vub_name) - l, "_%04X%04X", + sf->vendor, sf->device); + }; + snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin"); + dev_info(&vub300->udev->dev, "requesting offload firmware %s\n", + vub300->vub_name); + retval = request_firmware(&fw, vub300->vub_name, &card->dev); + if (retval < 0) { + strncpy(vub300->vub_name, "vub_default.bin", + sizeof(vub300->vub_name)); + retval = request_firmware(&fw, vub300->vub_name, &card->dev); + if (retval < 0) { + strncpy(vub300->vub_name, + "no SDIO offload firmware found", + sizeof(vub300->vub_name)); + } else { + __download_offload_pseudocode(vub300, fw); + release_firmware(fw); + } + } else { + __download_offload_pseudocode(vub300, fw); + release_firmware(fw); + } +} + +static void vub300_usb_bulk_msg_completion(struct urb *urb) +{ /* urb completion handler - hardirq */ + complete((struct completion *)urb->context); +} + +static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300, + unsigned int pipe, void *data, int len, + int *actual_length, int timeout_msecs) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + struct usb_device *usb_dev = vub300->udev; + struct completion done; + int retval; + vub300->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!vub300->urb) + return -ENOMEM; + usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len, + vub300_usb_bulk_msg_completion, NULL); + init_completion(&done); + vub300->urb->context = &done; + vub300->urb->actual_length = 0; + retval = usb_submit_urb(vub300->urb, GFP_KERNEL); + if (unlikely(retval)) + goto out; + if (!wait_for_completion_timeout + (&done, msecs_to_jiffies(timeout_msecs))) { + retval = -ETIMEDOUT; + usb_kill_urb(vub300->urb); + } else { + retval = vub300->urb->status; + } +out: + *actual_length = vub300->urb->actual_length; + usb_free_urb(vub300->urb); + vub300->urb = NULL; + return retval; +} + +static int __command_read_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, struct mmc_data *data) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + int linear_length = vub300->datasize; + int padded_length = vub300->large_usb_packets ? 
+ ((511 + linear_length) >> 9) << 9 : + ((63 + linear_length) >> 6) << 6; + if ((padded_length == linear_length) || !pad_input_to_usb_pkt) { + int result; + unsigned pipe; + pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep); + result = usb_sg_init(&vub300->sg_request, vub300->udev, + pipe, 0, data->sg, + data->sg_len, 0, GFP_KERNEL); + if (result < 0) { + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); + cmd->error = result; + data->bytes_xfered = 0; + return 0; + } else { + vub300->sg_transfer_timer.expires = + jiffies + msecs_to_jiffies(2000 + + (linear_length / 16384)); + add_timer(&vub300->sg_transfer_timer); + usb_sg_wait(&vub300->sg_request); + del_timer(&vub300->sg_transfer_timer); + if (vub300->sg_request.status < 0) { + cmd->error = vub300->sg_request.status; + data->bytes_xfered = 0; + return 0; + } else { + data->bytes_xfered = vub300->datasize; + return linear_length; + } + } + } else { + u8 *buf = kmalloc(padded_length, GFP_KERNEL); + if (buf) { + int result; + unsigned pipe = usb_rcvbulkpipe(vub300->udev, + vub300->data_inp_ep); + int actual_length = 0; + result = vub300_usb_bulk_msg(vub300, pipe, buf, + padded_length, &actual_length, + 2000 + (padded_length / 16384)); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + kfree(buf); + return 0; + } else if (actual_length < linear_length) { + cmd->error = -EREMOTEIO; + data->bytes_xfered = 0; + kfree(buf); + return 0; + } else { + sg_copy_from_buffer(data->sg, data->sg_len, buf, + linear_length); + kfree(buf); + data->bytes_xfered = vub300->datasize; + return linear_length; + } + } else { + cmd->error = -ENOMEM; + data->bytes_xfered = 0; + return 0; + } + } +} + +static int __command_write_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, struct mmc_data *data) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep); + int linear_length = vub300->datasize; + int modulo_64_length = linear_length & 0x003F; + int modulo_512_length = linear_length & 0x01FF; + if (linear_length < 64) { + int result; + int actual_length; + sg_copy_to_buffer(data->sg, data->sg_len, + vub300->padded_buffer, + sizeof(vub300->padded_buffer)); + memset(vub300->padded_buffer + linear_length, 0, + sizeof(vub300->padded_buffer) - linear_length); + result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer, + sizeof(vub300->padded_buffer), + &actual_length, 2000 + + (sizeof(vub300->padded_buffer) / + 16384)); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) || + (vub300->large_usb_packets && (64 > modulo_512_length)) + ) { /* don't you just love these work-rounds */ + int padded_length = ((63 + linear_length) >> 6) << 6; + u8 *buf = kmalloc(padded_length, GFP_KERNEL); + if (buf) { + int result; + int actual_length; + sg_copy_to_buffer(data->sg, data->sg_len, buf, + padded_length); + memset(buf + linear_length, 0, + padded_length - linear_length); + result = + vub300_usb_bulk_msg(vub300, pipe, buf, + padded_length, &actual_length, + 2000 + padded_length / 16384); + kfree(buf); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } else { + cmd->error = -ENOMEM; + data->bytes_xfered = 0; + } + } else { /* no data padding required */ + int result; + unsigned char buf[64 * 4]; + 
sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf)); + result = usb_sg_init(&vub300->sg_request, vub300->udev, + pipe, 0, data->sg, + data->sg_len, 0, GFP_KERNEL); + if (result < 0) { + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); + cmd->error = result; + data->bytes_xfered = 0; + } else { + vub300->sg_transfer_timer.expires = + jiffies + msecs_to_jiffies(2000 + + linear_length / 16384); + add_timer(&vub300->sg_transfer_timer); + usb_sg_wait(&vub300->sg_request); + if (cmd->error) { + data->bytes_xfered = 0; + } else { + del_timer(&vub300->sg_transfer_timer); + if (vub300->sg_request.status < 0) { + cmd->error = vub300->sg_request.status; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } + } + } + return linear_length; +} + +static void __vub300_command_response(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, + struct mmc_data *data, int data_length) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + long respretval; + int msec_timeout = 1000 + data_length / 4; + respretval = + wait_for_completion_timeout(&vub300->command_complete, + msecs_to_jiffies(msec_timeout)); + if (respretval == 0) { /* TIMED OUT */ + /* we don't know which of "out" and "res" if any failed */ + int result; + vub300->usb_timed_out = 1; + usb_kill_urb(vub300->command_out_urb); + usb_kill_urb(vub300->command_res_urb); + cmd->error = -ETIMEDOUT; + result = usb_lock_device_for_reset(vub300->udev, + vub300->interface); + if (result == 0) { + result = usb_reset_device(vub300->udev); + usb_unlock_device(vub300->udev); + } + } else if (respretval < 0) { + /* we don't know which of "out" and "res" if any failed */ + usb_kill_urb(vub300->command_out_urb); + usb_kill_urb(vub300->command_res_urb); + cmd->error = respretval; + } else if (cmd->error) { + /* + * the error occurred sending the command + * or receiving the response + */ + } else if (vub300->command_out_urb->status) { + vub300->usb_transport_fail = vub300->command_out_urb->status; + cmd->error = -EPROTO == vub300->command_out_urb->status ? + -ESHUTDOWN : vub300->command_out_urb->status; + } else if (vub300->command_res_urb->status) { + vub300->usb_transport_fail = vub300->command_res_urb->status; + cmd->error = -EPROTO == vub300->command_res_urb->status ? 
+ -ESHUTDOWN : vub300->command_res_urb->status; + } else if (vub300->resp.common.header_type == 0x00) { + /* + * the command completed successfully + * and there was no piggybacked data + */ + } else if (vub300->resp.common.header_type == RESPONSE_ERROR) { + cmd->error = + vub300_response_error(vub300->resp.error.error_code); + if (vub300->data) + usb_sg_cancel(&vub300->sg_request); + } else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued += 1; + } else if (vub300->irq_enabled) { + vub300->irqs_queued += 1; + vub300_queue_poll_work(vub300, 0); + } else { + vub300->irqs_queued += 1; + } + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued += 1; + } else if (vub300->irq_enabled) { + vub300->irqs_queued += 1; + vub300_queue_poll_work(vub300, 0); + } else { + vub300->irqs_queued += 1; + } + vub300->irq_disabled = 0; + mutex_unlock(&vub300->irq_mutex); + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else { + cmd->error = -EINVAL; + } +} + +static void construct_request_response(struct vub300_mmc_host *vub300, + struct mmc_command *cmd) +{ + int resp_len = vub300->resp_len; + int less_cmd = (17 == resp_len) ? 
resp_len : resp_len - 1; + int bytes = 3 & less_cmd; + int words = less_cmd >> 2; + u8 *r = vub300->resp.response.command_response; + if (bytes == 3) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16) + | (r[3 + (words << 2)] << 8); + } else if (bytes == 2) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16); + } else if (bytes == 1) { + cmd->resp[words] = (r[1 + (words << 2)] << 24); + } + while (words-- > 0) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16) + | (r[3 + (words << 2)] << 8) + | (r[4 + (words << 2)] << 0); + } + if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0])) + cmd->resp[0] &= 0xFFFFFF00; +} + +/* this thread runs only when there is an upper level command req outstanding */ +static void vub300_cmndwork_thread(struct work_struct *work) +{ + struct vub300_mmc_host *vub300 = + container_of(work, struct vub300_mmc_host, cmndwork); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } else { + struct mmc_request *req = vub300->req; + struct mmc_command *cmd = vub300->cmd; + struct mmc_data *data = vub300->data; + int data_length; + mutex_lock(&vub300->cmd_mutex); + init_completion(&vub300->command_complete); + if (likely(vub300->vub_name[0]) || !vub300->mmc->card || + !mmc_card_present(vub300->mmc->card)) { + /* + * the name of the EMPTY Pseudo firmware file + * is used as a flag to indicate that the file + * has been already downloaded to the VUB300 chip + */ + } else if (0 == vub300->mmc->card->sdio_funcs) { + strncpy(vub300->vub_name, "SD memory device", + sizeof(vub300->vub_name)); + } else { + download_offload_pseudocode(vub300); + } + send_command(vub300); + if (!data) + data_length = 0; + else if (MMC_DATA_READ & data->flags) + data_length = __command_read_data(vub300, cmd, data); + else + data_length = __command_write_data(vub300, cmd, data); + __vub300_command_response(vub300, cmd, data, data_length); + vub300->req = NULL; + vub300->cmd = NULL; + vub300->data = NULL; + if (cmd->error) { + if (cmd->error == -ENOMEDIUM) + check_vub300_port_status(vub300); + mutex_unlock(&vub300->cmd_mutex); + mmc_request_done(vub300->mmc, req); + kref_put(&vub300->kref, vub300_delete); + return; + } else { + construct_request_response(vub300, cmd); + vub300->resp_len = 0; + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); + mmc_request_done(vub300->mmc, req); + return; + } + } +} + +static int examine_cyclic_buffer(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, u8 Function) +{ + /* cmd_mutex is held by vub300_mmc_request */ + u8 cmd0 = 0xFF & (cmd->arg >> 24); + u8 cmd1 = 0xFF & (cmd->arg >> 16); + u8 cmd2 = 0xFF & (cmd->arg >> 8); + u8 cmd3 = 0xFF & (cmd->arg >> 0); + int first = MAXREGMASK & vub300->fn[Function].offload_point; + struct offload_registers_access *rf = &vub300->fn[Function].reg[first]; + if (cmd0 == rf->command_byte[0] && + cmd1 == rf->command_byte[1] && + cmd2 == rf->command_byte[2] && + cmd3 == rf->command_byte[3]) { + u8 checksum = 0x00; + cmd->resp[1] = checksum << 24; + cmd->resp[0] = (rf->Respond_Byte[0] << 24) + | (rf->Respond_Byte[1] << 16) + | (rf->Respond_Byte[2] << 8) + | (rf->Respond_Byte[3] << 0); + vub300->fn[Function].offload_point += 1; + vub300->fn[Function].offload_count -= 1; + vub300->total_offload_count -= 1; + return 1; + } else { + int delta = 1; /* because it does not match the first one */ + u8 register_count = vub300->fn[Function].offload_count - 1; + u32 register_point = 
vub300->fn[Function].offload_point + 1; + while (0 < register_count) { + int point = MAXREGMASK & register_point; + struct offload_registers_access *r = + &vub300->fn[Function].reg[point]; + if (cmd0 == r->command_byte[0] && + cmd1 == r->command_byte[1] && + cmd2 == r->command_byte[2] && + cmd3 == r->command_byte[3]) { + u8 checksum = 0x00; + cmd->resp[1] = checksum << 24; + cmd->resp[0] = (r->Respond_Byte[0] << 24) + | (r->Respond_Byte[1] << 16) + | (r->Respond_Byte[2] << 8) + | (r->Respond_Byte[3] << 0); + vub300->fn[Function].offload_point += delta; + vub300->fn[Function].offload_count -= delta; + vub300->total_offload_count -= delta; + return 1; + } else { + register_point += 1; + register_count -= 1; + delta += 1; + continue; + } + } + return 0; + } +} + +static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd) +{ + /* cmd_mutex is held by vub300_mmc_request */ + u8 regs = vub300->dynamic_register_count; + u8 i = 0; + u8 func = FUN(cmd); + u32 reg = REG(cmd); + while (0 < regs--) { + if ((vub300->sdio_register[i].func_num == func) && + (vub300->sdio_register[i].sdio_reg == reg)) { + if (!vub300->sdio_register[i].prepared) { + return 0; + } else if ((0x80000000 & cmd->arg) == 0x80000000) { + /* + * a write to a dynamic register + * nullifies our offloaded value + */ + vub300->sdio_register[i].prepared = 0; + return 0; + } else { + u8 checksum = 0x00; + u8 rsp0 = 0x00; + u8 rsp1 = 0x00; + u8 rsp2 = vub300->sdio_register[i].response; + u8 rsp3 = vub300->sdio_register[i].regvalue; + vub300->sdio_register[i].prepared = 0; + cmd->resp[1] = checksum << 24; + cmd->resp[0] = (rsp0 << 24) + | (rsp1 << 16) + | (rsp2 << 8) + | (rsp3 << 0); + return 1; + } + } else { + i += 1; + continue; + } + }; + if (vub300->total_offload_count == 0) + return 0; + else if (vub300->fn[func].offload_count == 0) + return 0; + else + return examine_cyclic_buffer(vub300, cmd, func); +} + +static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ /* NOT irq */ + struct mmc_command *cmd = req->cmd; + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) { + cmd->error = -ESHUTDOWN; + mmc_request_done(mmc, req); + return; + } else { + struct mmc_data *data = req->data; + if (!vub300->card_powered) { + cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, req); + return; + } + if (!vub300->card_present) { + cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, req); + return; + } + if (vub300->usb_transport_fail) { + cmd->error = vub300->usb_transport_fail; + mmc_request_done(mmc, req); + return; + } + if (!vub300->interface) { + cmd->error = -ENODEV; + mmc_request_done(mmc, req); + return; + } + kref_get(&vub300->kref); + mutex_lock(&vub300->cmd_mutex); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + /* + * for performance we have to return immediately + * if the requested data has been offloaded + */ + if (cmd->opcode == 52 && + satisfy_request_from_offloaded_data(vub300, cmd)) { + cmd->error = 0; + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); + mmc_request_done(mmc, req); + return; + } else { + vub300->cmd = cmd; + vub300->req = req; + vub300->data = data; + if (data) + vub300->datasize = data->blksz * data->blocks; + else + vub300->datasize = 0; + vub300_queue_cmnd_work(vub300); + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); + /* + * the kernel lock diagnostics complain + * if the cmd_mutex * is "passed on" + * to the cmndwork thread, + * so we must release it now + 
* and re-acquire it in the cmndwork thread + */ + } + } +} + +static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8], + struct mmc_ios *ios) +{ + int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!! */ + int retval; + u32 kHzClock; + if (ios->clock >= 48000000) + kHzClock = 48000; + else if (ios->clock >= 24000000) + kHzClock = 24000; + else if (ios->clock >= 20000000) + kHzClock = 20000; + else if (ios->clock >= 15000000) + kHzClock = 15000; + else if (ios->clock >= 200000) + kHzClock = 200; + else + kHzClock = 0; + { + int i; + u64 c = kHzClock; + for (i = 0; i < buf_array_size; i++) { + buf[i] = c; + c >>= 8; + } + } + retval = + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_CLOCK_SPEED, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x00, 0x00, buf, buf_array_size, HZ); + if (retval != 8) { + dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED" + " %dkHz failed with retval=%d\n", kHzClock, retval); + } else { + dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED" + " %dkHz\n", kHzClock); + } +} + +static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) + return; + kref_get(&vub300->kref); + mutex_lock(&vub300->cmd_mutex); + if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) { + vub300->card_powered = 0; + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_SD_POWER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, NULL, 0, HZ); + /* must wait for the VUB300 u-proc to boot up */ + msleep(600); + } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) { + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_SD_POWER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0001, 0x0000, NULL, 0, HZ); + msleep(600); + vub300->card_powered = 1; + } else if (ios->power_mode == MMC_POWER_ON) { + u8 *buf = kmalloc(8, GFP_KERNEL); + if (buf) { + __set_clock_speed(vub300, buf, ios); + kfree(buf); + } + } else { + /* this should mean no change of state */ + } + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static int vub300_mmc_get_ro(struct mmc_host *mmc) +{ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + return vub300->read_only; +} + +static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) + return; + kref_get(&vub300->kref); + if (enable) { + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued -= 1; + mmc_signal_sdio_irq(vub300->mmc); + } else if (vub300->irq_disabled) { + vub300->irq_disabled = 0; + vub300->irq_enabled = 1; + vub300_queue_poll_work(vub300, 0); + } else if (vub300->irq_enabled) { + /* this should not happen, so we will just ignore it */ + } else { + vub300->irq_enabled = 1; + vub300_queue_poll_work(vub300, 0); + } + mutex_unlock(&vub300->irq_mutex); + } else { + vub300->irq_enabled = 0; + } + kref_put(&vub300->kref, vub300_delete); +} + +void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); +} + +static struct mmc_host_ops vub300_mmc_ops = { + .request = vub300_mmc_request, + .set_ios = vub300_mmc_set_ios, + .get_ro = vub300_mmc_get_ro, + .enable_sdio_irq = vub300_enable_sdio_irq, + .init_card = vub300_init_card, +}; + +static 
int vub300_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = NULL; + struct usb_host_interface *iface_desc; + struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); + int i; + int retval = -ENOMEM; + struct urb *command_out_urb; + struct urb *command_res_urb; + struct mmc_host *mmc; + char manufacturer[48]; + char product[32]; + char serial_number[32]; + usb_string(udev, udev->descriptor.iManufacturer, manufacturer, + sizeof(manufacturer)); + usb_string(udev, udev->descriptor.iProduct, product, sizeof(product)); + usb_string(udev, udev->descriptor.iSerialNumber, serial_number, + sizeof(serial_number)); + dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n", + udev->descriptor.idVendor, udev->descriptor.idProduct, + manufacturer, product, serial_number); + command_out_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!command_out_urb) { + retval = -ENOMEM; + dev_err(&vub300->udev->dev, + "not enough memory for the command_out_urb\n"); + goto error0; + } + command_res_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!command_res_urb) { + retval = -ENOMEM; + dev_err(&vub300->udev->dev, + "not enough memory for the command_res_urb\n"); + goto error1; + } + /* this also allocates memory for our VUB300 mmc host device */ + mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); + if (!mmc) { + retval = -ENOMEM; + dev_err(&vub300->udev->dev, + "not enough memory for the mmc_host\n"); + goto error4; + } + /* MMC core transfer sizes tunable parameters */ + mmc->caps = 0; + if (!force_1_bit_data_xfers) + mmc->caps |= MMC_CAP_4_BIT_DATA; + if (!force_polling_for_irqs) + mmc->caps |= MMC_CAP_SDIO_IRQ; + mmc->caps &= ~MMC_CAP_NEEDS_POLL; + /* + * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll + * for devices which results in spurious CMD7's being + * issued which stops some SDIO cards from working + */ + if (limit_speed_to_24_MHz) { + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + mmc->f_max = 24000000; + dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n"); + } else { + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + mmc->f_max = 48000000; + } + mmc->f_min = 200000; + mmc->max_blk_count = 511; + mmc->max_blk_size = 512; + mmc->max_segs = 128; + if (force_max_req_size) + mmc->max_req_size = force_max_req_size * 1024; + else + mmc->max_req_size = 64 * 1024; + mmc->max_seg_size = mmc->max_req_size; + mmc->ocr_avail = 0; + mmc->ocr_avail |= MMC_VDD_165_195; + mmc->ocr_avail |= MMC_VDD_20_21; + mmc->ocr_avail |= MMC_VDD_21_22; + mmc->ocr_avail |= MMC_VDD_22_23; + mmc->ocr_avail |= MMC_VDD_23_24; + mmc->ocr_avail |= MMC_VDD_24_25; + mmc->ocr_avail |= MMC_VDD_25_26; + mmc->ocr_avail |= MMC_VDD_26_27; + mmc->ocr_avail |= MMC_VDD_27_28; + mmc->ocr_avail |= MMC_VDD_28_29; + mmc->ocr_avail |= MMC_VDD_29_30; + mmc->ocr_avail |= MMC_VDD_30_31; + mmc->ocr_avail |= MMC_VDD_31_32; + mmc->ocr_avail |= MMC_VDD_32_33; + mmc->ocr_avail |= MMC_VDD_33_34; + mmc->ocr_avail |= MMC_VDD_34_35; + mmc->ocr_avail |= MMC_VDD_35_36; + mmc->ops = &vub300_mmc_ops; + vub300 = mmc_priv(mmc); + vub300->mmc = mmc; + vub300->card_powered = 0; + vub300->bus_width = 0; + vub300->cmnd.head.block_size[0] = 0x00; + vub300->cmnd.head.block_size[1] = 0x00; + vub300->app_spec = 0; + mutex_init(&vub300->cmd_mutex); + mutex_init(&vub300->irq_mutex); + vub300->command_out_urb = command_out_urb; + vub300->command_res_urb = command_res_urb; + vub300->usb_timed_out = 0; + 
vub300->dynamic_register_count = 0; + + for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) { + vub300->fn[i].offload_point = 0; + vub300->fn[i].offload_count = 0; + } + + vub300->total_offload_count = 0; + vub300->irq_enabled = 0; + vub300->irq_disabled = 0; + vub300->irqs_queued = 0; + + for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++) + vub300->sdio_register[i++].activate = 0; + + vub300->udev = udev; + vub300->interface = interface; + vub300->cmnd_res_ep = 0; + vub300->cmnd_out_ep = 0; + vub300->data_inp_ep = 0; + vub300->data_out_ep = 0; + + for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) + vub300->fbs[i] = 512; + + /* + * set up the endpoint information + * + * use the first pair of bulk-in and bulk-out + * endpoints for Command/Response+Interrupt + * + * use the second pair of bulk-in and bulk-out + * endpoints for Data In/Out + */ + vub300->large_usb_packets = 0; + iface_desc = interface->cur_altsetting; + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + struct usb_endpoint_descriptor *endpoint = + &iface_desc->endpoint[i].desc; + dev_info(&vub300->udev->dev, + "vub300 testing %s EndPoint(%d) %02X\n", + usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" : + usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" : + "UNKNOWN", i, endpoint->bEndpointAddress); + if (endpoint->wMaxPacketSize > 64) + vub300->large_usb_packets = 1; + if (usb_endpoint_is_bulk_in(endpoint)) { + if (!vub300->cmnd_res_ep) { + vub300->cmnd_res_ep = + endpoint->bEndpointAddress; + } else if (!vub300->data_inp_ep) { + vub300->data_inp_ep = + endpoint->bEndpointAddress; + } else { + dev_warn(&vub300->udev->dev, + "ignoring" + " unexpected bulk_in endpoint"); + } + } else if (usb_endpoint_is_bulk_out(endpoint)) { + if (!vub300->cmnd_out_ep) { + vub300->cmnd_out_ep = + endpoint->bEndpointAddress; + } else if (!vub300->data_out_ep) { + vub300->data_out_ep = + endpoint->bEndpointAddress; + } else { + dev_warn(&vub300->udev->dev, + "ignoring" + " unexpected bulk_out endpoint"); + } + } else { + dev_warn(&vub300->udev->dev, + "vub300 ignoring EndPoint(%d) %02X", i, + endpoint->bEndpointAddress); + } + } + if (vub300->cmnd_res_ep && vub300->cmnd_out_ep && + vub300->data_inp_ep && vub300->data_out_ep) { + dev_info(&vub300->udev->dev, + "vub300 %s packets" + " using EndPoints %02X %02X %02X %02X\n", + vub300->large_usb_packets ? "LARGE" : "SMALL", + vub300->cmnd_out_ep, vub300->cmnd_res_ep, + vub300->data_out_ep, vub300->data_inp_ep); + /* we have the expected EndPoints */ + } else { + dev_err(&vub300->udev->dev, + "Could not find two sets of bulk-in/out endpoint pairs\n"); + retval = -EINVAL; + goto error5; + } + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + GET_HC_INF0, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, &vub300->hc_info, + sizeof(vub300->hc_info), HZ); + if (retval < 0) + goto error5; + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + SET_ROM_WAIT_STATES, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + firmware_rom_wait_states, 0x0000, NULL, 0, HZ); + if (retval < 0) + goto error5; + dev_info(&vub300->udev->dev, + "operating_mode = %s %s %d MHz %s %d byte USB packets\n", + (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL", + (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit", + mmc->f_max / 1000000, + pad_input_to_usb_pkt ? "padding input data to" : "with", + vub300->large_usb_packets ? 
512 : 64); + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + GET_SYSTEM_PORT_STATUS, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, &vub300->system_port_status, + sizeof(vub300->system_port_status), HZ); + if (retval < 0) { + goto error4; + } else if (sizeof(vub300->system_port_status) == retval) { + vub300->card_present = + (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; + vub300->read_only = + (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; + } else { + goto error4; + } + usb_set_intfdata(interface, vub300); + INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); + INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); + INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); + kref_init(&vub300->kref); + init_timer(&vub300->sg_transfer_timer); + vub300->sg_transfer_timer.data = (unsigned long)vub300; + vub300->sg_transfer_timer.function = vub300_sg_timed_out; + kref_get(&vub300->kref); + init_timer(&vub300->inactivity_timer); + vub300->inactivity_timer.data = (unsigned long)vub300; + vub300->inactivity_timer.function = vub300_inactivity_timer_expired; + vub300->inactivity_timer.expires = jiffies + HZ; + add_timer(&vub300->inactivity_timer); + if (vub300->card_present) + dev_info(&vub300->udev->dev, + "USB vub300 remote SDIO host controller[%d]" + "connected with SD/SDIO card inserted\n", + interface_to_InterfaceNumber(interface)); + else + dev_info(&vub300->udev->dev, + "USB vub300 remote SDIO host controller[%d]" + "connected with no SD/SDIO card inserted\n", + interface_to_InterfaceNumber(interface)); + mmc_add_host(mmc); + return 0; +error5: + mmc_free_host(mmc); + /* + * and hence also frees vub300 + * which is contained at the end of struct mmc + */ +error4: + usb_free_urb(command_out_urb); +error1: + usb_free_urb(command_res_urb); +error0: + return retval; +} + +static void vub300_disconnect(struct usb_interface *interface) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = usb_get_intfdata(interface); + if (!vub300 || !vub300->mmc) { + return; + } else { + struct mmc_host *mmc = vub300->mmc; + if (!vub300->mmc) { + return; + } else { + int ifnum = interface_to_InterfaceNumber(interface); + usb_set_intfdata(interface, NULL); + /* prevent more I/O from starting */ + vub300->interface = NULL; + kref_put(&vub300->kref, vub300_delete); + mmc_remove_host(mmc); + pr_info("USB vub300 remote SDIO host controller[%d]" + " now disconnected", ifnum); + return; + } + } +} + +#ifdef CONFIG_PM +static int vub300_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + if (!vub300 || !vub300->mmc) { + return 0; + } else { + struct mmc_host *mmc = vub300->mmc; + mmc_suspend_host(mmc); + return 0; + } +} + +static int vub300_resume(struct usb_interface *intf) +{ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + if (!vub300 || !vub300->mmc) { + return 0; + } else { + struct mmc_host *mmc = vub300->mmc; + mmc_resume_host(mmc); + return 0; + } +} +#else +#define vub300_suspend NULL +#define vub300_resume NULL +#endif +static int vub300_pre_reset(struct usb_interface *intf) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + mutex_lock(&vub300->cmd_mutex); + return 0; +} + +static int vub300_post_reset(struct usb_interface *intf) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + /* we are sure no URBs are active - no locking needed */ + vub300->errors = -EPIPE; + mutex_unlock(&vub300->cmd_mutex); + 
return 0; +} + +static struct usb_driver vub300_driver = { + .name = "vub300", + .probe = vub300_probe, + .disconnect = vub300_disconnect, + .suspend = vub300_suspend, + .resume = vub300_resume, + .pre_reset = vub300_pre_reset, + .post_reset = vub300_post_reset, + .id_table = vub300_table, + .supports_autosuspend = 1, +}; + +static int __init vub300_init(void) +{ /* NOT irq */ + int result; + + pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X", + firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout); + cmndworkqueue = create_singlethread_workqueue("kvub300c"); + if (!cmndworkqueue) { + pr_err("not enough memory for the REQUEST workqueue"); + result = -ENOMEM; + goto out1; + } + pollworkqueue = create_singlethread_workqueue("kvub300p"); + if (!pollworkqueue) { + pr_err("not enough memory for the IRQPOLL workqueue"); + result = -ENOMEM; + goto out2; + } + deadworkqueue = create_singlethread_workqueue("kvub300d"); + if (!deadworkqueue) { + pr_err("not enough memory for the EXPIRED workqueue"); + result = -ENOMEM; + goto out3; + } + result = usb_register(&vub300_driver); + if (result) { + pr_err("usb_register failed. Error number %d", result); + goto out4; + } + return 0; +out4: + destroy_workqueue(deadworkqueue); +out3: + destroy_workqueue(pollworkqueue); +out2: + destroy_workqueue(cmndworkqueue); +out1: + return result; +} + +static void __exit vub300_exit(void) +{ + usb_deregister(&vub300_driver); + flush_workqueue(cmndworkqueue); + flush_workqueue(pollworkqueue); + flush_workqueue(deadworkqueue); + destroy_workqueue(cmndworkqueue); + destroy_workqueue(pollworkqueue); + destroy_workqueue(deadworkqueue); +} + +module_init(vub300_init); +module_exit(vub300_exit); + +MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>"); +MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver"); +MODULE_LICENSE("GPL");