Diffstat (limited to 'drivers/spi')
50 files changed, 18273 insertions, 3887 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 2c733c27db2..1906840c111 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -100,6 +100,36 @@ config SPI_BUTTERFLY inexpensive battery powered microcontroller evaluation board. This same cable can be used to flash new firmware. +config SPI_COLDFIRE_QSPI + tristate "Freescale Coldfire QSPI controller" + depends on (M520x || M523x || M5249 || M527x || M528x || M532x) + help + This enables support for the Coldfire QSPI controller in master + mode. + + This driver can also be built as a module. If so, the module + will be called coldfire_qspi. + +config SPI_DAVINCI + tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" + depends on SPI_MASTER && ARCH_DAVINCI + select SPI_BITBANG + help + SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. + + This driver can also be built as a module. The module will be called + davinci_spi. + +config SPI_EP93XX + tristate "Cirrus Logic EP93xx SPI controller" + depends on ARCH_EP93XX + help + This enables using the Cirrus EP93xx SPI controller in master + mode. + + To compile this driver as a module, choose M here. The module will be + called ep93xx_spi. + config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GENERIC_GPIO @@ -116,11 +146,28 @@ config SPI_GPIO GPIO operations, you should be able to leverage that for better speed with a custom version of this driver; see the source code. +config SPI_IMX_VER_IMX1 + def_bool y if SOC_IMX1 + +config SPI_IMX_VER_0_0 + def_bool y if SOC_IMX21 || SOC_IMX27 + +config SPI_IMX_VER_0_4 + def_bool y if ARCH_MX31 + +config SPI_IMX_VER_0_7 + def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 + +config SPI_IMX_VER_2_3 + def_bool y if ARCH_MX51 + config SPI_IMX - tristate "Freescale iMX SPI controller" - depends on ARCH_MX1 && EXPERIMENTAL + tristate "Freescale i.MX SPI controllers" + depends on ARCH_MXC + select SPI_BITBANG + default m if IMX_HAVE_PLATFORM_SPI_IMX help - This enables using the Freescale iMX SPI controller in master + This enables using the Freescale i.MX SPI controllers in master mode. config SPI_LM70_LLP @@ -132,6 +179,14 @@ config SPI_LM70_LLP which interfaces to an LM70 temperature sensor using a parallel port. +config SPI_MPC52xx + tristate "Freescale MPC52xx SPI (non-PSC) controller support" + depends on PPC_MPC52xx && SPI + select SPI_MASTER_OF + help + This drivers supports the MPC52xx SPI controller in master SPI + mode. + config SPI_MPC52xx_PSC tristate "Freescale MPC52xx PSC SPI controller" depends on PPC_MPC52xx && EXPERIMENTAL @@ -139,15 +194,34 @@ config SPI_MPC52xx_PSC This enables using the Freescale MPC52xx Programmable Serial Controller in master SPI mode. -config SPI_MPC8xxx - tristate "Freescale MPC8xxx SPI controller" +config SPI_MPC512x_PSC + tristate "Freescale MPC512x PSC SPI controller" + depends on SPI_MASTER && PPC_MPC512x + help + This enables using the Freescale MPC5121 Programmable Serial + Controller in SPI master mode. + +config SPI_FSL_LIB + tristate + depends on FSL_SOC + +config SPI_FSL_SPI + tristate "Freescale SPI controller" depends on FSL_SOC + select SPI_FSL_LIB help - This enables using the Freescale MPC8xxx SPI controllers in master - mode. + This enables using the Freescale SPI controllers in master mode. + MPC83xx platform uses the controller in cpu mode or CPM/QE mode. + MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. - This driver uses a simple set of shift registers for data (opposed - to the CPM based descriptor model). 
+config SPI_FSL_ESPI + tristate "Freescale eSPI controller" + depends on FSL_SOC + select SPI_FSL_LIB + help + This enables using the Freescale eSPI controllers in master mode. + From MPC8536, 85xx platform uses the controller, and all P10xx, + P20xx, P30xx,P40xx, P50xx uses this controller. config SPI_OMAP_UWIRE tristate "OMAP1 MicroWire" @@ -157,12 +231,18 @@ config SPI_OMAP_UWIRE This hooks up to the MicroWire controller on OMAP1 chips. config SPI_OMAP24XX - tristate "McSPI driver for OMAP24xx/OMAP34xx" - depends on ARCH_OMAP24XX || ARCH_OMAP34XX + tristate "McSPI driver for OMAP" + depends on ARCH_OMAP2PLUS help - SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI + SPI master controller for OMAP24XX and later Multichannel SPI (McSPI) modules. +config SPI_OMAP_100K + tristate "OMAP SPI 100K" + depends on SPI_MASTER && (ARCH_OMAP850 || ARCH_OMAP730) + help + OMAP SPI 100K master controller for omap7xx boards. + config SPI_ORION tristate "Orion SPI master (EXPERIMENTAL)" depends on PLAT_ORION && EXPERIMENTAL @@ -173,19 +253,32 @@ config SPI_PL022 tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)" depends on ARM_AMBA && EXPERIMENTAL default y if MACH_U300 + default y if ARCH_REALVIEW + default y if INTEGRATOR_IMPD1 + default y if ARCH_VERSATILE help This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP controller. If you have an embedded system with an AMBA(R) bus and a PL022 controller, say Y or M here. +config SPI_PPC4xx + tristate "PPC4xx SPI Controller" + depends on PPC32 && 4xx && SPI_MASTER + select SPI_BITBANG + help + This selects a driver for the PPC4xx SPI Controller. + config SPI_PXA2XX tristate "PXA2xx SSP SPI master" - depends on ARCH_PXA && EXPERIMENTAL - select PXA_SSP + depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL + select PXA_SSP if ARCH_PXA help - This enables using a PXA2xx SSP port as a SPI master controller. - The driver can be configured to use any SSP port and additional - documentation can be found a Documentation/spi/pxa2xx. + This enables using a PXA2xx or Sodaville SSP port as a SPI master + controller. The driver can be configured to use any SSP port and + additional documentation can be found a Documentation/spi/pxa2xx. + +config SPI_PXA2XX_PCI + def_bool SPI_PXA2XX && X86_32 && PCI config SPI_S3C24XX tristate "Samsung S3C24XX series SPI" @@ -194,6 +287,17 @@ config SPI_S3C24XX help SPI driver for Samsung S3C24XX series ARM SoCs +config SPI_S3C24XX_FIQ + bool "S3C24XX driver with FIQ pseudo-DMA" + depends on SPI_S3C24XX + select FIQ + help + Enable FIQ support for the S3C24XX SPI driver to provide pseudo + DMA by using the fast-interrupt request framework, This allows + the driver to get DMA-like performance when there are either + no free DMA channels, or when doing transfers that required both + TX and RX data paths. + config SPI_S3C24XX_GPIO tristate "Samsung S3C24XX series SPI by GPIO" depends on ARCH_S3C2410 && EXPERIMENTAL @@ -204,6 +308,20 @@ config SPI_S3C24XX_GPIO the inbuilt hardware cannot provide the transfer mode, or where the board is using non hardware connected pins. +config SPI_S3C64XX + tristate "Samsung S3C64XX series type SPI" + depends on ARCH_S3C64XX && EXPERIMENTAL + select S3C64XX_DMA + help + SPI driver for Samsung S3C64XX and newer SoCs. + +config SPI_SH_MSIOF + tristate "SuperH MSIOF SPI controller" + depends on SUPERH && HAVE_CLK + select SPI_BITBANG + help + SPI driver for SuperH MSIOF blocks. 
+ config SPI_SH_SCI tristate "SuperH SCI SPI controller" depends on SUPERH @@ -211,6 +329,26 @@ config SPI_SH_SCI help SPI driver for SuperH SCI blocks. +config SPI_STMP3XXX + tristate "Freescale STMP37xx/378x SPI/SSP controller" + depends on ARCH_STMP3XXX && SPI_MASTER + help + SPI driver for Freescale STMP37xx/378x SoC SSP interface + +config SPI_TEGRA + tristate "Nvidia Tegra SPI controller" + depends on ARCH_TEGRA + select TEGRA_SYSTEM_DMA + help + SPI driver for NVidia Tegra SoCs + +config SPI_TOPCLIFF_PCH + tristate "Topcliff PCH SPI Controller" + depends on PCI + help + SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus + used in some x86 embedded processors. + config SPI_TXX9 tristate "Toshiba TXx9 SPI controller" depends on GENERIC_GPIO && CPU_TX49XX @@ -218,8 +356,8 @@ config SPI_TXX9 SPI driver for Toshiba TXx9 MIPS SoCs config SPI_XILINX - tristate "Xilinx SPI controller" - depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL + tristate "Xilinx SPI controller common module" + depends on HAS_IOMEM && EXPERIMENTAL select SPI_BITBANG help This exposes the SPI controller IP from the Xilinx EDK. @@ -227,10 +365,37 @@ config SPI_XILINX See the "OPB Serial Peripheral Interface (SPI) (v1.00e)" Product Specification document (DS464) for hardware details. + Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)" + +config SPI_NUC900 + tristate "Nuvoton NUC900 series SPI" + depends on ARCH_W90X900 && EXPERIMENTAL + select SPI_BITBANG + help + SPI driver for Nuvoton NUC900 series ARM SoCs + # # Add new SPI master controllers in alphabetical order above this line # +config SPI_DESIGNWARE + tristate "DesignWare SPI controller core support" + depends on SPI_MASTER + help + general driver for SPI controller core from DesignWare + +config SPI_DW_PCI + tristate "PCI interface driver for DW SPI core" + depends on SPI_DESIGNWARE && PCI + +config SPI_DW_MID_DMA + bool "DMA support for DW SPI controller on Intel Moorestown platform" + depends on SPI_DW_PCI && INTEL_MID_DMAC + +config SPI_DW_MMIO + tristate "Memory-mapped io interface driver for DW SPI core" + depends on SPI_DESIGNWARE && HAVE_CLK + # # There are lots of SPI device types, with sensors and memory # being probably the most widely used ones. diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 3de408d294b..3a42463c92a 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -2,9 +2,7 @@ # Makefile for kernel SPI drivers. 
# -ifeq ($(CONFIG_SPI_DEBUG),y) -EXTRA_CFLAGS += -DDEBUG -endif +ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG # small core, mostly translating board-specific # config declarations into driver model code @@ -16,21 +14,46 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o obj-$(CONFIG_SPI_AU1550) += au1550_spi.o obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o +obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o +obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o +obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o +obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o +dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o +obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o +obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o obj-$(CONFIG_SPI_GPIO) += spi_gpio.o obj-$(CONFIG_SPI_IMX) += spi_imx.o obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o +obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o +obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o obj-$(CONFIG_SPI_ORION) += orion_spi.o obj-$(CONFIG_SPI_PL022) += amba-pl022.o +obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o -obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o +obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o +obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o +obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o +obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o +obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o -obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o +obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o +obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o +obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o +obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o obj-$(CONFIG_SPI_TXX9) += spi_txx9.o obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o +obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o +obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o +obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o + +# special build for s3c24xx spi driver with fiq support +spi_s3c24xx_hw-y := spi_s3c24xx.o +spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o + # ... add above this line ... # SPI protocol drivers (device/link on bus) diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index da76797ce8b..a2a5921c730 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -27,7 +27,6 @@ /* * TODO: * - add timeout on polled transfers - * - add generic DMA framework support */ #include <linux/init.h> @@ -38,14 +37,16 @@ #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> -#include <linux/errno.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/amba/bus.h> #include <linux/amba/pl022.h> #include <linux/io.h> -#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> /* * This macro is used to define some register default values. 
@@ -103,13 +104,21 @@ /* * SSP Control Register 0 - SSP_CR0 */ -#define SSP_CR0_MASK_DSS (0x1FUL << 0) -#define SSP_CR0_MASK_HALFDUP (0x1UL << 5) +#define SSP_CR0_MASK_DSS (0x0FUL << 0) +#define SSP_CR0_MASK_FRF (0x3UL << 4) #define SSP_CR0_MASK_SPO (0x1UL << 6) #define SSP_CR0_MASK_SPH (0x1UL << 7) #define SSP_CR0_MASK_SCR (0xFFUL << 8) -#define SSP_CR0_MASK_CSS (0x1FUL << 16) -#define SSP_CR0_MASK_FRF (0x3UL << 21) + +/* + * The ST version of this block moves som bits + * in SSP_CR0 and extends it to 32 bits + */ +#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) +#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) +#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) +#define SSP_CR0_MASK_FRF_ST (0x3UL << 21) + /* * SSP Control Register 0 - SSP_CR1 @@ -118,16 +127,18 @@ #define SSP_CR1_MASK_SSE (0x1UL << 1) #define SSP_CR1_MASK_MS (0x1UL << 2) #define SSP_CR1_MASK_SOD (0x1UL << 3) -#define SSP_CR1_MASK_RENDN (0x1UL << 4) -#define SSP_CR1_MASK_TENDN (0x1UL << 5) -#define SSP_CR1_MASK_MWAIT (0x1UL << 6) -#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7) -#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10) /* - * SSP Data Register - SSP_DR + * The ST version of this block adds some bits + * in SSP_CR1 */ -#define SSP_DR_MASK_DATA 0xFFFFFFFF +#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) +#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) +#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) +#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) +#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) +/* This one is only in the PL023 variant */ +#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) /* * SSP Status Register - SSP_SR @@ -135,7 +146,7 @@ #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ -#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ +#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ /* @@ -228,7 +239,7 @@ /* * SSP Test Data Register - SSP_TDR */ -#define TDR_MASK_TESTDATA (0xFFFFFFFF) +#define TDR_MASK_TESTDATA (0xFFFFFFFF) /* * Message State @@ -236,33 +247,28 @@ * hold a single state value, that's why all this * (void *) casting is done here. 
*/ -#define STATE_START ((void *) 0) -#define STATE_RUNNING ((void *) 1) -#define STATE_DONE ((void *) 2) -#define STATE_ERROR ((void *) -1) +#define STATE_START ((void *) 0) +#define STATE_RUNNING ((void *) 1) +#define STATE_DONE ((void *) 2) +#define STATE_ERROR ((void *) -1) /* - * Queue State - */ -#define QUEUE_RUNNING (0) -#define QUEUE_STOPPED (1) -/* * SSP State - Whether Enabled or Disabled */ -#define SSP_DISABLED (0) -#define SSP_ENABLED (1) +#define SSP_DISABLED (0) +#define SSP_ENABLED (1) /* * SSP DMA State - Whether DMA Enabled or Disabled */ -#define SSP_DMA_DISABLED (0) -#define SSP_DMA_ENABLED (1) +#define SSP_DMA_DISABLED (0) +#define SSP_DMA_ENABLED (1) /* * SSP Clock Defaults */ -#define NMDK_SSP_DEFAULT_CLKRATE 0x2 -#define NMDK_SSP_DEFAULT_PRESCALE 0x40 +#define SSP_DEFAULT_CLKRATE 0x2 +#define SSP_DEFAULT_PRESCALE 0x40 /* * SSP Clock Parameter ranges @@ -308,16 +314,22 @@ enum ssp_writing { * @fifodepth: depth of FIFOs (both) * @max_bpw: maximum number of bits per word * @unidir: supports unidirection transfers + * @extended_cr: 32 bit wide control register 0 with extra + * features and extra features in CR1 as found in the ST variants + * @pl023: supports a subset of the ST extensions called "PL023" */ struct vendor_data { int fifodepth; int max_bpw; bool unidir; + bool extended_cr; + bool pl023; }; /** * struct pl022 - This is the private SSP driver data structure * @adev: AMBA device model hookup + * @vendor: Vendor data for the IP block * @phybase: The physical memory where the SSP device resides * @virtbase: The virtual memory where the SSP is mapped * @master: SPI framework hookup @@ -327,7 +339,7 @@ struct vendor_data { * @lock: spinlock to syncronise access to driver data * @workqueue: a workqueue on which any spi_message request is queued * @busy: workqueue is busy - * @run: workqueue is running + * @running: workqueue is running * @pump_transfers: Tasklet used in Interrupt Transfer mode * @cur_msg: Pointer to current spi_message being processed * @cur_transfer: Pointer to current spi_transfer @@ -352,8 +364,8 @@ struct pl022 { struct work_struct pump_messages; spinlock_t queue_lock; struct list_head queue; - int busy; - int run; + bool busy; + bool running; /* Message transfer pump */ struct tasklet_struct pump_transfers; struct spi_message *cur_msg; @@ -365,11 +377,21 @@ struct pl022 { void *rx_end; enum ssp_reading read; enum ssp_writing write; + u32 exp_fifo_level; + /* DMA settings */ +#ifdef CONFIG_DMA_ENGINE + struct dma_chan *dma_rx_channel; + struct dma_chan *dma_tx_channel; + struct sg_table sgt_rx; + struct sg_table sgt_tx; + char *dummypage; +#endif }; /** * struct chip_data - To maintain runtime state of SSP for each client chip - * @cr0: Value of control register CR0 of SSP + * @cr0: Value of control register CR0 of SSP - on later ST variants this + * register is 32 bits wide rather than just 16 * @cr1: Value of control register CR1 of SSP * @dmacr: Value of DMA control Register of SSP * @cpsr: Value of Clock prescale register @@ -384,12 +406,12 @@ struct pl022 { * This would be set according to the current message that would be served */ struct chip_data { - u16 cr0; + u32 cr0; u16 cr1; u16 dmacr; u16 cpsr; u8 n_bytes; - u8 enable_dma:1; + bool enable_dma; enum ssp_reading read; enum ssp_writing write; void (*cs_control) (u32 command); @@ -486,8 +508,9 @@ static void giveback(struct pl022 *pl022) msg->state = NULL; if (msg->complete) msg->complete(msg->context); - /* This message is completed, so let's turn off the clock! 
*/ + /* This message is completed, so let's turn off the clocks! */ clk_disable(pl022->clk); + amba_pclk_disable(pl022->adev); } /** @@ -503,6 +526,9 @@ static int flush(struct pl022 *pl022) while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) readw(SSP_DR(pl022->virtbase)); } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); + + pl022->exp_fifo_level = 0; + return limit; } @@ -514,7 +540,10 @@ static void restore_state(struct pl022 *pl022) { struct chip_data *chip = pl022->cur_chip; - writew(chip->cr0, SSP_CR0(pl022->virtbase)); + if (pl022->vendor->extended_cr) + writel(chip->cr0, SSP_CR0(pl022->virtbase)); + else + writew(chip->cr0, SSP_CR0(pl022->virtbase)); writew(chip->cr1, SSP_CR1(pl022->virtbase)); writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); @@ -522,38 +551,70 @@ static void restore_state(struct pl022 *pl022) writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } -/** - * load_ssp_default_config - Load default configuration for SSP - * @pl022: SSP driver private data structure - */ - /* * Default SSP Register Values */ #define DEFAULT_SSP_REG_CR0 ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ - GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ - GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \ - GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ - GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ - GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ +) + +/* ST versions have slightly different bit layout */ +#define DEFAULT_SSP_REG_CR0_ST ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ + GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ +) + +/* The PL023 version is slightly different again */ +#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ ) #define DEFAULT_SSP_REG_CR1 ( \ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ + GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ +) + +/* ST versions extend this register to use all 16 bits */ +#define DEFAULT_SSP_REG_CR1_ST ( \ + DEFAULT_SSP_REG_CR1 | \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ + GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ +) + +/* + * The PL023 variant has further differences: no loopback mode, no microwire + * support, and a new clock feedback delay setting. 
+ */ +#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ + GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ + GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ - GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \ - GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \ - GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\ - GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \ - GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ + GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ ) #define DEFAULT_SSP_REG_CPSR ( \ - GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ + GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ ) #define DEFAULT_SSP_REG_DMACR (\ @@ -561,11 +622,22 @@ static void restore_state(struct pl022 *pl022) GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ ) - +/** + * load_ssp_default_config - Load default configuration for SSP + * @pl022: SSP driver private data structure + */ static void load_ssp_default_config(struct pl022 *pl022) { - writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); - writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); + if (pl022->vendor->pl023) { + writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); + } else if (pl022->vendor->extended_cr) { + writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); + } else { + writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); + } writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); @@ -585,10 +657,9 @@ static void readwriter(struct pl022 *pl022) * errons in 8bit wide transfers on ARM variants (just 8 words * FIFO, means only 8x8 = 64 bits in FIFO) at least. * - * FIXME: currently we have no logic to account for this. - * perhaps there is even something broken in HW regarding - * 8bit transfers (it doesn't fail on 16bit) so this needs - * more investigation... + * To prevent this issue, the TX FIFO is only filled to the + * unused RX FIFO fill length, regardless of what the TX + * FIFO status flag indicates. */ dev_dbg(&pl022->adev->dev, "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", @@ -615,11 +686,12 @@ static void readwriter(struct pl022 *pl022) break; } pl022->rx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level--; } /* - * Write as much as you can, while keeping an eye on the RX FIFO! + * Write as much as possible up to the RX FIFO size */ - while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) + while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) && (pl022->tx < pl022->tx_end)) { switch (pl022->write) { case WRITING_NULL: @@ -636,6 +708,7 @@ static void readwriter(struct pl022 *pl022) break; } pl022->tx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level++; /* * This inner reader takes care of things appearing in the RX * FIFO as we're transmitting. 
This will happen a lot since the @@ -662,6 +735,7 @@ static void readwriter(struct pl022 *pl022) break; } pl022->rx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level--; } } /* @@ -694,6 +768,371 @@ static void *next_transfer(struct pl022 *pl022) } return STATE_DONE; } + +/* + * This DMA functionality is only compiled in if we have + * access to the generic DMA devices/DMA engine. + */ +#ifdef CONFIG_DMA_ENGINE +static void unmap_free_dma_scatter(struct pl022 *pl022) +{ + /* Unmap and free the SG tables */ + dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, DMA_FROM_DEVICE); + sg_free_table(&pl022->sgt_rx); + sg_free_table(&pl022->sgt_tx); +} + +static void dma_callback(void *data) +{ + struct pl022 *pl022 = data; + struct spi_message *msg = pl022->cur_msg; + + BUG_ON(!pl022->sgt_rx.sgl); + +#ifdef VERBOSE_DEBUG + /* + * Optionally dump out buffers to inspect contents, this is + * good if you want to convince yourself that the loopback + * read/write contents are the same, when adopting to a new + * DMA engine. + */ + { + struct scatterlist *sg; + unsigned int i; + + dma_sync_sg_for_cpu(&pl022->adev->dev, + pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, + DMA_FROM_DEVICE); + + for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI RX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI TX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + } +#endif + + unmap_free_dma_scatter(pl022); + + /* Update total bytes transfered */ + msg->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip-> + cs_control(SSP_CHIP_DESELECT); + + /* Move to next transfer */ + msg->state = next_transfer(pl022); + tasklet_schedule(&pl022->pump_transfers); +} + +static void setup_dma_scatter(struct pl022 *pl022, + void *buffer, + unsigned int length, + struct sg_table *sgtab) +{ + struct scatterlist *sg; + int bytesleft = length; + void *bufp = buffer; + int mapbytes; + int i; + + if (buffer) { + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + /* + * If there are less bytes left than what fits + * in the current page (plus page alignment offset) + * we just feed in this, else we stuff in as much + * as we can. 
+ */ + if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE - offset_in_page(bufp); + sg_set_page(sg, virt_to_page(bufp), + mapbytes, offset_in_page(bufp)); + bufp += mapbytes; + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX target page @ %p, %d bytes, %d left\n", + bufp, mapbytes, bytesleft); + } + } else { + /* Map the dummy buffer on every page */ + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + if (bytesleft < PAGE_SIZE) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE; + sg_set_page(sg, virt_to_page(pl022->dummypage), + mapbytes, 0); + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX to dummy page %d bytes, %d left\n", + mapbytes, bytesleft); + + } + } + BUG_ON(bytesleft); +} + +/** + * configure_dma - configures the channels for the next transfer + * @pl022: SSP driver's private data structure + */ +static int configure_dma(struct pl022 *pl022) +{ + struct dma_slave_config rx_conf = { + .src_addr = SSP_DR(pl022->phybase), + .direction = DMA_FROM_DEVICE, + .src_maxburst = pl022->vendor->fifodepth >> 1, + }; + struct dma_slave_config tx_conf = { + .dst_addr = SSP_DR(pl022->phybase), + .direction = DMA_TO_DEVICE, + .dst_maxburst = pl022->vendor->fifodepth >> 1, + }; + unsigned int pages; + int ret; + int rx_sglen, tx_sglen; + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + struct dma_async_tx_descriptor *rxdesc; + struct dma_async_tx_descriptor *txdesc; + dma_cookie_t cookie; + + /* Check that the channels are available */ + if (!rxchan || !txchan) + return -ENODEV; + + switch (pl022->read) { + case READING_NULL: + /* Use the same as for writing */ + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case READING_U8: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case READING_U16: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case READING_U32: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + } + + switch (pl022->write) { + case WRITING_NULL: + /* Use the same as for reading */ + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case WRITING_U8: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case WRITING_U16: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case WRITING_U32: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + } + + /* SPI pecularity: we need to read and write the same width */ + if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + rx_conf.src_addr_width = tx_conf.dst_addr_width; + if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + tx_conf.dst_addr_width = rx_conf.src_addr_width; + BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); + + rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, + (unsigned long) &rx_conf); + txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, + (unsigned long) &tx_conf); + + /* Create sglists for the transfers */ + pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; + dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); + + ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_rx_sg; + + ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_tx_sg; + + /* Fill in the scatterlists for the RX+TX buffers */ + setup_dma_scatter(pl022, pl022->rx, + pl022->cur_transfer->len, &pl022->sgt_rx); + setup_dma_scatter(pl022, pl022->tx, + pl022->cur_transfer->len, 
&pl022->sgt_tx); + + /* Map DMA buffers */ + rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, DMA_FROM_DEVICE); + if (!rx_sglen) + goto err_rx_sgmap; + + tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + if (!tx_sglen) + goto err_tx_sgmap; + + /* Send both scatterlists */ + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, + pl022->sgt_rx.sgl, + rx_sglen, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto err_rxdesc; + + txdesc = txchan->device->device_prep_slave_sg(txchan, + pl022->sgt_tx.sgl, + tx_sglen, + DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto err_txdesc; + + /* Put the callback on the RX transfer only, that should finish last */ + rxdesc->callback = dma_callback; + rxdesc->callback_param = pl022; + + /* Submit and fire RX and TX with TX last so we're ready to read! */ + cookie = rxdesc->tx_submit(rxdesc); + if (dma_submit_error(cookie)) + goto err_submit_rx; + cookie = txdesc->tx_submit(txdesc); + if (dma_submit_error(cookie)) + goto err_submit_tx; + rxchan->device->device_issue_pending(rxchan); + txchan->device->device_issue_pending(txchan); + + return 0; + +err_submit_tx: +err_submit_rx: +err_txdesc: + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); +err_rxdesc: + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); +err_tx_sgmap: + dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, + pl022->sgt_tx.nents, DMA_FROM_DEVICE); +err_rx_sgmap: + sg_free_table(&pl022->sgt_tx); +err_alloc_tx_sg: + sg_free_table(&pl022->sgt_rx); +err_alloc_rx_sg: + return -ENOMEM; +} + +static int __init pl022_dma_probe(struct pl022 *pl022) +{ + dma_cap_mask_t mask; + + /* Try to acquire a generic DMA engine slave channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + /* + * We need both RX and TX channels to do DMA, else do none + * of them. 
+ */ + pl022->dma_rx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_rx_param); + if (!pl022->dma_rx_channel) { + dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); + goto err_no_rxchan; + } + + pl022->dma_tx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_tx_param); + if (!pl022->dma_tx_channel) { + dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); + goto err_no_txchan; + } + + pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pl022->dummypage) { + dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); + goto err_no_dummypage; + } + + dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", + dma_chan_name(pl022->dma_rx_channel), + dma_chan_name(pl022->dma_tx_channel)); + + return 0; + +err_no_dummypage: + dma_release_channel(pl022->dma_tx_channel); +err_no_txchan: + dma_release_channel(pl022->dma_rx_channel); + pl022->dma_rx_channel = NULL; +err_no_rxchan: + return -ENODEV; +} + +static void terminate_dma(struct pl022 *pl022) +{ + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); + unmap_free_dma_scatter(pl022); +} + +static void pl022_dma_remove(struct pl022 *pl022) +{ + if (pl022->busy) + terminate_dma(pl022); + if (pl022->dma_tx_channel) + dma_release_channel(pl022->dma_tx_channel); + if (pl022->dma_rx_channel) + dma_release_channel(pl022->dma_rx_channel); + kfree(pl022->dummypage); +} + +#else +static inline int configure_dma(struct pl022 *pl022) +{ + return -ENODEV; +} + +static inline int pl022_dma_probe(struct pl022 *pl022) +{ + return 0; +} + +static inline void pl022_dma_remove(struct pl022 *pl022) +{ +} +#endif + /** * pl022_interrupt_handler - Interrupt handler for SSP controller * @@ -725,14 +1164,17 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) if (unlikely(!irq_status)) return IRQ_NONE; - /* This handles the error code interrupts */ + /* + * This handles the FIFO interrupts, the timeout + * interrupts are flatly ignored, they cannot be + * trusted. + */ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { /* * Overrun interrupt - bail out since our Data has been * corrupted */ - dev_err(&pl022->adev->dev, - "FIFO overrun\n"); + dev_err(&pl022->adev->dev, "FIFO overrun\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) dev_err(&pl022->adev->dev, "RXFIFO is full\n"); @@ -827,8 +1269,8 @@ static int set_up_next_transfer(struct pl022 *pl022, } /** - * pump_transfers - Tasklet function which schedules next interrupt transfer - * when running in interrupt transfer mode. + * pump_transfers - Tasklet function which schedules next transfer + * when running in interrupt or DMA transfer mode. * @data: SSP driver private data structure * */ @@ -885,65 +1327,23 @@ static void pump_transfers(unsigned long data) } /* Flush the FIFOs and let's go! */ flush(pl022); - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); -} -/** - * NOT IMPLEMENTED - * configure_dma - It configures the DMA pipes for DMA transfers - * @data: SSP driver's private data structure - * - */ -static int configure_dma(void *data) -{ - struct pl022 *pl022 = data; - dev_dbg(&pl022->adev->dev, "configure DMA\n"); - return -ENOTSUPP; -} - -/** - * do_dma_transfer - It handles transfers of the current message - * if it is DMA xfer. 
- * NOT FULLY IMPLEMENTED - * @data: SSP driver's private data structure - */ -static void do_dma_transfer(void *data) -{ - struct pl022 *pl022 = data; - - if (configure_dma(data)) { - dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n"); - goto err_config_dma; - } - - /* TODO: Implememt DMA setup of pipes here */ - - /* Enable target chip, set up transfer */ - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); - if (set_up_next_transfer(pl022, pl022->cur_transfer)) { - /* Error path */ - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); + if (pl022->cur_chip->enable_dma) { + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } return; } - /* Enable SSP */ - writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), - SSP_CR1(pl022->virtbase)); - - /* TODO: Enable the DMA transfer here */ - return; - err_config_dma: - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); - return; +err_config_dma: + writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); } -static void do_interrupt_transfer(void *data) +static void do_interrupt_dma_transfer(struct pl022 *pl022) { - struct pl022 *pl022 = data; + u32 irqflags = ENABLE_ALL_INTERRUPTS; /* Enable target chip */ pl022->cur_chip->cs_control(SSP_CHIP_SELECT); @@ -954,15 +1354,26 @@ static void do_interrupt_transfer(void *data) giveback(pl022); return; } + /* If we're using DMA, set up DMA here */ + if (pl022->cur_chip->enable_dma) { + /* Configure DMA transfer */ + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } + /* Disable interrupts in DMA mode, IRQ from DMA controller */ + irqflags = DISABLE_ALL_INTERRUPTS; + } +err_config_dma: /* Enable SSP, turn on interrupts */ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(irqflags, SSP_IMSC(pl022->virtbase)); } -static void do_polling_transfer(void *data) +static void do_polling_transfer(struct pl022 *pl022) { - struct pl022 *pl022 = data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; @@ -1003,7 +1414,7 @@ static void do_polling_transfer(void *data) writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); - dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... 
\n"); + dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); /* FIXME: insert a timeout so we don't hang here indefinately */ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) readwriter(pl022); @@ -1032,7 +1443,7 @@ static void do_polling_transfer(void *data) * * This function checks if there is any spi message in the queue that * needs processing and delegate control to appropriate function - * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() + * do_polling_transfer()/do_interrupt_dma_transfer() * based on the kind of the transfer * */ @@ -1044,8 +1455,8 @@ static void pump_messages(struct work_struct *work) /* Lock queue and check for queue work */ spin_lock_irqsave(&pl022->queue_lock, flags); - if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) { - pl022->busy = 0; + if (list_empty(&pl022->queue) || !pl022->running) { + pl022->busy = false; spin_unlock_irqrestore(&pl022->queue_lock, flags); return; } @@ -1059,7 +1470,7 @@ static void pump_messages(struct work_struct *work) list_entry(pl022->queue.next, struct spi_message, queue); list_del_init(&pl022->cur_msg->queue); - pl022->busy = 1; + pl022->busy = true; spin_unlock_irqrestore(&pl022->queue_lock, flags); /* Initial message state */ @@ -1071,19 +1482,18 @@ static void pump_messages(struct work_struct *work) /* Setup the SPI using the per chip configuration */ pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); /* - * We enable the clock here, then the clock will be disabled when + * We enable the clocks here, then the clocks will be disabled when * giveback() is called in each method (poll/interrupt/DMA) */ + amba_pclk_enable(pl022->adev); clk_enable(pl022->clk); restore_state(pl022); flush(pl022); if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) do_polling_transfer(pl022); - else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER) - do_interrupt_transfer(pl022); else - do_dma_transfer(pl022); + do_interrupt_dma_transfer(pl022); } @@ -1092,8 +1502,8 @@ static int __init init_queue(struct pl022 *pl022) INIT_LIST_HEAD(&pl022->queue); spin_lock_init(&pl022->queue_lock); - pl022->run = QUEUE_STOPPED; - pl022->busy = 0; + pl022->running = false; + pl022->busy = false; tasklet_init(&pl022->pump_transfers, pump_transfers, (unsigned long)pl022); @@ -1114,12 +1524,12 @@ static int start_queue(struct pl022 *pl022) spin_lock_irqsave(&pl022->queue_lock, flags); - if (pl022->run == QUEUE_RUNNING || pl022->busy) { + if (pl022->running || pl022->busy) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return -EBUSY; } - pl022->run = QUEUE_RUNNING; + pl022->running = true; pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; @@ -1143,7 +1553,6 @@ static int stop_queue(struct pl022 *pl022) * A wait_queue on the pl022->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. 
Do this instead */ - pl022->run = QUEUE_STOPPED; while (!list_empty(&pl022->queue) && pl022->busy && limit--) { spin_unlock_irqrestore(&pl022->queue_lock, flags); msleep(10); @@ -1152,6 +1561,8 @@ static int stop_queue(struct pl022 *pl022) if (!list_empty(&pl022->queue) || pl022->busy) status = -EBUSY; + else + pl022->running = false; spin_unlock_irqrestore(&pl022->queue_lock, flags); @@ -1178,116 +1589,78 @@ static int destroy_queue(struct pl022 *pl022) } static int verify_controller_parameters(struct pl022 *pl022, - struct pl022_config_chip *chip_info) + struct pl022_config_chip const *chip_info) { - if ((chip_info->lbm != LOOPBACK_ENABLED) - && (chip_info->lbm != LOOPBACK_DISABLED)) { - dev_err(chip_info->dev, - "loopback Mode is configured incorrectly\n"); - return -EINVAL; - } if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "interface is configured incorrectly\n"); return -EINVAL; } if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && (!pl022->vendor->unidir)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "unidirectional mode not supported in this " "hardware version\n"); return -EINVAL; } if ((chip_info->hierarchy != SSP_MASTER) && (chip_info->hierarchy != SSP_SLAVE)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "hierarchy is configured incorrectly\n"); return -EINVAL; } - if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN) - || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) { - dev_err(chip_info->dev, - "cpsdvsr is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->endian_rx != SSP_RX_MSB) - && (chip_info->endian_rx != SSP_RX_LSB)) { - dev_err(chip_info->dev, - "RX FIFO endianess is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->endian_tx != SSP_TX_MSB) - && (chip_info->endian_tx != SSP_TX_LSB)) { - dev_err(chip_info->dev, - "TX FIFO endianess is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->data_size < SSP_DATA_BITS_4) - || (chip_info->data_size > SSP_DATA_BITS_32)) { - dev_err(chip_info->dev, - "DATA Size is configured incorrectly\n"); - return -EINVAL; - } if ((chip_info->com_mode != INTERRUPT_TRANSFER) && (chip_info->com_mode != DMA_TRANSFER) && (chip_info->com_mode != POLLING_TRANSFER)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "Communication mode is configured incorrectly\n"); return -EINVAL; } if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } - if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { - if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE) - && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) { - dev_err(chip_info->dev, - "Clock Phase is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW) - && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) { - dev_err(chip_info->dev, - "Clock Polarity is configured incorrectly\n"); - return -EINVAL; - } - } if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { if ((chip_info->ctrl_len < SSP_BITS_4) || (chip_info->ctrl_len > SSP_BITS_32)) { - 
dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "CTRL LEN is configured incorrectly\n"); return -EINVAL; } if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "Wait State is configured incorrectly\n"); return -EINVAL; } - if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) - && (chip_info->duplex != - SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { - dev_err(chip_info->dev, - "DUPLEX is configured incorrectly\n"); + /* Half duplex is only available in the ST Micro version */ + if (pl022->vendor->extended_cr) { + if ((chip_info->duplex != + SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) + && (chip_info->duplex != + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { + dev_err(&pl022->adev->dev, + "Microwire duplex mode is configured incorrectly\n"); + return -EINVAL; + } + } else { + if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) + dev_err(&pl022->adev->dev, + "Microwire half duplex mode requested," + " but this is only available in the" + " ST version of PL022\n"); return -EINVAL; } } - if (chip_info->cs_control == NULL) { - dev_warn(chip_info->dev, - "Chip Select Function is NULL for this chip\n"); - chip_info->cs_control = null_cs_control; - } return 0; } @@ -1307,7 +1680,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) spin_lock_irqsave(&pl022->queue_lock, flags); - if (pl022->run == QUEUE_STOPPED) { + if (!pl022->running) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return -ESHUTDOWN; } @@ -1316,7 +1689,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) msg->state = STATE_START; list_add_tail(&msg->queue, &pl022->queue); - if (pl022->run == QUEUE_RUNNING && !pl022->busy) + if (pl022->running && !pl022->busy) queue_work(pl022->workqueue, &pl022->pump_messages); spin_unlock_irqrestore(&pl022->queue_lock, flags); @@ -1387,22 +1760,24 @@ static int calculate_effective_freq(struct pl022 *pl022, return 0; } -/** - * NOT IMPLEMENTED - * process_dma_info - Processes the DMA info provided by client drivers - * @chip_info: chip info provided by client device - * @chip: Runtime state maintained by the SSP controller for each spi device - * - * This function processes and stores DMA config provided by client driver - * into the runtime state maintained by the SSP controller driver + +/* + * A piece of default chip info unless the platform + * supplies it. */ -static int process_dma_info(struct pl022_config_chip *chip_info, - struct chip_data *chip) -{ - dev_err(chip_info->dev, - "cannot process DMA info, DMA not implemented!\n"); - return -ENOTSUPP; -} +static const struct pl022_config_chip pl022_default_chip_info = { + .com_mode = POLLING_TRANSFER, + .iface = SSP_INTERFACE_MOTOROLA_SPI, + .hierarchy = SSP_SLAVE, + .slave_tx_disable = DO_NOT_DRIVE_TX, + .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, + .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, + .ctrl_len = SSP_BITS_8, + .wait_state = SSP_MWIRE_WAIT_ZERO, + .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + .cs_control = null_cs_control, +}; + /** * pl022_setup - setup function registered to SPI master framework @@ -1416,23 +1791,15 @@ static int process_dma_info(struct pl022_config_chip *chip_info, * controller hardware here, that is not done until the actual transfer * commence. 
*/ - -/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ - | SPI_LSB_FIRST | SPI_LOOP) - static int pl022_setup(struct spi_device *spi) { - struct pl022_config_chip *chip_info; + struct pl022_config_chip const *chip_info; struct chip_data *chip; + struct ssp_clock_params clk_freq; int status = 0; struct pl022 *pl022 = spi_master_get_devdata(spi->master); - - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } + unsigned int bits = spi->bits_per_word; + u32 tmp; if (!spi->max_speed_hz) return -EINVAL; @@ -1455,48 +1822,13 @@ static int pl022_setup(struct spi_device *spi) chip_info = spi->controller_data; if (chip_info == NULL) { + chip_info = &pl022_default_chip_info; /* spi_board_info.controller_data not is supplied */ dev_dbg(&spi->dev, "using default controller_data settings\n"); - - chip_info = - kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL); - - if (!chip_info) { - dev_err(&spi->dev, - "cannot allocate controller data\n"); - status = -ENOMEM; - goto err_first_setup; - } - - dev_dbg(&spi->dev, "allocated memory for controller data\n"); - - /* Pointer back to the SPI device */ - chip_info->dev = &spi->dev; - /* - * Set controller data default values: - * Polling is supported by default - */ - chip_info->lbm = LOOPBACK_DISABLED; - chip_info->com_mode = POLLING_TRANSFER; - chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI; - chip_info->hierarchy = SSP_SLAVE; - chip_info->slave_tx_disable = DO_NOT_DRIVE_TX; - chip_info->endian_tx = SSP_TX_LSB; - chip_info->endian_rx = SSP_RX_LSB; - chip_info->data_size = SSP_DATA_BITS_12; - chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; - chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; - chip_info->clk_phase = SSP_CLK_FALLING_EDGE; - chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; - chip_info->ctrl_len = SSP_BITS_8; - chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; - chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX; - chip_info->cs_control = null_cs_control; - } else { + } else dev_dbg(&spi->dev, "using user supplied controller_data settings\n"); - } /* * We can override with custom divisors, else we use the board @@ -1506,29 +1838,48 @@ static int pl022_setup(struct spi_device *spi) && (0 == chip_info->clk_freq.scr)) { status = calculate_effective_freq(pl022, spi->max_speed_hz, - &chip_info->clk_freq); + &clk_freq); if (status < 0) goto err_config_params; } else { - if ((chip_info->clk_freq.cpsdvsr % 2) != 0) - chip_info->clk_freq.cpsdvsr = - chip_info->clk_freq.cpsdvsr - 1; + memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); + if ((clk_freq.cpsdvsr % 2) != 0) + clk_freq.cpsdvsr = + clk_freq.cpsdvsr - 1; } + if ((clk_freq.cpsdvsr < CPSDVR_MIN) + || (clk_freq.cpsdvsr > CPSDVR_MAX)) { + dev_err(&spi->dev, + "cpsdvsr is configured incorrectly\n"); + goto err_config_params; + } + + status = verify_controller_parameters(pl022, chip_info); if (status) { dev_err(&spi->dev, "controller data is incorrect"); goto err_config_params; } + /* Now set controller state based on controller data */ chip->xfer_type = chip_info->com_mode; - chip->cs_control = chip_info->cs_control; - - if (chip_info->data_size <= 8) { - dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n"); + if (!chip_info->cs_control) { + chip->cs_control = null_cs_control; + dev_warn(&spi->dev, + "chip select function is NULL for this chip\n"); + } else + chip->cs_control = chip_info->cs_control; + + if (bits <= 3) { + /* PL022 doesn't support 
less than 4-bits */ + status = -ENOTSUPP; + goto err_config_params; + } else if (bits <= 8) { + dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); chip->n_bytes = 1; chip->read = READING_U8; chip->write = WRITING_U8; - } else if (chip_info->data_size <= 16) { + } else if (bits <= 16) { dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); chip->n_bytes = 2; chip->read = READING_U16; @@ -1545,6 +1896,7 @@ static int pl022_setup(struct spi_device *spi) dev_err(&spi->dev, "a standard pl022 can only handle " "1 <= n <= 16 bit words\n"); + status = -ENOTSUPP; goto err_config_params; } } @@ -1556,9 +1908,8 @@ static int pl022_setup(struct spi_device *spi) chip->cpsr = 0; if ((chip_info->com_mode == DMA_TRANSFER) && ((pl022->master_info)->enable_dma)) { - chip->enable_dma = 1; + chip->enable_dma = true; dev_dbg(&spi->dev, "DMA mode set in controller state\n"); - status = process_dma_info(chip_info, chip); if (status < 0) goto err_config_params; SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, @@ -1566,7 +1917,7 @@ static int pl022_setup(struct spi_device *spi) SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_TXDMAE, 1); } else { - chip->enable_dma = 0; + chip->enable_dma = false; dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0); @@ -1574,30 +1925,81 @@ static int pl022_setup(struct spi_device *spi) SSP_DMACR_MASK_TXDMAE, 1); } - chip->cpsr = chip_info->clk_freq.cpsdvsr; + chip->cpsr = clk_freq.cpsdvsr; + + /* Special setup for the ST micro extended control registers */ + if (pl022->vendor->extended_cr) { + u32 etx; + + if (pl022->vendor->pl023) { + /* These bits are only in the PL023 */ + SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, + SSP_CR1_MASK_FBCLKDEL_ST, 13); + } else { + /* These bits are in the PL022 but not PL023 */ + SSP_WRITE_BITS(chip->cr0, chip_info->duplex, + SSP_CR0_MASK_HALFDUP_ST, 5); + SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, + SSP_CR0_MASK_CSS_ST, 16); + SSP_WRITE_BITS(chip->cr0, chip_info->iface, + SSP_CR0_MASK_FRF_ST, 21); + SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, + SSP_CR1_MASK_MWAIT_ST, 6); + } + SSP_WRITE_BITS(chip->cr0, bits - 1, + SSP_CR0_MASK_DSS_ST, 0); + + if (spi->mode & SPI_LSB_FIRST) { + tmp = SSP_RX_LSB; + etx = SSP_TX_LSB; + } else { + tmp = SSP_RX_MSB; + etx = SSP_TX_MSB; + } + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); + SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); + SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, + SSP_CR1_MASK_RXIFLSEL_ST, 7); + SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, + SSP_CR1_MASK_TXIFLSEL_ST, 10); + } else { + SSP_WRITE_BITS(chip->cr0, bits - 1, + SSP_CR0_MASK_DSS, 0); + SSP_WRITE_BITS(chip->cr0, chip_info->iface, + SSP_CR0_MASK_FRF, 4); + } + + /* Stuff that is common for all versions */ + if (spi->mode & SPI_CPOL) + tmp = SSP_CLK_POL_IDLE_HIGH; + else + tmp = SSP_CLK_POL_IDLE_LOW; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); - SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0); - SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); - SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16); - SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21); - SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); + 
if (spi->mode & SPI_CPHA) + tmp = SSP_CLK_SECOND_EDGE; + else + tmp = SSP_CLK_FIRST_EDGE; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); + + SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); + /* Loopback is available on all versions except PL023 */ + if (!pl022->vendor->pl023) { + if (spi->mode & SPI_LOOP) + tmp = LOOPBACK_ENABLED; + else + tmp = LOOPBACK_DISABLED; + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); + } SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5); - SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6); - SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7); - SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10); /* Save controller_state */ spi_set_ctldata(spi, chip); return status; err_config_params: - err_first_setup: + spi_set_ctldata(spi, NULL); kfree(chip); return status; } @@ -1618,7 +2020,7 @@ static void pl022_cleanup(struct spi_device *spi) } -static int __init +static int __devinit pl022_probe(struct amba_device *adev, struct amba_id *id) { struct device *dev = &adev->dev; @@ -1659,12 +2061,21 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) master->setup = pl022_setup; master->transfer = pl022_transfer; + /* + * Supports mode 0-3, loopback, and active low CS. Transfers are + * always MS bit first on the original pl022. + */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; + if (pl022->vendor->extended_cr) + master->mode_bits |= SPI_LSB_FIRST; + dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); status = amba_request_regions(adev, NULL); if (status) goto err_no_ioregion; + pl022->phybase = adev->res.start; pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); if (pl022->virtbase == NULL) { status = -ENOMEM; @@ -1681,11 +2092,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) } /* Disable SSP */ - clk_enable(pl022->clk); writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); load_ssp_default_config(pl022); - clk_disable(pl022->clk); status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", pl022); @@ -1693,6 +2102,14 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); goto err_no_irq; } + + /* Get DMA channels */ + if (platform_info->enable_dma) { + status = pl022_dma_probe(pl022); + if (status != 0) + goto err_no_dma; + } + /* Initialize and start queue */ status = init_queue(pl022); if (status != 0) { @@ -1713,12 +2130,16 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) goto err_spi_register; } dev_dbg(dev, "probe succeded\n"); + /* Disable the silicon block pclk and clock it when needed */ + amba_pclk_disable(adev); return 0; err_spi_register: err_start_queue: err_init_queue: destroy_queue(pl022); + pl022_dma_remove(pl022); + err_no_dma: free_irq(adev->irq[0], pl022); err_no_irq: clk_put(pl022->clk); @@ -1733,7 +2154,7 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) return status; } -static int __exit +static int __devexit pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); @@ -1749,6 +2170,7 @@ pl022_remove(struct amba_device 
*adev) return status; } load_ssp_default_config(pl022); + pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); clk_disable(pl022->clk); clk_put(pl022->clk); @@ -1774,9 +2196,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state) return status; } - clk_enable(pl022->clk); + amba_pclk_enable(adev); load_ssp_default_config(pl022); - clk_disable(pl022->clk); + amba_pclk_disable(adev); dev_dbg(&adev->dev, "suspended\n"); return 0; } @@ -1804,6 +2226,8 @@ static struct vendor_data vendor_arm = { .fifodepth = 8, .max_bpw = 16, .unidir = false, + .extended_cr = false, + .pl023 = false, }; @@ -1811,6 +2235,16 @@ static struct vendor_data vendor_st = { .fifodepth = 32, .max_bpw = 32, .unidir = false, + .extended_cr = true, + .pl023 = false, +}; + +static struct vendor_data vendor_st_pl023 = { + .fifodepth = 32, + .max_bpw = 32, + .unidir = false, + .extended_cr = true, + .pl023 = true, }; static struct amba_id pl022_ids[] = { @@ -1828,10 +2262,22 @@ static struct amba_id pl022_ids[] = { * ST Micro derivative, this has 32bit wide * and 32 locations deep TX/RX FIFO */ - .id = 0x00108022, + .id = 0x01080022, .mask = 0xffffffff, .data = &vendor_st, }, + { + /* + * ST-Ericsson derivative "PL023" (this is not + * an official ARM number), this is a PL022 SSP block + * stripped to SPI mode only, it has 32bit wide + * and 32 locations deep TX/RX FIFO but no extended + * CR0/CR1 register + */ + .id = 0x00080023, + .mask = 0xffffffff, + .data = &vendor_st_pl023, + }, { 0, 0 }, }; @@ -1841,7 +2287,7 @@ static struct amba_driver pl022_driver = { }, .id_table = pl022_ids, .probe = pl022_probe, - .remove = __exit_p(pl022_remove), + .remove = __devexit_p(pl022_remove), .suspend = pl022_suspend, .resume = pl022_resume, }; @@ -1852,7 +2298,7 @@ static int __init pl022_init(void) return amba_driver_register(&pl022_driver); } -module_init(pl022_init); +subsys_initcall(pl022_init); static void __exit pl022_exit(void) { diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index f5b3fdbb1e2..a067046c9da 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -18,6 +18,7 @@ #include <linux/err.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> +#include <linux/slab.h> #include <asm/io.h> #include <mach/board.h> @@ -189,14 +190,14 @@ static void atmel_spi_next_xfer_data(struct spi_master *master, /* use scratch buffer only when rx or tx data is unspecified */ if (xfer->rx_buf) - *rx_dma = xfer->rx_dma + xfer->len - len; + *rx_dma = xfer->rx_dma + xfer->len - *plen; else { *rx_dma = as->buffer_dma; if (len > BUFFER_SIZE) len = BUFFER_SIZE; } if (xfer->tx_buf) - *tx_dma = xfer->tx_dma + xfer->len - len; + *tx_dma = xfer->tx_dma + xfer->len - *plen; else { *tx_dma = as->buffer_dma; if (len > BUFFER_SIZE) @@ -351,8 +352,12 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; if (xfer->tx_buf) { + /* tx_buf is a const void* where we need a void * for the dma + * mapping */ + void *nonconst_tx = (void *)xfer->tx_buf; + xfer->tx_dma = dma_map_single(dev, - (void *) xfer->tx_buf, xfer->len, + nonconst_tx, xfer->len, DMA_TO_DEVICE); if (dma_mapping_error(dev, xfer->tx_dma)) return -ENOMEM; @@ -653,6 +658,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) struct spi_transfer *xfer; unsigned long flags; struct device *controller = spi->master->dev.parent; + u8 bits; + struct atmel_spi_device *asd; as = spi_master_get_devdata(spi->master); @@ -671,8 +678,18 @@ static int 
atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) return -EINVAL; } + if (xfer->bits_per_word) { + asd = spi->controller_state; + bits = (asd->csr >> 4) & 0xf; + if (bits != xfer->bits_per_word - 8) { + dev_dbg(&spi->dev, "you can't yet change " + "bits_per_word in transfers\n"); + return -ENOPROTOOPT; + } + } + /* FIXME implement these protocol options!! */ - if (xfer->bits_per_word || xfer->speed_hz) { + if (xfer->speed_hz) { dev_dbg(&spi->dev, "no protocol options yet\n"); return -ENOPROTOOPT; } @@ -788,7 +805,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev) spin_lock_init(&as->lock); INIT_LIST_HEAD(&as->queue); as->pdev = pdev; - as->regs = ioremap(regs->start, (regs->end - regs->start) + 1); + as->regs = ioremap(regs->start, resource_size(regs)); if (!as->regs) goto out_free_buffer; as->irq = irq; diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index 76cbc1a6659..3c9ade69643 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c @@ -23,6 +23,7 @@ #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/slab.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/platform_device.h> @@ -237,8 +238,14 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) unsigned bpw, hz; u32 cfg, stat; - bpw = t ? t->bits_per_word : spi->bits_per_word; - hz = t ? t->speed_hz : spi->max_speed_hz; + bpw = spi->bits_per_word; + hz = spi->max_speed_hz; + if (t) { + if (t->bits_per_word) + bpw = t->bits_per_word; + if (t->speed_hz) + hz = t->speed_hz; + } if (bpw < 4 || bpw > 24) { dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n", @@ -406,11 +413,13 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) } /* put buffers on the ring */ - res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, hw->rx, t->len); + res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx), + t->len, DDMA_FLAGS_IE); if (!res) dev_err(hw->dev, "rx dma put dest error\n"); - res = au1xxx_dbdma_put_source(hw->dma_tx_ch, (void *)hw->tx, t->len); + res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx), + t->len, DDMA_FLAGS_IE); if (!res) dev_err(hw->dev, "tx dma put source error\n"); diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c new file mode 100644 index 00000000000..8856bcca9d2 --- /dev/null +++ b/drivers/spi/coldfire_qspi.c @@ -0,0 +1,641 @@ +/* + * Freescale/Motorola Coldfire Queued SPI driver + * + * Copyright 2010 Steven King <sfking@fdwdc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA + * +*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/spi/spi.h> + +#include <asm/coldfire.h> +#include <asm/mcfqspi.h> + +#define DRIVER_NAME "mcfqspi" + +#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) + +#define MCFQSPI_QMR 0x00 +#define MCFQSPI_QMR_MSTR 0x8000 +#define MCFQSPI_QMR_CPOL 0x0200 +#define MCFQSPI_QMR_CPHA 0x0100 +#define MCFQSPI_QDLYR 0x04 +#define MCFQSPI_QDLYR_SPE 0x8000 +#define MCFQSPI_QWR 0x08 +#define MCFQSPI_QWR_HALT 0x8000 +#define MCFQSPI_QWR_WREN 0x4000 +#define MCFQSPI_QWR_CSIV 0x1000 +#define MCFQSPI_QIR 0x0C +#define MCFQSPI_QIR_WCEFB 0x8000 +#define MCFQSPI_QIR_ABRTB 0x4000 +#define MCFQSPI_QIR_ABRTL 0x1000 +#define MCFQSPI_QIR_WCEFE 0x0800 +#define MCFQSPI_QIR_ABRTE 0x0400 +#define MCFQSPI_QIR_SPIFE 0x0100 +#define MCFQSPI_QIR_WCEF 0x0008 +#define MCFQSPI_QIR_ABRT 0x0004 +#define MCFQSPI_QIR_SPIF 0x0001 +#define MCFQSPI_QAR 0x010 +#define MCFQSPI_QAR_TXBUF 0x00 +#define MCFQSPI_QAR_RXBUF 0x10 +#define MCFQSPI_QAR_CMDBUF 0x20 +#define MCFQSPI_QDR 0x014 +#define MCFQSPI_QCR 0x014 +#define MCFQSPI_QCR_CONT 0x8000 +#define MCFQSPI_QCR_BITSE 0x4000 +#define MCFQSPI_QCR_DT 0x2000 + +struct mcfqspi { + void __iomem *iobase; + int irq; + struct clk *clk; + struct mcfqspi_cs_control *cs_control; + + wait_queue_head_t waitq; + + struct work_struct work; + struct workqueue_struct *workq; + spinlock_t lock; + struct list_head msgq; +}; + +static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QMR); +} + +static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); +} + +static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) +{ + return readw(mcfqspi->iobase + MCFQSPI_QDLYR); +} + +static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QWR); +} + +static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QIR); +} + +static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QAR); +} + +static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QDR); +} + +static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) +{ + return readw(mcfqspi->iobase + MCFQSPI_QDR); +} + +static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, + bool cs_high) +{ + mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high); +} + +static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, + bool cs_high) +{ + mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high); +} + +static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) +{ + return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? 
+ mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; +} + +static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) +{ + if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) + mcfqspi->cs_control->teardown(mcfqspi->cs_control); +} + +static u8 mcfqspi_qmr_baud(u32 speed_hz) +{ + return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); +} + +static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) +{ + return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; +} + +static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) +{ + struct mcfqspi *mcfqspi = dev_id; + + /* clear interrupt */ + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); + wake_up(&mcfqspi->waitq); + + return IRQ_HANDLED; +} + +static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, + const u8 *txbuf, u8 *rxbuf) +{ + unsigned i, n, offset = 0; + + n = min(count, 16u); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); + if (txbuf) + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + else + for (i = 0; i < count; ++i) + mcfqspi_wr_qdr(mcfqspi, 0); + + count -= n; + if (count) { + u16 qwr = 0xf08; + mcfqspi_wr_qwr(mcfqspi, 0x700); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + + do { + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } + n = min(count, 8u); + if (txbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_TXBUF + offset); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + } + qwr = (offset ? 
0x808 : 0) + ((n - 1) << 8); + offset ^= 8; + count -= n; + } while (count); + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + offset ^= 8; + } + } else { + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + } + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < n; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } +} + +static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, + const u16 *txbuf, u16 *rxbuf) +{ + unsigned i, n, offset = 0; + + n = min(count, 16u); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); + if (txbuf) + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + else + for (i = 0; i < count; ++i) + mcfqspi_wr_qdr(mcfqspi, 0); + + count -= n; + if (count) { + u16 qwr = 0xf08; + mcfqspi_wr_qwr(mcfqspi, 0x700); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + + do { + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } + n = min(count, 8u); + if (txbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_TXBUF + offset); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + } + qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); + offset ^= 8; + count -= n; + } while (count); + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + offset ^= 8; + } + } else { + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + } + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < n; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } +} + +static void mcfqspi_work(struct work_struct *work) +{ + struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); + unsigned long flags; + + spin_lock_irqsave(&mcfqspi->lock, flags); + while (!list_empty(&mcfqspi->msgq)) { + struct spi_message *msg; + struct spi_device *spi; + struct spi_transfer *xfer; + int status = 0; + + msg = container_of(mcfqspi->msgq.next, struct spi_message, + queue); + + list_del_init(&msg->queue); + spin_unlock_irqrestore(&mcfqspi->lock, flags); + + spi = msg->spi; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + bool cs_high = spi->mode & SPI_CS_HIGH; + u16 qmr = MCFQSPI_QMR_MSTR; + + if (xfer->bits_per_word) + qmr |= xfer->bits_per_word << 10; + else + qmr |= spi->bits_per_word << 10; + if (spi->mode & SPI_CPHA) + qmr |= MCFQSPI_QMR_CPHA; + if (spi->mode & SPI_CPOL) + qmr |= MCFQSPI_QMR_CPOL; + if (xfer->speed_hz) + qmr |= mcfqspi_qmr_baud(xfer->speed_hz); + else + qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); + mcfqspi_wr_qmr(mcfqspi, qmr); + + mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); + + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); + if 
((xfer->bits_per_word ? xfer->bits_per_word : + spi->bits_per_word) == 8) + mcfqspi_transfer_msg8(mcfqspi, xfer->len, + xfer->tx_buf, + xfer->rx_buf); + else + mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, + xfer->tx_buf, + xfer->rx_buf); + mcfqspi_wr_qir(mcfqspi, 0); + + if (xfer->delay_usecs) + udelay(xfer->delay_usecs); + if (xfer->cs_change) { + if (!list_is_last(&xfer->transfer_list, + &msg->transfers)) + mcfqspi_cs_deselect(mcfqspi, + spi->chip_select, + cs_high); + } else { + if (list_is_last(&xfer->transfer_list, + &msg->transfers)) + mcfqspi_cs_deselect(mcfqspi, + spi->chip_select, + cs_high); + } + msg->actual_length += xfer->len; + } + msg->status = status; + msg->complete(msg->context); + + spin_lock_irqsave(&mcfqspi->lock, flags); + } + spin_unlock_irqrestore(&mcfqspi->lock, flags); +} + +static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct mcfqspi *mcfqspi; + struct spi_transfer *xfer; + unsigned long flags; + + mcfqspi = spi_master_get_devdata(spi->master); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->bits_per_word && ((xfer->bits_per_word < 8) + || (xfer->bits_per_word > 16))) { + dev_dbg(&spi->dev, + "%d bits per word is not supported\n", + xfer->bits_per_word); + goto fail; + } + if (xfer->speed_hz) { + u32 real_speed = MCFQSPI_BUSCLK / + mcfqspi_qmr_baud(xfer->speed_hz); + if (real_speed != xfer->speed_hz) + dev_dbg(&spi->dev, + "using speed %d instead of %d\n", + real_speed, xfer->speed_hz); + } + } + msg->status = -EINPROGRESS; + msg->actual_length = 0; + + spin_lock_irqsave(&mcfqspi->lock, flags); + list_add_tail(&msg->queue, &mcfqspi->msgq); + queue_work(mcfqspi->workq, &mcfqspi->work); + spin_unlock_irqrestore(&mcfqspi->lock, flags); + + return 0; +fail: + msg->status = -EINVAL; + return -EINVAL; +} + +static int mcfqspi_setup(struct spi_device *spi) +{ + if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { + dev_dbg(&spi->dev, "%d bits per word is not supported\n", + spi->bits_per_word); + return -EINVAL; + } + if (spi->chip_select >= spi->master->num_chipselect) { + dev_dbg(&spi->dev, "%d chip select is out of range\n", + spi->chip_select); + return -EINVAL; + } + + mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), + spi->chip_select, spi->mode & SPI_CS_HIGH); + + dev_dbg(&spi->dev, + "bits per word %d, chip select %d, speed %d KHz\n", + spi->bits_per_word, spi->chip_select, + (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) + / 1000); + + return 0; +} + +static int __devinit mcfqspi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct mcfqspi *mcfqspi; + struct resource *res; + struct mcfqspi_platform_data *pdata; + int status; + + master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); + if (master == NULL) { + dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); + return -ENOMEM; + } + + mcfqspi = spi_master_get_devdata(master); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(&pdev->dev, "platform_get_resource failed\n"); + status = -ENXIO; + goto fail0; + } + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + dev_dbg(&pdev->dev, "request_mem_region failed\n"); + status = -EBUSY; + goto fail0; + } + + mcfqspi->iobase = ioremap(res->start, resource_size(res)); + if (!mcfqspi->iobase) { + dev_dbg(&pdev->dev, "ioremap failed\n"); + status = -ENOMEM; + goto fail1; + } + + mcfqspi->irq = platform_get_irq(pdev, 0); + if (mcfqspi->irq < 0) { + dev_dbg(&pdev->dev, "platform_get_irq failed\n"); + status 
= -ENXIO; + goto fail2; + } + + status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, + pdev->name, mcfqspi); + if (status) { + dev_dbg(&pdev->dev, "request_irq failed\n"); + goto fail2; + } + + mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); + if (IS_ERR(mcfqspi->clk)) { + dev_dbg(&pdev->dev, "clk_get failed\n"); + status = PTR_ERR(mcfqspi->clk); + goto fail3; + } + clk_enable(mcfqspi->clk); + + mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); + if (!mcfqspi->workq) { + dev_dbg(&pdev->dev, "create_workqueue failed\n"); + status = -ENOMEM; + goto fail4; + } + INIT_WORK(&mcfqspi->work, mcfqspi_work); + spin_lock_init(&mcfqspi->lock); + INIT_LIST_HEAD(&mcfqspi->msgq); + init_waitqueue_head(&mcfqspi->waitq); + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_dbg(&pdev->dev, "platform data is missing\n"); + goto fail5; + } + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->num_chipselect; + + mcfqspi->cs_control = pdata->cs_control; + status = mcfqspi_cs_setup(mcfqspi); + if (status) { + dev_dbg(&pdev->dev, "error initializing cs_control\n"); + goto fail5; + } + + master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; + master->setup = mcfqspi_setup; + master->transfer = mcfqspi_transfer; + + platform_set_drvdata(pdev, master); + + status = spi_register_master(master); + if (status) { + dev_dbg(&pdev->dev, "spi_register_master failed\n"); + goto fail6; + } + dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); + + return 0; + +fail6: + mcfqspi_cs_teardown(mcfqspi); +fail5: + destroy_workqueue(mcfqspi->workq); +fail4: + clk_disable(mcfqspi->clk); + clk_put(mcfqspi->clk); +fail3: + free_irq(mcfqspi->irq, mcfqspi); +fail2: + iounmap(mcfqspi->iobase); +fail1: + release_mem_region(res->start, resource_size(res)); +fail0: + spi_master_put(master); + + dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); + + return status; +} + +static int __devexit mcfqspi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + /* disable the hardware (set the baud rate to 0) */ + mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); + + platform_set_drvdata(pdev, NULL); + mcfqspi_cs_teardown(mcfqspi); + destroy_workqueue(mcfqspi->workq); + clk_disable(mcfqspi->clk); + clk_put(mcfqspi->clk); + free_irq(mcfqspi->irq, mcfqspi); + iounmap(mcfqspi->iobase); + release_mem_region(res->start, resource_size(res)); + spi_unregister_master(master); + spi_master_put(master); + + return 0; +} + +#ifdef CONFIG_PM + +static int mcfqspi_suspend(struct device *dev) +{ + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); + + clk_disable(mcfqspi->clk); + + return 0; +} + +static int mcfqspi_resume(struct device *dev) +{ + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); + + clk_enable(mcfqspi->clk); + + return 0; +} + +static struct dev_pm_ops mcfqspi_dev_pm_ops = { + .suspend = mcfqspi_suspend, + .resume = mcfqspi_resume, +}; + +#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) +#else +#define MCFQSPI_DEV_PM_OPS NULL +#endif + +static struct platform_driver mcfqspi_driver = { + .driver.name = DRIVER_NAME, + .driver.owner = THIS_MODULE, + .driver.pm = MCFQSPI_DEV_PM_OPS, + .remove = __devexit_p(mcfqspi_remove), +}; + +static int __init mcfqspi_init(void) +{ + return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); +} +module_init(mcfqspi_init); + 
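/*
 * [Editor's note, not part of the patch] mcfqspi_qmr_baud() above computes
 * the QMR BAUD field as ceil(MCFQSPI_BUSCLK / speed_hz), clamped to 2..255,
 * so within that divider range the achieved SCLK (BUSCLK / baud) does not
 * exceed the requested rate. A standalone sketch of the same arithmetic,
 * assuming a hypothetical 40 MHz bus clock (MCF_BUSCLK / 2); names here are
 * illustrative, not taken from the driver.
 */
#include <stdio.h>

static unsigned int qmr_baud(unsigned long busclk, unsigned long speed_hz)
{
	unsigned long div = (busclk + speed_hz - 1) / speed_hz; /* round up */

	if (div < 2)
		div = 2;
	else if (div > 255)
		div = 255;
	return (unsigned int)div;
}

int main(void)
{
	unsigned long busclk = 40000000;	/* hypothetical bus clock */
	unsigned int baud = qmr_baud(busclk, 3000000);

	/* 40 MHz / 3 MHz rounds up to 14, giving a real rate of ~2.857 MHz */
	printf("baud=%u real=%lu Hz\n", baud, busclk / baud);
	return 0;
}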
+static void __exit mcfqspi_exit(void) +{ + platform_driver_unregister(&mcfqspi_driver); +} +module_exit(mcfqspi_exit); + +MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); +MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c new file mode 100644 index 00000000000..6beab99bf95 --- /dev/null +++ b/drivers/spi/davinci_spi.c @@ -0,0 +1,1018 @@ +/* + * Copyright (C) 2009 Texas Instruments. + * Copyright (C) 2010 EF Johnson Technologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/slab.h> + +#include <mach/spi.h> +#include <mach/edma.h> + +#define SPI_NO_RESOURCE ((resource_size_t)-1) + +#define SPI_MAX_CHIPSELECT 2 + +#define CS_DEFAULT 0xFF + +#define SPIFMT_PHASE_MASK BIT(16) +#define SPIFMT_POLARITY_MASK BIT(17) +#define SPIFMT_DISTIMER_MASK BIT(18) +#define SPIFMT_SHIFTDIR_MASK BIT(20) +#define SPIFMT_WAITENA_MASK BIT(21) +#define SPIFMT_PARITYENA_MASK BIT(22) +#define SPIFMT_ODD_PARITY_MASK BIT(23) +#define SPIFMT_WDELAY_MASK 0x3f000000u +#define SPIFMT_WDELAY_SHIFT 24 +#define SPIFMT_PRESCALE_SHIFT 8 + +/* SPIPC0 */ +#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ +#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ +#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ +#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ + +#define SPIINT_MASKALL 0x0101035F +#define SPIINT_MASKINT 0x0000015F +#define SPI_INTLVL_1 0x000001FF +#define SPI_INTLVL_0 0x00000000 + +/* SPIDAT1 (upper 16 bit defines) */ +#define SPIDAT1_CSHOLD_MASK BIT(12) + +/* SPIGCR1 */ +#define SPIGCR1_CLKMOD_MASK BIT(1) +#define SPIGCR1_MASTER_MASK BIT(0) +#define SPIGCR1_POWERDOWN_MASK BIT(8) +#define SPIGCR1_LOOPBACK_MASK BIT(16) +#define SPIGCR1_SPIENA_MASK BIT(24) + +/* SPIBUF */ +#define SPIBUF_TXFULL_MASK BIT(29) +#define SPIBUF_RXEMPTY_MASK BIT(31) + +/* SPIDELAY */ +#define SPIDELAY_C2TDELAY_SHIFT 24 +#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT) +#define SPIDELAY_T2CDELAY_SHIFT 16 +#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT) +#define SPIDELAY_T2EDELAY_SHIFT 8 +#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT) +#define SPIDELAY_C2EDELAY_SHIFT 0 +#define SPIDELAY_C2EDELAY_MASK 0xFF + +/* Error Masks */ +#define SPIFLG_DLEN_ERR_MASK BIT(0) +#define SPIFLG_TIMEOUT_MASK BIT(1) +#define SPIFLG_PARERR_MASK BIT(2) +#define SPIFLG_DESYNC_MASK BIT(3) +#define SPIFLG_BITERR_MASK BIT(4) +#define SPIFLG_OVRRUN_MASK BIT(6) +#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) 
+#define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \ + | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ + | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ + | SPIFLG_OVRRUN_MASK) + +#define SPIINT_DMA_REQ_EN BIT(16) + +/* SPI Controller registers */ +#define SPIGCR0 0x00 +#define SPIGCR1 0x04 +#define SPIINT 0x08 +#define SPILVL 0x0c +#define SPIFLG 0x10 +#define SPIPC0 0x14 +#define SPIDAT1 0x3c +#define SPIBUF 0x40 +#define SPIDELAY 0x48 +#define SPIDEF 0x4c +#define SPIFMT0 0x50 + +/* We have 2 DMA channels per CS, one for RX and one for TX */ +struct davinci_spi_dma { + int tx_channel; + int rx_channel; + int dummy_param_slot; + enum dma_event_q eventq; +}; + +/* SPI Controller driver's private data. */ +struct davinci_spi { + struct spi_bitbang bitbang; + struct clk *clk; + + u8 version; + resource_size_t pbase; + void __iomem *base; + u32 irq; + struct completion done; + + const void *tx; + void *rx; +#define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) + u8 rx_tmp_buf[SPI_TMP_BUFSZ]; + int rcount; + int wcount; + struct davinci_spi_dma dma; + struct davinci_spi_platform_data *pdata; + + void (*get_rx)(u32 rx_data, struct davinci_spi *); + u32 (*get_tx)(struct davinci_spi *); + + u8 bytes_per_word[SPI_MAX_CHIPSELECT]; +}; + +static struct davinci_spi_config davinci_spi_default_cfg; + +static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi) +{ + if (dspi->rx) { + u8 *rx = dspi->rx; + *rx++ = (u8)data; + dspi->rx = rx; + } +} + +static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) +{ + if (dspi->rx) { + u16 *rx = dspi->rx; + *rx++ = (u16)data; + dspi->rx = rx; + } +} + +static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) +{ + u32 data = 0; + if (dspi->tx) { + const u8 *tx = dspi->tx; + data = *tx++; + dspi->tx = tx; + } + return data; +} + +static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) +{ + u32 data = 0; + if (dspi->tx) { + const u16 *tx = dspi->tx; + data = *tx++; + dspi->tx = tx; + } + return data; +} + +static inline void set_io_bits(void __iomem *addr, u32 bits) +{ + u32 v = ioread32(addr); + + v |= bits; + iowrite32(v, addr); +} + +static inline void clear_io_bits(void __iomem *addr, u32 bits) +{ + u32 v = ioread32(addr); + + v &= ~bits; + iowrite32(v, addr); +} + +/* + * Interface to control the chip select signal + */ +static void davinci_spi_chipselect(struct spi_device *spi, int value) +{ + struct davinci_spi *dspi; + struct davinci_spi_platform_data *pdata; + u8 chip_sel = spi->chip_select; + u16 spidat1 = CS_DEFAULT; + bool gpio_chipsel = false; + + dspi = spi_master_get_devdata(spi->master); + pdata = dspi->pdata; + + if (pdata->chip_sel && chip_sel < pdata->num_chipselect && + pdata->chip_sel[chip_sel] != SPI_INTERN_CS) + gpio_chipsel = true; + + /* + * Board specific chip select logic decides the polarity and cs + * line for the controller + */ + if (gpio_chipsel) { + if (value == BITBANG_CS_ACTIVE) + gpio_set_value(pdata->chip_sel[chip_sel], 0); + else + gpio_set_value(pdata->chip_sel[chip_sel], 1); + } else { + if (value == BITBANG_CS_ACTIVE) { + spidat1 |= SPIDAT1_CSHOLD_MASK; + spidat1 &= ~(0x1 << chip_sel); + } + + iowrite16(spidat1, dspi->base + SPIDAT1 + 2); + } +} + +/** + * davinci_spi_get_prescale - Calculates the correct prescale value + * @maxspeed_hz: the maximum rate the SPI clock can run at + * + * This function calculates the prescale value that generates a clock rate + * less than or equal to the specified maximum. 
+ * + * Returns: calculated prescale - 1 for easy programming into SPI registers + * or negative error number if valid prescalar cannot be updated. + */ +static inline int davinci_spi_get_prescale(struct davinci_spi *dspi, + u32 max_speed_hz) +{ + int ret; + + ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz); + + if (ret < 3 || ret > 256) + return -EINVAL; + + return ret - 1; +} + +/** + * davinci_spi_setup_transfer - This functions will determine transfer method + * @spi: spi device on which data transfer to be done + * @t: spi transfer in which transfer info is filled + * + * This function determines data transfer method (8/16/32 bit transfer). + * It will also set the SPI Clock Control register according to + * SPI slave device freq. + */ +static int davinci_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + + struct davinci_spi *dspi; + struct davinci_spi_config *spicfg; + u8 bits_per_word = 0; + u32 hz = 0, spifmt = 0, prescale = 0; + + dspi = spi_master_get_devdata(spi->master); + spicfg = (struct davinci_spi_config *)spi->controller_data; + if (!spicfg) + spicfg = &davinci_spi_default_cfg; + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* if bits_per_word is not set then set it default */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* + * Assign function pointer to appropriate transfer method + * 8bit, 16bit or 32bit transfer + */ + if (bits_per_word <= 8 && bits_per_word >= 2) { + dspi->get_rx = davinci_spi_rx_buf_u8; + dspi->get_tx = davinci_spi_tx_buf_u8; + dspi->bytes_per_word[spi->chip_select] = 1; + } else if (bits_per_word <= 16 && bits_per_word >= 2) { + dspi->get_rx = davinci_spi_rx_buf_u16; + dspi->get_tx = davinci_spi_tx_buf_u16; + dspi->bytes_per_word[spi->chip_select] = 2; + } else + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + /* Set up SPIFMTn register, unique to this chipselect. 
*/ + + prescale = davinci_spi_get_prescale(dspi, hz); + if (prescale < 0) + return prescale; + + spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f); + + if (spi->mode & SPI_LSB_FIRST) + spifmt |= SPIFMT_SHIFTDIR_MASK; + + if (spi->mode & SPI_CPOL) + spifmt |= SPIFMT_POLARITY_MASK; + + if (!(spi->mode & SPI_CPHA)) + spifmt |= SPIFMT_PHASE_MASK; + + /* + * Version 1 hardware supports two basic SPI modes: + * - Standard SPI mode uses 4 pins, with chipselect + * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) + * (distinct from SPI_3WIRE, with just one data wire; + * or similar variants without MOSI or without MISO) + * + * Version 2 hardware supports an optional handshaking signal, + * so it can support two more modes: + * - 5 pin SPI variant is standard SPI plus SPI_READY + * - 4 pin with enable is (SPI_READY | SPI_NO_CS) + */ + + if (dspi->version == SPI_VERSION_2) { + + u32 delay = 0; + + spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) + & SPIFMT_WDELAY_MASK); + + if (spicfg->odd_parity) + spifmt |= SPIFMT_ODD_PARITY_MASK; + + if (spicfg->parity_enable) + spifmt |= SPIFMT_PARITYENA_MASK; + + if (spicfg->timer_disable) { + spifmt |= SPIFMT_DISTIMER_MASK; + } else { + delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT) + & SPIDELAY_C2TDELAY_MASK; + delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT) + & SPIDELAY_T2CDELAY_MASK; + } + + if (spi->mode & SPI_READY) { + spifmt |= SPIFMT_WAITENA_MASK; + delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT) + & SPIDELAY_T2EDELAY_MASK; + delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT) + & SPIDELAY_C2EDELAY_MASK; + } + + iowrite32(delay, dspi->base + SPIDELAY); + } + + iowrite32(spifmt, dspi->base + SPIFMT0); + + return 0; +} + +/** + * davinci_spi_setup - This functions will set default transfer method + * @spi: spi device on which data transfer to be done + * + * This functions sets the default transfer method. 
+ */ +static int davinci_spi_setup(struct spi_device *spi) +{ + int retval = 0; + struct davinci_spi *dspi; + struct davinci_spi_platform_data *pdata; + + dspi = spi_master_get_devdata(spi->master); + pdata = dspi->pdata; + + /* if bits per word length is zero then set it default 8 */ + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + if (!(spi->mode & SPI_NO_CS)) { + if ((pdata->chip_sel == NULL) || + (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)) + set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); + + } + + if (spi->mode & SPI_READY) + set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); + + if (spi->mode & SPI_LOOP) + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); + else + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); + + return retval; +} + +static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status) +{ + struct device *sdev = dspi->bitbang.master->dev.parent; + + if (int_status & SPIFLG_TIMEOUT_MASK) { + dev_dbg(sdev, "SPI Time-out Error\n"); + return -ETIMEDOUT; + } + if (int_status & SPIFLG_DESYNC_MASK) { + dev_dbg(sdev, "SPI Desynchronization Error\n"); + return -EIO; + } + if (int_status & SPIFLG_BITERR_MASK) { + dev_dbg(sdev, "SPI Bit error\n"); + return -EIO; + } + + if (dspi->version == SPI_VERSION_2) { + if (int_status & SPIFLG_DLEN_ERR_MASK) { + dev_dbg(sdev, "SPI Data Length Error\n"); + return -EIO; + } + if (int_status & SPIFLG_PARERR_MASK) { + dev_dbg(sdev, "SPI Parity Error\n"); + return -EIO; + } + if (int_status & SPIFLG_OVRRUN_MASK) { + dev_dbg(sdev, "SPI Data Overrun error\n"); + return -EIO; + } + if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { + dev_dbg(sdev, "SPI Buffer Init Active\n"); + return -EBUSY; + } + } + + return 0; +} + +/** + * davinci_spi_process_events - check for and handle any SPI controller events + * @dspi: the controller data + * + * This function will check the SPIFLG register and handle any events that are + * detected there + */ +static int davinci_spi_process_events(struct davinci_spi *dspi) +{ + u32 buf, status, errors = 0, spidat1; + + buf = ioread32(dspi->base + SPIBUF); + + if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) { + dspi->get_rx(buf & 0xFFFF, dspi); + dspi->rcount--; + } + + status = ioread32(dspi->base + SPIFLG); + + if (unlikely(status & SPIFLG_ERROR_MASK)) { + errors = status & SPIFLG_ERROR_MASK; + goto out; + } + + if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) { + spidat1 = ioread32(dspi->base + SPIDAT1); + dspi->wcount--; + spidat1 &= ~0xFFFF; + spidat1 |= 0xFFFF & dspi->get_tx(dspi); + iowrite32(spidat1, dspi->base + SPIDAT1); + } + +out: + return errors; +} + +static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) +{ + struct davinci_spi *dspi = data; + struct davinci_spi_dma *dma = &dspi->dma; + + edma_stop(lch); + + if (status == DMA_COMPLETE) { + if (lch == dma->rx_channel) + dspi->rcount = 0; + if (lch == dma->tx_channel) + dspi->wcount = 0; + } + + if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) + complete(&dspi->done); +} + +/** + * davinci_spi_bufs - functions which will handle transfer data + * @spi: spi device on which data transfer to be done + * @t: spi transfer in which transfer info is filled + * + * This function will put data to be transferred into data register + * of SPI controller and then wait until the completion will be marked + * by the IRQ Handler. 
+ */ +static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct davinci_spi *dspi; + int data_type, ret; + u32 tx_data, spidat1; + u32 errors = 0; + struct davinci_spi_config *spicfg; + struct davinci_spi_platform_data *pdata; + unsigned uninitialized_var(rx_buf_count); + struct device *sdev; + + dspi = spi_master_get_devdata(spi->master); + pdata = dspi->pdata; + spicfg = (struct davinci_spi_config *)spi->controller_data; + if (!spicfg) + spicfg = &davinci_spi_default_cfg; + sdev = dspi->bitbang.master->dev.parent; + + /* convert len to words based on bits_per_word */ + data_type = dspi->bytes_per_word[spi->chip_select]; + + dspi->tx = t->tx_buf; + dspi->rx = t->rx_buf; + dspi->wcount = t->len / data_type; + dspi->rcount = dspi->wcount; + + spidat1 = ioread32(dspi->base + SPIDAT1); + + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); + + INIT_COMPLETION(dspi->done); + + if (spicfg->io_type == SPI_IO_TYPE_INTR) + set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); + + if (spicfg->io_type != SPI_IO_TYPE_DMA) { + /* start the transfer */ + dspi->wcount--; + tx_data = dspi->get_tx(dspi); + spidat1 &= 0xFFFF0000; + spidat1 |= tx_data & 0xFFFF; + iowrite32(spidat1, dspi->base + SPIDAT1); + } else { + struct davinci_spi_dma *dma; + unsigned long tx_reg, rx_reg; + struct edmacc_param param; + void *rx_buf; + + dma = &dspi->dma; + + tx_reg = (unsigned long)dspi->pbase + SPIDAT1; + rx_reg = (unsigned long)dspi->pbase + SPIBUF; + + /* + * Transmit DMA setup + * + * If there is transmit data, map the transmit buffer, set it + * as the source of data and set the source B index to data + * size. If there is no transmit data, set the transmit register + * as the source of data, and set the source B index to zero. + * + * The destination is always the transmit register itself. And + * the destination never increments. + */ + + if (t->tx_buf) { + t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, + dspi->wcount, DMA_TO_DEVICE); + if (dma_mapping_error(&spi->dev, t->tx_dma)) { + dev_dbg(sdev, "Unable to DMA map %d bytes" + "TX buffer\n", dspi->wcount); + return -ENOMEM; + } + } + + param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); + param.src = t->tx_buf ? t->tx_dma : tx_reg; + param.a_b_cnt = dspi->wcount << 16 | data_type; + param.dst = tx_reg; + param.src_dst_bidx = t->tx_buf ? data_type : 0; + param.link_bcntrld = 0xffff; + param.src_dst_cidx = 0; + param.ccnt = 1; + edma_write_slot(dma->tx_channel, ¶m); + edma_link(dma->tx_channel, dma->dummy_param_slot); + + /* + * Receive DMA setup + * + * If there is receive buffer, use it to receive data. If there + * is none provided, use a temporary receive buffer. Set the + * destination B index to 0 so effectively only one byte is used + * in the temporary buffer (address does not increment). + * + * The source of receive data is the receive data register. The + * source address never increments. 
+ */ + + if (t->rx_buf) { + rx_buf = t->rx_buf; + rx_buf_count = dspi->rcount; + } else { + rx_buf = dspi->rx_tmp_buf; + rx_buf_count = sizeof(dspi->rx_tmp_buf); + } + + t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, + DMA_FROM_DEVICE); + if (dma_mapping_error(&spi->dev, t->rx_dma)) { + dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", + rx_buf_count); + if (t->tx_buf) + dma_unmap_single(NULL, t->tx_dma, dspi->wcount, + DMA_TO_DEVICE); + return -ENOMEM; + } + + param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); + param.src = rx_reg; + param.a_b_cnt = dspi->rcount << 16 | data_type; + param.dst = t->rx_dma; + param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; + param.link_bcntrld = 0xffff; + param.src_dst_cidx = 0; + param.ccnt = 1; + edma_write_slot(dma->rx_channel, ¶m); + + if (pdata->cshold_bug) + iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); + + edma_start(dma->rx_channel); + edma_start(dma->tx_channel); + set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); + } + + /* Wait for the transfer to complete */ + if (spicfg->io_type != SPI_IO_TYPE_POLL) { + wait_for_completion_interruptible(&(dspi->done)); + } else { + while (dspi->rcount > 0 || dspi->wcount > 0) { + errors = davinci_spi_process_events(dspi); + if (errors) + break; + cpu_relax(); + } + } + + clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); + if (spicfg->io_type == SPI_IO_TYPE_DMA) { + + if (t->tx_buf) + dma_unmap_single(NULL, t->tx_dma, dspi->wcount, + DMA_TO_DEVICE); + + dma_unmap_single(NULL, t->rx_dma, rx_buf_count, + DMA_FROM_DEVICE); + + clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); + } + + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + + /* + * Check for bit error, desync error,parity error,timeout error and + * receive overflow errors + */ + if (errors) { + ret = davinci_spi_check_error(dspi, errors); + WARN(!ret, "%s: error reported but no error found!\n", + dev_name(&spi->dev)); + return ret; + } + + if (dspi->rcount != 0 || dspi->wcount != 0) { + dev_err(sdev, "SPI data transfer error\n"); + return -EIO; + } + + return t->len; +} + +/** + * davinci_spi_irq - Interrupt handler for SPI Master Controller + * @irq: IRQ number for this SPI Master + * @context_data: structure for SPI Master controller davinci_spi + * + * ISR will determine that interrupt arrives either for READ or WRITE command. + * According to command it will do the appropriate action. It will check + * transfer length and if it is not zero then dispatch transfer command again. + * If transfer length is zero then it will indicate the COMPLETION so that + * davinci_spi_bufs function can go ahead. 
+ */ +static irqreturn_t davinci_spi_irq(s32 irq, void *data) +{ + struct davinci_spi *dspi = data; + int status; + + status = davinci_spi_process_events(dspi); + if (unlikely(status != 0)) + clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); + + if ((!dspi->rcount && !dspi->wcount) || status) + complete(&dspi->done); + + return IRQ_HANDLED; +} + +static int davinci_spi_request_dma(struct davinci_spi *dspi) +{ + int r; + struct davinci_spi_dma *dma = &dspi->dma; + + r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, + dma->eventq); + if (r < 0) { + pr_err("Unable to request DMA channel for SPI RX\n"); + r = -EAGAIN; + goto rx_dma_failed; + } + + r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, + dma->eventq); + if (r < 0) { + pr_err("Unable to request DMA channel for SPI TX\n"); + r = -EAGAIN; + goto tx_dma_failed; + } + + r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); + if (r < 0) { + pr_err("Unable to request SPI TX DMA param slot\n"); + r = -EAGAIN; + goto param_failed; + } + dma->dummy_param_slot = r; + edma_link(dma->dummy_param_slot, dma->dummy_param_slot); + + return 0; +param_failed: + edma_free_channel(dma->tx_channel); +tx_dma_failed: + edma_free_channel(dma->rx_channel); +rx_dma_failed: + return r; +} + +/** + * davinci_spi_probe - probe function for SPI Master Controller + * @pdev: platform_device structure which contains plateform specific data + * + * According to Linux Device Model this function will be invoked by Linux + * with platform_device struct which contains the device specific info. + * This function will map the SPI controller's memory, register IRQ, + * Reset SPI controller and setting its registers to default value. + * It will invoke spi_bitbang_start to create work queue so that client driver + * can register transfer method to work queue. 
+ */ +static int davinci_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct davinci_spi *dspi; + struct davinci_spi_platform_data *pdata; + struct resource *r, *mem; + resource_size_t dma_rx_chan = SPI_NO_RESOURCE; + resource_size_t dma_tx_chan = SPI_NO_RESOURCE; + resource_size_t dma_eventq = SPI_NO_RESOURCE; + int i = 0, ret = 0; + u32 spipc0; + + pdata = pdev->dev.platform_data; + if (pdata == NULL) { + ret = -ENODEV; + goto err; + } + + master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); + if (master == NULL) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(&pdev->dev, master); + + dspi = spi_master_get_devdata(master); + if (dspi == NULL) { + ret = -ENOENT; + goto free_master; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENOENT; + goto free_master; + } + + dspi->pbase = r->start; + dspi->pdata = pdata; + + mem = request_mem_region(r->start, resource_size(r), pdev->name); + if (mem == NULL) { + ret = -EBUSY; + goto free_master; + } + + dspi->base = ioremap(r->start, resource_size(r)); + if (dspi->base == NULL) { + ret = -ENOMEM; + goto release_region; + } + + dspi->irq = platform_get_irq(pdev, 0); + if (dspi->irq <= 0) { + ret = -EINVAL; + goto unmap_io; + } + + ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev), + dspi); + if (ret) + goto unmap_io; + + dspi->bitbang.master = spi_master_get(master); + if (dspi->bitbang.master == NULL) { + ret = -ENODEV; + goto irq_free; + } + + dspi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(dspi->clk)) { + ret = -ENODEV; + goto put_master; + } + clk_enable(dspi->clk); + + master->bus_num = pdev->id; + master->num_chipselect = pdata->num_chipselect; + master->setup = davinci_spi_setup; + + dspi->bitbang.chipselect = davinci_spi_chipselect; + dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; + + dspi->version = pdata->version; + + dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; + if (dspi->version == SPI_VERSION_2) + dspi->bitbang.flags |= SPI_READY; + + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (r) + dma_rx_chan = r->start; + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (r) + dma_tx_chan = r->start; + r = platform_get_resource(pdev, IORESOURCE_DMA, 2); + if (r) + dma_eventq = r->start; + + dspi->bitbang.txrx_bufs = davinci_spi_bufs; + if (dma_rx_chan != SPI_NO_RESOURCE && + dma_tx_chan != SPI_NO_RESOURCE && + dma_eventq != SPI_NO_RESOURCE) { + dspi->dma.rx_channel = dma_rx_chan; + dspi->dma.tx_channel = dma_tx_chan; + dspi->dma.eventq = dma_eventq; + + ret = davinci_spi_request_dma(dspi); + if (ret) + goto free_clk; + + dev_info(&pdev->dev, "DMA: supported\n"); + dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, " + "event queue: %d\n", dma_rx_chan, dma_tx_chan, + dma_eventq); + } + + dspi->get_rx = davinci_spi_rx_buf_u8; + dspi->get_tx = davinci_spi_tx_buf_u8; + + init_completion(&dspi->done); + + /* Reset In/OUT SPI module */ + iowrite32(0, dspi->base + SPIGCR0); + udelay(100); + iowrite32(1, dspi->base + SPIGCR0); + + /* Set up SPIPC0. 
CS and ENA init is done in davinci_spi_setup */ + spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; + iowrite32(spipc0, dspi->base + SPIPC0); + + /* initialize chip selects */ + if (pdata->chip_sel) { + for (i = 0; i < pdata->num_chipselect; i++) { + if (pdata->chip_sel[i] != SPI_INTERN_CS) + gpio_direction_output(pdata->chip_sel[i], 1); + } + } + + if (pdata->intr_line) + iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); + else + iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); + + iowrite32(CS_DEFAULT, dspi->base + SPIDEF); + + /* master mode default */ + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + + ret = spi_bitbang_start(&dspi->bitbang); + if (ret) + goto free_dma; + + dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base); + + return ret; + +free_dma: + edma_free_channel(dspi->dma.tx_channel); + edma_free_channel(dspi->dma.rx_channel); + edma_free_slot(dspi->dma.dummy_param_slot); +free_clk: + clk_disable(dspi->clk); + clk_put(dspi->clk); +put_master: + spi_master_put(master); +irq_free: + free_irq(dspi->irq, dspi); +unmap_io: + iounmap(dspi->base); +release_region: + release_mem_region(dspi->pbase, resource_size(r)); +free_master: + kfree(master); +err: + return ret; +} + +/** + * davinci_spi_remove - remove function for SPI Master Controller + * @pdev: platform_device structure which contains plateform specific data + * + * This function will do the reverse action of davinci_spi_probe function + * It will free the IRQ and SPI controller's memory region. + * It will also call spi_bitbang_stop to destroy the work queue which was + * created by spi_bitbang_start. + */ +static int __exit davinci_spi_remove(struct platform_device *pdev) +{ + struct davinci_spi *dspi; + struct spi_master *master; + struct resource *r; + + master = dev_get_drvdata(&pdev->dev); + dspi = spi_master_get_devdata(master); + + spi_bitbang_stop(&dspi->bitbang); + + clk_disable(dspi->clk); + clk_put(dspi->clk); + spi_master_put(master); + free_irq(dspi->irq, dspi); + iounmap(dspi->base); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(dspi->pbase, resource_size(r)); + + return 0; +} + +static struct platform_driver davinci_spi_driver = { + .driver = { + .name = "spi_davinci", + .owner = THIS_MODULE, + }, + .remove = __exit_p(davinci_spi_remove), +}; + +static int __init davinci_spi_init(void) +{ + return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); +} +module_init(davinci_spi_init); + +static void __exit davinci_spi_exit(void) +{ + platform_driver_unregister(&davinci_spi_driver); +} +module_exit(davinci_spi_exit); + +MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c new file mode 100644 index 00000000000..22af77f9881 --- /dev/null +++ b/drivers/spi/dw_spi.c @@ -0,0 +1,1006 @@ +/* + * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c) + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/highmem.h> +#include <linux/delay.h> +#include <linux/slab.h> + +#include <linux/spi/dw_spi.h> +#include <linux/spi/spi.h> + +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> +#endif + +#define START_STATE ((void *)0) +#define RUNNING_STATE ((void *)1) +#define DONE_STATE ((void *)2) +#define ERROR_STATE ((void *)-1) + +#define QUEUE_RUNNING 0 +#define QUEUE_STOPPED 1 + +#define MRST_SPI_DEASSERT 0 +#define MRST_SPI_ASSERT 1 + +/* Slave spi_dev related */ +struct chip_data { + u16 cr0; + u8 cs; /* chip select pin */ + u8 n_bytes; /* current is a 1/2/4 byte op */ + u8 tmode; /* TR/TO/RO/EEPROM */ + u8 type; /* SPI/SSP/MicroWire */ + + u8 poll_mode; /* 1 means use poll mode */ + + u32 dma_width; + u32 rx_threshold; + u32 tx_threshold; + u8 enable_dma; + u8 bits_per_word; + u16 clk_div; /* baud rate divider */ + u32 speed_hz; /* baud rate */ + int (*write)(struct dw_spi *dws); + int (*read)(struct dw_spi *dws); + void (*cs_control)(u32 command); +}; + +#ifdef CONFIG_DEBUG_FS +static int spi_show_regs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define SPI_REGS_BUFSIZE 1024 +static ssize_t spi_show_regs(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dw_spi *dws; + char *buf; + u32 len = 0; + ssize_t ret; + + dws = file->private_data; + + buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL); + if (!buf) + return 0; + + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "MRST SPI0 registers:\n"); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "=================================\n"); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "SER: \t\t0x%08x\n", dw_readl(dws, ser)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "SR: \t\t0x%08x\n", dw_readl(dws, sr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "IMR: \t\t0x%08x\n", dw_readl(dws, imr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "ISR: \t\t0x%08x\n", dw_readl(dws, isr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + 
"=================================\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret; +} + +static const struct file_operations mrst_spi_regs_ops = { + .owner = THIS_MODULE, + .open = spi_show_regs_open, + .read = spi_show_regs, + .llseek = default_llseek, +}; + +static int mrst_spi_debugfs_init(struct dw_spi *dws) +{ + dws->debugfs = debugfs_create_dir("mrst_spi", NULL); + if (!dws->debugfs) + return -ENOMEM; + + debugfs_create_file("registers", S_IFREG | S_IRUGO, + dws->debugfs, (void *)dws, &mrst_spi_regs_ops); + return 0; +} + +static void mrst_spi_debugfs_remove(struct dw_spi *dws) +{ + if (dws->debugfs) + debugfs_remove_recursive(dws->debugfs); +} + +#else +static inline int mrst_spi_debugfs_init(struct dw_spi *dws) +{ + return 0; +} + +static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +static void wait_till_not_busy(struct dw_spi *dws) +{ + unsigned long end = jiffies + 1 + usecs_to_jiffies(5000); + + while (time_before(jiffies, end)) { + if (!(dw_readw(dws, sr) & SR_BUSY)) + return; + cpu_relax(); + } + dev_err(&dws->master->dev, + "DW SPI: Status keeps busy for 5000us after a read/write!\n"); +} + +static void flush(struct dw_spi *dws) +{ + while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) { + dw_readw(dws, dr); + cpu_relax(); + } + + wait_till_not_busy(dws); +} + +static int null_writer(struct dw_spi *dws) +{ + u8 n_bytes = dws->n_bytes; + + if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) + || (dws->tx == dws->tx_end)) + return 0; + dw_writew(dws, dr, 0); + dws->tx += n_bytes; + + wait_till_not_busy(dws); + return 1; +} + +static int null_reader(struct dw_spi *dws) +{ + u8 n_bytes = dws->n_bytes; + + while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) + && (dws->rx < dws->rx_end)) { + dw_readw(dws, dr); + dws->rx += n_bytes; + } + wait_till_not_busy(dws); + return dws->rx == dws->rx_end; +} + +static int u8_writer(struct dw_spi *dws) +{ + if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) + || (dws->tx == dws->tx_end)) + return 0; + + dw_writew(dws, dr, *(u8 *)(dws->tx)); + ++dws->tx; + + wait_till_not_busy(dws); + return 1; +} + +static int u8_reader(struct dw_spi *dws) +{ + while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) + && (dws->rx < dws->rx_end)) { + *(u8 *)(dws->rx) = dw_readw(dws, dr); + ++dws->rx; + } + + wait_till_not_busy(dws); + return dws->rx == dws->rx_end; +} + +static int u16_writer(struct dw_spi *dws) +{ + if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) + || (dws->tx == dws->tx_end)) + return 0; + + dw_writew(dws, dr, *(u16 *)(dws->tx)); + dws->tx += 2; + + wait_till_not_busy(dws); + return 1; +} + +static int u16_reader(struct dw_spi *dws) +{ + u16 temp; + + while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) + && (dws->rx < dws->rx_end)) { + temp = dw_readw(dws, dr); + *(u16 *)(dws->rx) = temp; + dws->rx += 2; + } + + wait_till_not_busy(dws); + return dws->rx == dws->rx_end; +} + +static void *next_transfer(struct dw_spi *dws) +{ + struct spi_message *msg = dws->cur_msg; + struct spi_transfer *trans = dws->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + dws->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, + transfer_list); + return RUNNING_STATE; + } else + return DONE_STATE; +} + +/* + * Note: first step is the protocol driver prepares + * a dma-capable memory, and this func just need translate + * the virt addr to physical + */ +static int map_dma_buffers(struct dw_spi *dws) +{ + if (!dws->cur_msg->is_dma_mapped + || 
!dws->dma_inited
+		|| !dws->cur_chip->enable_dma
+		|| !dws->dma_ops)
+		return 0;
+
+	if (dws->cur_transfer->tx_dma)
+		dws->tx_dma = dws->cur_transfer->tx_dma;
+
+	if (dws->cur_transfer->rx_dma)
+		dws->rx_dma = dws->cur_transfer->rx_dma;
+
+	return 1;
+}
+
+/* Caller already set message->status; dma and pio irqs are blocked */
+static void giveback(struct dw_spi *dws)
+{
+	struct spi_transfer *last_transfer;
+	unsigned long flags;
+	struct spi_message *msg;
+
+	spin_lock_irqsave(&dws->lock, flags);
+	msg = dws->cur_msg;
+	dws->cur_msg = NULL;
+	dws->cur_transfer = NULL;
+	dws->prev_chip = dws->cur_chip;
+	dws->cur_chip = NULL;
+	dws->dma_mapped = 0;
+	queue_work(dws->workqueue, &dws->pump_messages);
+	spin_unlock_irqrestore(&dws->lock, flags);
+
+	last_transfer = list_entry(msg->transfers.prev,
+					struct spi_transfer,
+					transfer_list);
+
+	if (!last_transfer->cs_change && dws->cs_control)
+		dws->cs_control(MRST_SPI_DEASSERT);
+
+	msg->state = NULL;
+	if (msg->complete)
+		msg->complete(msg->context);
+}
+
+static void int_error_stop(struct dw_spi *dws, const char *msg)
+{
+	/* Stop and reset hw */
+	flush(dws);
+	spi_enable_chip(dws, 0);
+
+	dev_err(&dws->master->dev, "%s\n", msg);
+	dws->cur_msg->state = ERROR_STATE;
+	tasklet_schedule(&dws->pump_transfers);
+}
+
+void dw_spi_xfer_done(struct dw_spi *dws)
+{
+	/* Update total bytes transferred; actual_length reports the bytes actually read/written */
+	dws->cur_msg->actual_length += dws->len;
+
+	/* Move to next transfer */
+	dws->cur_msg->state = next_transfer(dws);
+
+	/* Handle end of message */
+	if (dws->cur_msg->state == DONE_STATE) {
+		dws->cur_msg->status = 0;
+		giveback(dws);
+	} else
+		tasklet_schedule(&dws->pump_transfers);
+}
+EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
+
+static irqreturn_t interrupt_transfer(struct dw_spi *dws)
+{
+	u16 irq_status, irq_mask = 0x3f;
+	u32 int_level = dws->fifo_len / 2;
+	u32 left;
+
+	irq_status = dw_readw(dws, isr) & irq_mask;
+	/* Error handling */
+	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
+		dw_readw(dws, txoicr);
+		dw_readw(dws, rxoicr);
+		dw_readw(dws, rxuicr);
+		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
+		return IRQ_HANDLED;
+	}
+
+	if (irq_status & SPI_INT_TXEI) {
+		spi_mask_intr(dws, SPI_INT_TXEI);
+
+		left = (dws->tx_end - dws->tx) / dws->n_bytes;
+		left = (left > int_level) ? int_level : left;
+
+		while (left--)
+			dws->write(dws);
+		dws->read(dws);
+
+		/* Re-enable the IRQ if there is still data left to tx */
+		if (dws->tx_end > dws->tx)
+			spi_umask_intr(dws, SPI_INT_TXEI);
+		else
+			dw_spi_xfer_done(dws);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dw_spi_irq(int irq, void *dev_id)
+{
+	struct dw_spi *dws = dev_id;
+	u16 irq_status, irq_mask = 0x3f;
+
+	irq_status = dw_readw(dws, isr) & irq_mask;
+	if (!irq_status)
+		return IRQ_NONE;
+
+	if (!dws->cur_msg) {
+		spi_mask_intr(dws, SPI_INT_TXEI);
+		/* Never fail */
+		return IRQ_HANDLED;
+	}
+
+	return dws->transfer_handler(dws);
+}
+
+/* Must be called inside pump_transfers() */
+static void poll_transfer(struct dw_spi *dws)
+{
+	while (dws->write(dws))
+		dws->read(dws);
+	/*
+	 * There is a possibility that the last word of a transaction
+	 * will be lost if data is not ready. Re-read to solve this issue.
+ */ + dws->read(dws); + + dw_spi_xfer_done(dws); +} + +static void pump_transfers(unsigned long data) +{ + struct dw_spi *dws = (struct dw_spi *)data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct spi_device *spi = NULL; + struct chip_data *chip = NULL; + u8 bits = 0; + u8 imask = 0; + u8 cs_change = 0; + u16 txint_level = 0; + u16 clk_div = 0; + u32 speed = 0; + u32 cr0 = 0; + + /* Get current state information */ + message = dws->cur_msg; + transfer = dws->cur_transfer; + chip = dws->cur_chip; + spi = message->spi; + + if (unlikely(!chip->clk_div)) + chip->clk_div = dws->max_freq / chip->speed_hz; + + if (message->state == ERROR_STATE) { + message->status = -EIO; + goto early_exit; + } + + /* Handle end of message */ + if (message->state == DONE_STATE) { + message->status = 0; + goto early_exit; + } + + /* Delay if requested at end of transfer*/ + if (message->state == RUNNING_STATE) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, + transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + } + + dws->n_bytes = chip->n_bytes; + dws->dma_width = chip->dma_width; + dws->cs_control = chip->cs_control; + + dws->rx_dma = transfer->rx_dma; + dws->tx_dma = transfer->tx_dma; + dws->tx = (void *)transfer->tx_buf; + dws->tx_end = dws->tx + transfer->len; + dws->rx = transfer->rx_buf; + dws->rx_end = dws->rx + transfer->len; + dws->write = dws->tx ? chip->write : null_writer; + dws->read = dws->rx ? chip->read : null_reader; + dws->cs_change = transfer->cs_change; + dws->len = dws->cur_transfer->len; + if (chip != dws->prev_chip) + cs_change = 1; + + cr0 = chip->cr0; + + /* Handle per transfer options for bpw and speed */ + if (transfer->speed_hz) { + speed = chip->speed_hz; + + if (transfer->speed_hz != speed) { + speed = transfer->speed_hz; + if (speed > dws->max_freq) { + printk(KERN_ERR "MRST SPI0: unsupported" + "freq: %dHz\n", speed); + message->status = -EIO; + goto early_exit; + } + + /* clk_div doesn't support odd number */ + clk_div = dws->max_freq / speed; + clk_div = (clk_div + 1) & 0xfffe; + + chip->speed_hz = speed; + chip->clk_div = clk_div; + } + } + if (transfer->bits_per_word) { + bits = transfer->bits_per_word; + + switch (bits) { + case 8: + dws->n_bytes = 1; + dws->dma_width = 1; + dws->read = (dws->read != null_reader) ? + u8_reader : null_reader; + dws->write = (dws->write != null_writer) ? + u8_writer : null_writer; + break; + case 16: + dws->n_bytes = 2; + dws->dma_width = 2; + dws->read = (dws->read != null_reader) ? + u16_reader : null_reader; + dws->write = (dws->write != null_writer) ? + u16_writer : null_writer; + break; + default: + printk(KERN_ERR "MRST SPI0: unsupported bits:" + "%db\n", bits); + message->status = -EIO; + goto early_exit; + } + + cr0 = (bits - 1) + | (chip->type << SPI_FRF_OFFSET) + | (spi->mode << SPI_MODE_OFFSET) + | (chip->tmode << SPI_TMOD_OFFSET); + } + message->state = RUNNING_STATE; + + /* + * Adjust transfer mode if necessary. Requires platform dependent + * chipselect mechanism. 
+ */ + if (dws->cs_control) { + if (dws->rx && dws->tx) + chip->tmode = SPI_TMOD_TR; + else if (dws->rx) + chip->tmode = SPI_TMOD_RO; + else + chip->tmode = SPI_TMOD_TO; + + cr0 &= ~SPI_TMOD_MASK; + cr0 |= (chip->tmode << SPI_TMOD_OFFSET); + } + + /* Check if current transfer is a DMA transaction */ + dws->dma_mapped = map_dma_buffers(dws); + + /* + * Interrupt mode + * we only need set the TXEI IRQ, as TX/RX always happen syncronizely + */ + if (!dws->dma_mapped && !chip->poll_mode) { + int templen = dws->len / dws->n_bytes; + txint_level = dws->fifo_len / 2; + txint_level = (templen > txint_level) ? txint_level : templen; + + imask |= SPI_INT_TXEI; + dws->transfer_handler = interrupt_transfer; + } + + /* + * Reprogram registers only if + * 1. chip select changes + * 2. clk_div is changed + * 3. control value changes + */ + if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { + spi_enable_chip(dws, 0); + + if (dw_readw(dws, ctrl0) != cr0) + dw_writew(dws, ctrl0, cr0); + + spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); + spi_chip_sel(dws, spi->chip_select); + + /* Set the interrupt mask, for poll mode just disable all int */ + spi_mask_intr(dws, 0xff); + if (imask) + spi_umask_intr(dws, imask); + if (txint_level) + dw_writew(dws, txfltr, txint_level); + + spi_enable_chip(dws, 1); + if (cs_change) + dws->prev_chip = chip; + } + + if (dws->dma_mapped) + dws->dma_ops->dma_transfer(dws, cs_change); + + if (chip->poll_mode) + poll_transfer(dws); + + return; + +early_exit: + giveback(dws); + return; +} + +static void pump_messages(struct work_struct *work) +{ + struct dw_spi *dws = + container_of(work, struct dw_spi, pump_messages); + unsigned long flags; + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&dws->lock, flags); + if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) { + dws->busy = 0; + spin_unlock_irqrestore(&dws->lock, flags); + return; + } + + /* Make sure we are not already running a message */ + if (dws->cur_msg) { + spin_unlock_irqrestore(&dws->lock, flags); + return; + } + + /* Extract head of queue */ + dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue); + list_del_init(&dws->cur_msg->queue); + + /* Initial message state*/ + dws->cur_msg->state = START_STATE; + dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, + struct spi_transfer, + transfer_list); + dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); + + /* Mark as busy and launch transfers */ + tasklet_schedule(&dws->pump_transfers); + + dws->busy = 1; + spin_unlock_irqrestore(&dws->lock, flags); +} + +/* spi_device use this to queue in their spi_msg */ +static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct dw_spi *dws = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&dws->lock, flags); + + if (dws->run == QUEUE_STOPPED) { + spin_unlock_irqrestore(&dws->lock, flags); + return -ESHUTDOWN; + } + + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + list_add_tail(&msg->queue, &dws->queue); + + if (dws->run == QUEUE_RUNNING && !dws->busy) { + + if (dws->cur_transfer || dws->cur_msg) + queue_work(dws->workqueue, + &dws->pump_messages); + else { + /* If no other data transaction in air, just go */ + spin_unlock_irqrestore(&dws->lock, flags); + pump_messages(&dws->pump_messages); + return 0; + } + } + + spin_unlock_irqrestore(&dws->lock, flags); + return 0; +} + +/* This may be called twice for each spi dev */ +static int dw_spi_setup(struct spi_device 
*spi) +{ + struct dw_spi_chip *chip_info = NULL; + struct chip_data *chip; + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) + return -EINVAL; + + /* Only alloc on first setup */ + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + } + + /* + * Protocol drivers may change the chip settings, so... + * if chip_info exists, use it + */ + chip_info = spi->controller_data; + + /* chip_info doesn't always exist */ + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + + chip->rx_threshold = 0; + chip->tx_threshold = 0; + + chip->enable_dma = chip_info->enable_dma; + } + + if (spi->bits_per_word <= 8) { + chip->n_bytes = 1; + chip->dma_width = 1; + chip->read = u8_reader; + chip->write = u8_writer; + } else if (spi->bits_per_word <= 16) { + chip->n_bytes = 2; + chip->dma_width = 2; + chip->read = u16_reader; + chip->write = u16_writer; + } else { + /* Never take >16b case for MRST SPIC */ + dev_err(&spi->dev, "invalid wordsize\n"); + return -EINVAL; + } + chip->bits_per_word = spi->bits_per_word; + + if (!spi->max_speed_hz) { + dev_err(&spi->dev, "No max speed HZ parameter\n"); + return -EINVAL; + } + chip->speed_hz = spi->max_speed_hz; + + chip->tmode = 0; /* Tx & Rx */ + /* Default SPI mode is SCPOL = 0, SCPH = 0 */ + chip->cr0 = (chip->bits_per_word - 1) + | (chip->type << SPI_FRF_OFFSET) + | (spi->mode << SPI_MODE_OFFSET) + | (chip->tmode << SPI_TMOD_OFFSET); + + spi_set_ctldata(spi, chip); + return 0; +} + +static void dw_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + kfree(chip); +} + +static int __devinit init_queue(struct dw_spi *dws) +{ + INIT_LIST_HEAD(&dws->queue); + spin_lock_init(&dws->lock); + + dws->run = QUEUE_STOPPED; + dws->busy = 0; + + tasklet_init(&dws->pump_transfers, + pump_transfers, (unsigned long)dws); + + INIT_WORK(&dws->pump_messages, pump_messages); + dws->workqueue = create_singlethread_workqueue( + dev_name(dws->master->dev.parent)); + if (dws->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static int start_queue(struct dw_spi *dws) +{ + unsigned long flags; + + spin_lock_irqsave(&dws->lock, flags); + + if (dws->run == QUEUE_RUNNING || dws->busy) { + spin_unlock_irqrestore(&dws->lock, flags); + return -EBUSY; + } + + dws->run = QUEUE_RUNNING; + dws->cur_msg = NULL; + dws->cur_transfer = NULL; + dws->cur_chip = NULL; + dws->prev_chip = NULL; + spin_unlock_irqrestore(&dws->lock, flags); + + queue_work(dws->workqueue, &dws->pump_messages); + + return 0; +} + +static int stop_queue(struct dw_spi *dws) +{ + unsigned long flags; + unsigned limit = 50; + int status = 0; + + spin_lock_irqsave(&dws->lock, flags); + dws->run = QUEUE_STOPPED; + while (!list_empty(&dws->queue) && dws->busy && limit--) { + spin_unlock_irqrestore(&dws->lock, flags); + msleep(10); + spin_lock_irqsave(&dws->lock, flags); + } + + if (!list_empty(&dws->queue) || dws->busy) + status = -EBUSY; + spin_unlock_irqrestore(&dws->lock, flags); + + return status; +} + +static int destroy_queue(struct dw_spi *dws) +{ + int status; + + status = stop_queue(dws); + if (status != 0) + return status; + destroy_workqueue(dws->workqueue); + return 0; +} + +/* Restart the controller, disable all interrupts, clean rx fifo */ +static void spi_hw_init(struct dw_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_mask_intr(dws, 0xff); + spi_enable_chip(dws, 1); + 
flush(dws);
+
+	/*
+	 * Try to detect the FIFO depth if not set by interface driver,
+	 * the depth could be from 2 to 256 from HW spec
+	 */
+	if (!dws->fifo_len) {
+		u32 fifo;
+		for (fifo = 2; fifo <= 257; fifo++) {
+			dw_writew(dws, txfltr, fifo);
+			if (fifo != dw_readw(dws, txfltr))
+				break;
+		}
+
+		dws->fifo_len = (fifo == 257) ? 0 : fifo;
+		dw_writew(dws, txfltr, 0);
+	}
+}
+
+int __devinit dw_spi_add_host(struct dw_spi *dws)
+{
+	struct spi_master *master;
+	int ret;
+
+	BUG_ON(dws == NULL);
+
+	master = spi_alloc_master(dws->parent_dev, 0);
+	if (!master) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	dws->master = master;
+	dws->type = SSI_MOTO_SPI;
+	dws->prev_chip = NULL;
+	dws->dma_inited = 0;
+	dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+
+	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
+			"dw_spi", dws);
+	if (ret < 0) {
+		dev_err(&master->dev, "cannot get IRQ\n");
+		goto err_free_master;
+	}
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bus_num = dws->bus_num;
+	master->num_chipselect = dws->num_cs;
+	master->cleanup = dw_spi_cleanup;
+	master->setup = dw_spi_setup;
+	master->transfer = dw_spi_transfer;
+
+	/* Basic HW init */
+	spi_hw_init(dws);
+
+	if (dws->dma_ops && dws->dma_ops->dma_init) {
+		ret = dws->dma_ops->dma_init(dws);
+		if (ret) {
+			dev_warn(&master->dev, "DMA init failed\n");
+			dws->dma_inited = 0;
+		}
+	}
+
+	/* Initialize and start the message queue */
+	ret = init_queue(dws);
+	if (ret) {
+		dev_err(&master->dev, "problem initializing queue\n");
+		goto err_disable_hw;
+	}
+	ret = start_queue(dws);
+	if (ret) {
+		dev_err(&master->dev, "problem starting queue\n");
+		goto err_disable_hw;
+	}
+
+	spi_master_set_devdata(master, dws);
+	ret = spi_register_master(master);
+	if (ret) {
+		dev_err(&master->dev, "problem registering spi master\n");
+		goto err_queue_alloc;
+	}
+
+	mrst_spi_debugfs_init(dws);
+	return 0;
+
+err_queue_alloc:
+	destroy_queue(dws);
+	if (dws->dma_ops && dws->dma_ops->dma_exit)
+		dws->dma_ops->dma_exit(dws);
+err_disable_hw:
+	spi_enable_chip(dws, 0);
+	free_irq(dws->irq, dws);
+err_free_master:
+	spi_master_put(master);
+exit:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dw_spi_add_host);
+
+void __devexit dw_spi_remove_host(struct dw_spi *dws)
+{
+	int status = 0;
+
+	if (!dws)
+		return;
+	mrst_spi_debugfs_remove(dws);
+
+	/* Remove the queue */
+	status = destroy_queue(dws);
+	if (status != 0)
+		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
+			"complete, message memory not freed\n");
+
+	if (dws->dma_ops && dws->dma_ops->dma_exit)
+		dws->dma_ops->dma_exit(dws);
+	spi_enable_chip(dws, 0);
+	/* Disable clk */
+	spi_set_clk(dws, 0);
+	free_irq(dws->irq, dws);
+
+	/* Disconnect from the SPI framework */
+	spi_unregister_master(dws->master);
+}
+EXPORT_SYMBOL_GPL(dw_spi_remove_host);
+
+int dw_spi_suspend_host(struct dw_spi *dws)
+{
+	int ret = 0;
+
+	ret = stop_queue(dws);
+	if (ret)
+		return ret;
+	spi_enable_chip(dws, 0);
+	spi_set_clk(dws, 0);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
+
+int dw_spi_resume_host(struct dw_spi *dws)
+{
+	int ret;
+
+	spi_hw_init(dws);
+	ret = start_queue(dws);
+	if (ret)
+		dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dw_spi_resume_host);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/dw_spi_mid.c
new file mode 100644
index 00000000000..c91c966e071
--- /dev/null
+++ b/drivers/spi/dw_spi_mid.c
@@ -0,0 +1,223 @@ +/* + * dw_spi_mid.c - special handling for DW core on Intel MID platform + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/spi/dw_spi.h> + +#ifdef CONFIG_SPI_DW_MID_DMA +#include <linux/intel_mid_dma.h> +#include <linux/pci.h> + +struct mid_dma { + struct intel_mid_dma_slave dmas_tx; + struct intel_mid_dma_slave dmas_rx; +}; + +static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) +{ + struct dw_spi *dws = param; + + return dws->dmac && (&dws->dmac->dev == chan->device->dev); +} + +static int mid_spi_dma_init(struct dw_spi *dws) +{ + struct mid_dma *dw_dma = dws->dma_priv; + struct intel_mid_dma_slave *rxs, *txs; + dma_cap_mask_t mask; + + /* + * Get pci device for DMA controller, currently it could only + * be the DMA controller of either Moorestown or Medfield + */ + dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL); + if (!dws->dmac) + dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* 1. Init rx channel */ + dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); + if (!dws->rxchan) + goto err_exit; + rxs = &dw_dma->dmas_rx; + rxs->hs_mode = LNW_DMA_HW_HS; + rxs->cfg_mode = LNW_DMA_PER_TO_MEM; + dws->rxchan->private = rxs; + + /* 2. Init tx channel */ + dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); + if (!dws->txchan) + goto free_rxchan; + txs = &dw_dma->dmas_tx; + txs->hs_mode = LNW_DMA_HW_HS; + txs->cfg_mode = LNW_DMA_MEM_TO_PER; + dws->txchan->private = txs; + + dws->dma_inited = 1; + return 0; + +free_rxchan: + dma_release_channel(dws->rxchan); +err_exit: + return -1; + +} + +static void mid_spi_dma_exit(struct dw_spi *dws) +{ + dma_release_channel(dws->txchan); + dma_release_channel(dws->rxchan); +} + +/* + * dws->dma_chan_done is cleared before the dma transfer starts, + * callback for rx/tx channel will each increment it by 1. + * Reaching 2 means the whole spi transaction is done. + */ +static void dw_spi_dma_done(void *arg) +{ + struct dw_spi *dws = arg; + + if (++dws->dma_chan_done != 2) + return; + dw_spi_xfer_done(dws); +} + +static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) +{ + struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL; + struct dma_chan *txchan, *rxchan; + struct dma_slave_config txconf, rxconf; + u16 dma_ctrl = 0; + + /* 1. 
setup DMA related registers */ + if (cs_change) { + spi_enable_chip(dws, 0); + dw_writew(dws, dmardlr, 0xf); + dw_writew(dws, dmatdlr, 0x10); + if (dws->tx_dma) + dma_ctrl |= 0x2; + if (dws->rx_dma) + dma_ctrl |= 0x1; + dw_writew(dws, dmacr, dma_ctrl); + spi_enable_chip(dws, 1); + } + + dws->dma_chan_done = 0; + txchan = dws->txchan; + rxchan = dws->rxchan; + + /* 2. Prepare the TX dma transfer */ + txconf.direction = DMA_TO_DEVICE; + txconf.dst_addr = dws->dma_addr; + txconf.dst_maxburst = LNW_DMA_MSIZE_16; + txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + + txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, + (unsigned long) &txconf); + + memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); + dws->tx_sgl.dma_address = dws->tx_dma; + dws->tx_sgl.length = dws->len; + + txdesc = txchan->device->device_prep_slave_sg(txchan, + &dws->tx_sgl, + 1, + DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + txdesc->callback = dw_spi_dma_done; + txdesc->callback_param = dws; + + /* 3. Prepare the RX dma transfer */ + rxconf.direction = DMA_FROM_DEVICE; + rxconf.src_addr = dws->dma_addr; + rxconf.src_maxburst = LNW_DMA_MSIZE_16; + rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + + rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, + (unsigned long) &rxconf); + + memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); + dws->rx_sgl.dma_address = dws->rx_dma; + dws->rx_sgl.length = dws->len; + + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, + &dws->rx_sgl, + 1, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + rxdesc->callback = dw_spi_dma_done; + rxdesc->callback_param = dws; + + /* rx must be started before tx due to spi instinct */ + rxdesc->tx_submit(rxdesc); + txdesc->tx_submit(txdesc); + return 0; +} + +static struct dw_spi_dma_ops mid_dma_ops = { + .dma_init = mid_spi_dma_init, + .dma_exit = mid_spi_dma_exit, + .dma_transfer = mid_spi_dma_transfer, +}; +#endif + +/* Some specific info for SPI0 controller on Moorestown */ + +/* HW info for MRST CLk Control Unit, one 32b reg */ +#define MRST_SPI_CLK_BASE 100000000 /* 100m */ +#define MRST_CLK_SPI0_REG 0xff11d86c +#define CLK_SPI_BDIV_OFFSET 0 +#define CLK_SPI_BDIV_MASK 0x00000007 +#define CLK_SPI_CDIV_OFFSET 9 +#define CLK_SPI_CDIV_MASK 0x00000e00 +#define CLK_SPI_DISABLE_OFFSET 8 + +int dw_spi_mid_init(struct dw_spi *dws) +{ + u32 *clk_reg, clk_cdiv; + + clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); + if (!clk_reg) + return -ENOMEM; + + /* get SPI controller operating freq info */ + clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; + dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); + iounmap(clk_reg); + + dws->num_cs = 16; + dws->fifo_len = 40; /* FIFO has 40 words buffer */ + +#ifdef CONFIG_SPI_DW_MID_DMA + dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); + if (!dws->dma_priv) + return -ENOMEM; + dws->dma_ops = &mid_dma_ops; +#endif + return 0; +} diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c new file mode 100644 index 00000000000..db35bd9c1b2 --- /dev/null +++ b/drivers/spi/dw_spi_mmio.c @@ -0,0 +1,148 @@ +/* + * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core + * + * Copyright (c) 2010, Octasic semiconductor. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spi/dw_spi.h> +#include <linux/spi/spi.h> + +#define DRIVER_NAME "dw_spi_mmio" + +struct dw_spi_mmio { + struct dw_spi dws; + struct clk *clk; +}; + +static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) +{ + struct dw_spi_mmio *dwsmmio; + struct dw_spi *dws; + struct resource *mem, *ioarea; + int ret; + + dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); + if (!dwsmmio) { + ret = -ENOMEM; + goto err_end; + } + + dws = &dwsmmio->dws; + + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + ret = -EINVAL; + goto err_kfree; + } + + ioarea = request_mem_region(mem->start, resource_size(mem), + pdev->name); + if (!ioarea) { + dev_err(&pdev->dev, "SPI region already claimed\n"); + ret = -EBUSY; + goto err_kfree; + } + + dws->regs = ioremap_nocache(mem->start, resource_size(mem)); + if (!dws->regs) { + dev_err(&pdev->dev, "SPI region already mapped\n"); + ret = -ENOMEM; + goto err_release_reg; + } + + dws->irq = platform_get_irq(pdev, 0); + if (dws->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + ret = dws->irq; /* -ENXIO */ + goto err_unmap; + } + + dwsmmio->clk = clk_get(&pdev->dev, NULL); + if (!dwsmmio->clk) { + ret = -ENODEV; + goto err_irq; + } + clk_enable(dwsmmio->clk); + + dws->parent_dev = &pdev->dev; + dws->bus_num = 0; + dws->num_cs = 4; + dws->max_freq = clk_get_rate(dwsmmio->clk); + + ret = dw_spi_add_host(dws); + if (ret) + goto err_clk; + + platform_set_drvdata(pdev, dwsmmio); + return 0; + +err_clk: + clk_disable(dwsmmio->clk); + clk_put(dwsmmio->clk); + dwsmmio->clk = NULL; +err_irq: + free_irq(dws->irq, dws); +err_unmap: + iounmap(dws->regs); +err_release_reg: + release_mem_region(mem->start, resource_size(mem)); +err_kfree: + kfree(dwsmmio); +err_end: + return ret; +} + +static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) +{ + struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); + struct resource *mem; + + platform_set_drvdata(pdev, NULL); + + clk_disable(dwsmmio->clk); + clk_put(dwsmmio->clk); + dwsmmio->clk = NULL; + + free_irq(dwsmmio->dws.irq, &dwsmmio->dws); + dw_spi_remove_host(&dwsmmio->dws); + iounmap(dwsmmio->dws.regs); + kfree(dwsmmio); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); + return 0; +} + +static struct platform_driver dw_spi_mmio_driver = { + .remove = __devexit_p(dw_spi_mmio_remove), + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init dw_spi_mmio_init(void) +{ + return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); +} +module_init(dw_spi_mmio_init); + +static void __exit dw_spi_mmio_exit(void) +{ + platform_driver_unregister(&dw_spi_mmio_driver); +} +module_exit(dw_spi_mmio_exit); + +MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>"); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c new file mode 100644 index 00000000000..49ec3aa1219 --- /dev/null +++ b/drivers/spi/dw_spi_pci.c @@ -0,0 +1,180 @@ 
+/* + * dw_spi_pci.c - PCI interface driver for DW SPI Core + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spi/dw_spi.h> +#include <linux/spi/spi.h> + +#define DRIVER_NAME "dw_spi_pci" + +struct dw_spi_pci { + struct pci_dev *pdev; + struct dw_spi dws; +}; + +static int __devinit spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct dw_spi_pci *dwpci; + struct dw_spi *dws; + int pci_bar = 0; + int ret; + + printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n", + pdev->vendor, pdev->device); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL); + if (!dwpci) { + ret = -ENOMEM; + goto err_disable; + } + + dwpci->pdev = pdev; + dws = &dwpci->dws; + + /* Get basic io resource and map it */ + dws->paddr = pci_resource_start(pdev, pci_bar); + dws->iolen = pci_resource_len(pdev, pci_bar); + + ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev)); + if (ret) + goto err_kfree; + + dws->regs = ioremap_nocache((unsigned long)dws->paddr, + pci_resource_len(pdev, pci_bar)); + if (!dws->regs) { + ret = -ENOMEM; + goto err_release_reg; + } + + dws->parent_dev = &pdev->dev; + dws->bus_num = 0; + dws->num_cs = 4; + dws->irq = pdev->irq; + + /* + * Specific handling for Intel MID paltforms, like dma setup, + * clock rate, FIFO depth. 
+ */ + if (pdev->device == 0x0800) { + ret = dw_spi_mid_init(dws); + if (ret) + goto err_unmap; + } + + ret = dw_spi_add_host(dws); + if (ret) + goto err_unmap; + + /* PCI hook and SPI hook use the same drv data */ + pci_set_drvdata(pdev, dwpci); + return 0; + +err_unmap: + iounmap(dws->regs); +err_release_reg: + pci_release_region(pdev, pci_bar); +err_kfree: + kfree(dwpci); +err_disable: + pci_disable_device(pdev); + return ret; +} + +static void __devexit spi_pci_remove(struct pci_dev *pdev) +{ + struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); + + pci_set_drvdata(pdev, NULL); + dw_spi_remove_host(&dwpci->dws); + iounmap(dwpci->dws.regs); + pci_release_region(pdev, 0); + kfree(dwpci); + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int spi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); + int ret; + + ret = dw_spi_suspend_host(&dwpci->dws); + if (ret) + return ret; + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return ret; +} + +static int spi_resume(struct pci_dev *pdev) +{ + struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + return dw_spi_resume_host(&dwpci->dws); +} +#else +#define spi_suspend NULL +#define spi_resume NULL +#endif + +static const struct pci_device_id pci_ids[] __devinitdata = { + /* Intel MID platform SPI controller 0 */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, + {}, +}; + +static struct pci_driver dw_spi_driver = { + .name = DRIVER_NAME, + .id_table = pci_ids, + .probe = spi_pci_probe, + .remove = __devexit_p(spi_pci_remove), + .suspend = spi_suspend, + .resume = spi_resume, +}; + +static int __init mrst_spi_init(void) +{ + return pci_register_driver(&dw_spi_driver); +} + +static void __exit mrst_spi_exit(void) +{ + pci_unregister_driver(&dw_spi_driver); +} + +module_init(mrst_spi_init); +module_exit(mrst_spi_exit); + +MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); +MODULE_DESCRIPTION("PCI interface driver for DW SPI Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c new file mode 100644 index 00000000000..0ba35df9a6d --- /dev/null +++ b/drivers/spi/ep93xx_spi.c @@ -0,0 +1,938 @@ +/* + * Driver for Cirrus Logic EP93xx SPI controller. + * + * Copyright (c) 2010 Mika Westerberg + * + * Explicit FIFO handling code was inspired by amba-pl022 driver. + * + * Chip select support using other than built-in GPIOs by H. Hartley Sweeten. + * + * For more information about the SPI controller see documentation on Cirrus + * Logic web site: + * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/sched.h> +#include <linux/spi/spi.h> + +#include <mach/ep93xx_spi.h> + +#define SSPCR0 0x0000 +#define SSPCR0_MODE_SHIFT 6 +#define SSPCR0_SCR_SHIFT 8 + +#define SSPCR1 0x0004 +#define SSPCR1_RIE BIT(0) +#define SSPCR1_TIE BIT(1) +#define SSPCR1_RORIE BIT(2) +#define SSPCR1_LBM BIT(3) +#define SSPCR1_SSE BIT(4) +#define SSPCR1_MS BIT(5) +#define SSPCR1_SOD BIT(6) + +#define SSPDR 0x0008 + +#define SSPSR 0x000c +#define SSPSR_TFE BIT(0) +#define SSPSR_TNF BIT(1) +#define SSPSR_RNE BIT(2) +#define SSPSR_RFF BIT(3) +#define SSPSR_BSY BIT(4) +#define SSPCPSR 0x0010 + +#define SSPIIR 0x0014 +#define SSPIIR_RIS BIT(0) +#define SSPIIR_TIS BIT(1) +#define SSPIIR_RORIS BIT(2) +#define SSPICR SSPIIR + +/* timeout in milliseconds */ +#define SPI_TIMEOUT 5 +/* maximum depth of RX/TX FIFO */ +#define SPI_FIFO_SIZE 8 + +/** + * struct ep93xx_spi - EP93xx SPI controller structure + * @lock: spinlock that protects concurrent accesses to fields @running, + * @current_msg and @msg_queue + * @pdev: pointer to platform device + * @clk: clock for the controller + * @regs_base: pointer to ioremap()'d registers + * @irq: IRQ number used by the driver + * @min_rate: minimum clock rate (in Hz) supported by the controller + * @max_rate: maximum clock rate (in Hz) supported by the controller + * @running: is the queue running + * @wq: workqueue used by the driver + * @msg_work: work that is queued for the driver + * @wait: wait here until given transfer is completed + * @msg_queue: queue for the messages + * @current_msg: message that is currently processed (or %NULL if none) + * @tx: current byte in transfer to transmit + * @rx: current byte in transfer to receive + * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one + * frame decreases this level and sending one frame increases it. + * + * This structure holds EP93xx SPI controller specific information. When + * @running is %true, driver accepts transfer requests from protocol drivers. + * @current_msg is used to hold pointer to the message that is currently + * processed. If @current_msg is %NULL, it means that no processing is going + * on. + * + * Most of the fields are only written once and they can be accessed without + * taking the @lock. Fields that are accessed concurrently are: @current_msg, + * @running, and @msg_queue. + */ +struct ep93xx_spi { + spinlock_t lock; + const struct platform_device *pdev; + struct clk *clk; + void __iomem *regs_base; + int irq; + unsigned long min_rate; + unsigned long max_rate; + bool running; + struct workqueue_struct *wq; + struct work_struct msg_work; + struct completion wait; + struct list_head msg_queue; + struct spi_message *current_msg; + size_t tx; + size_t rx; + size_t fifo_level; +}; + +/** + * struct ep93xx_spi_chip - SPI device hardware settings + * @spi: back pointer to the SPI device + * @rate: max rate in hz this chip supports + * @div_cpsr: cpsr (pre-scaler) divider + * @div_scr: scr divider + * @dss: bits per word (4 - 16 bits) + * @ops: private chip operations + * + * This structure is used to store hardware register specific settings for each + * SPI device. Settings are written to hardware by function + * ep93xx_spi_chip_setup(). 
+ */ +struct ep93xx_spi_chip { + const struct spi_device *spi; + unsigned long rate; + u8 div_cpsr; + u8 div_scr; + u8 dss; + struct ep93xx_spi_chip_ops *ops; +}; + +/* converts bits per word to CR0.DSS value */ +#define bits_per_word_to_dss(bpw) ((bpw) - 1) + +static inline void +ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value) +{ + __raw_writeb(value, espi->regs_base + reg); +} + +static inline u8 +ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg) +{ + return __raw_readb(spi->regs_base + reg); +} + +static inline void +ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value) +{ + __raw_writew(value, espi->regs_base + reg); +} + +static inline u16 +ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg) +{ + return __raw_readw(spi->regs_base + reg); +} + +static int ep93xx_spi_enable(const struct ep93xx_spi *espi) +{ + u8 regval; + int err; + + err = clk_enable(espi->clk); + if (err) + return err; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval |= SSPCR1_SSE; + ep93xx_spi_write_u8(espi, SSPCR1, regval); + + return 0; +} + +static void ep93xx_spi_disable(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval &= ~SSPCR1_SSE; + ep93xx_spi_write_u8(espi, SSPCR1, regval); + + clk_disable(espi->clk); +} + +static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); + ep93xx_spi_write_u8(espi, SSPCR1, regval); +} + +static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); + ep93xx_spi_write_u8(espi, SSPCR1, regval); +} + +/** + * ep93xx_spi_calc_divisors() - calculates SPI clock divisors + * @espi: ep93xx SPI controller struct + * @chip: divisors are calculated for this chip + * @rate: desired SPI output clock rate + * + * Function calculates cpsr (clock pre-scaler) and scr divisors based on + * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If, + * for some reason, divisors cannot be calculated nothing is stored and + * %-EINVAL is returned. + */ +static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, + struct ep93xx_spi_chip *chip, + unsigned long rate) +{ + unsigned long spi_clk_rate = clk_get_rate(espi->clk); + int cpsr, scr; + + /* + * Make sure that max value is between values supported by the + * controller. Note that minimum value is already checked in + * ep93xx_spi_transfer(). + */ + rate = clamp(rate, espi->min_rate, espi->max_rate); + + /* + * Calculate divisors so that we can get speed according the + * following formula: + * rate = spi_clock_rate / (cpsr * (1 + scr)) + * + * cpsr must be even number and starts from 2, scr can be any number + * between 0 and 255. + */ + for (cpsr = 2; cpsr <= 254; cpsr += 2) { + for (scr = 0; scr <= 255; scr++) { + if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { + chip->div_scr = (u8)scr; + chip->div_cpsr = (u8)cpsr; + return 0; + } + } + } + + return -EINVAL; +} + +static void ep93xx_spi_cs_control(struct spi_device *spi, bool control) +{ + struct ep93xx_spi_chip *chip = spi_get_ctldata(spi); + int value = (spi->mode & SPI_CS_HIGH) ? 
control : !control; + + if (chip->ops && chip->ops->cs_control) + chip->ops->cs_control(spi, value); +} + +/** + * ep93xx_spi_setup() - setup an SPI device + * @spi: SPI device to setup + * + * This function sets up SPI device mode, speed etc. Can be called multiple + * times for a single device. Returns %0 in case of success, negative error in + * case of failure. When this function returns success, the device is + * deselected. + */ +static int ep93xx_spi_setup(struct spi_device *spi) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct ep93xx_spi_chip *chip; + + if (spi->bits_per_word < 4 || spi->bits_per_word > 16) { + dev_err(&espi->pdev->dev, "invalid bits per word %d\n", + spi->bits_per_word); + return -EINVAL; + } + + chip = spi_get_ctldata(spi); + if (!chip) { + dev_dbg(&espi->pdev->dev, "initial setup for %s\n", + spi->modalias); + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->spi = spi; + chip->ops = spi->controller_data; + + if (chip->ops && chip->ops->setup) { + int ret = chip->ops->setup(spi); + if (ret) { + kfree(chip); + return ret; + } + } + + spi_set_ctldata(spi, chip); + } + + if (spi->max_speed_hz != chip->rate) { + int err; + + err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz); + if (err != 0) { + spi_set_ctldata(spi, NULL); + kfree(chip); + return err; + } + chip->rate = spi->max_speed_hz; + } + + chip->dss = bits_per_word_to_dss(spi->bits_per_word); + + ep93xx_spi_cs_control(spi, false); + return 0; +} + +/** + * ep93xx_spi_transfer() - queue message to be transferred + * @spi: target SPI device + * @msg: message to be transferred + * + * This function is called by SPI device drivers when they are going to transfer + * a new message. It simply puts the message in the queue and schedules + * workqueue to perform the actual transfer later on. + * + * Returns %0 on success and negative error in case of failure. + */ +static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct spi_transfer *t; + unsigned long flags; + + if (!msg || !msg->complete) + return -EINVAL; + + /* first validate each transfer */ + list_for_each_entry(t, &msg->transfers, transfer_list) { + if (t->bits_per_word) { + if (t->bits_per_word < 4 || t->bits_per_word > 16) + return -EINVAL; + } + if (t->speed_hz && t->speed_hz < espi->min_rate) + return -EINVAL; + } + + /* + * Now that we own the message, let's initialize it so that it is + * suitable for us. We use @msg->status to signal whether there was + * error in transfer and @msg->state is used to hold pointer to the + * current transfer (or %NULL if no active current transfer). + */ + msg->state = NULL; + msg->status = 0; + msg->actual_length = 0; + + spin_lock_irqsave(&espi->lock, flags); + if (!espi->running) { + spin_unlock_irqrestore(&espi->lock, flags); + return -ESHUTDOWN; + } + list_add_tail(&msg->queue, &espi->msg_queue); + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irqrestore(&espi->lock, flags); + + return 0; +} + +/** + * ep93xx_spi_cleanup() - cleans up master controller specific state + * @spi: SPI device to cleanup + * + * This function releases master controller specific state for given @spi + * device. 
+ */ +static void ep93xx_spi_cleanup(struct spi_device *spi) +{ + struct ep93xx_spi_chip *chip; + + chip = spi_get_ctldata(spi); + if (chip) { + if (chip->ops && chip->ops->cleanup) + chip->ops->cleanup(spi); + spi_set_ctldata(spi, NULL); + kfree(chip); + } +} + +/** + * ep93xx_spi_chip_setup() - configures hardware according to given @chip + * @espi: ep93xx SPI controller struct + * @chip: chip specific settings + * + * This function sets up the actual hardware registers with settings given in + * @chip. Note that no validation is done so make sure that callers validate + * settings before calling this. + */ +static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, + const struct ep93xx_spi_chip *chip) +{ + u16 cr0; + + cr0 = chip->div_scr << SSPCR0_SCR_SHIFT; + cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; + cr0 |= chip->dss; + + dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", + chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss); + dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); + + ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr); + ep93xx_spi_write_u16(espi, SSPCR0, cr0); +} + +static inline int bits_per_word(const struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word; +} + +static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u16 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u16(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } else { + u8 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u8 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u8(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } +} + +static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 rx_val; + + rx_val = ep93xx_spi_read_u16(espi, SSPDR); + if (t->rx_buf) + ((u16 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } else { + u8 rx_val; + + rx_val = ep93xx_spi_read_u8(espi, SSPDR); + if (t->rx_buf) + ((u8 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } +} + +/** + * ep93xx_spi_read_write() - perform next RX/TX transfer + * @espi: ep93xx SPI controller struct + * + * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If + * called several times, the whole transfer will be completed. Returns + * %-EINPROGRESS when current transfer was not yet completed otherwise %0. + * + * When this function is finished, RX FIFO should be empty and TX FIFO should be + * full. + */ +static int ep93xx_spi_read_write(struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + /* read as long as RX FIFO has frames in it */ + while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) { + ep93xx_do_read(espi, t); + espi->fifo_level--; + } + + /* write as long as TX FIFO has room */ + while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) { + ep93xx_do_write(espi, t); + espi->fifo_level++; + } + + if (espi->rx == t->len) { + msg->actual_length += t->len; + return 0; + } + + return -EINPROGRESS; +} + +/** + * ep93xx_spi_process_transfer() - processes one SPI transfer + * @espi: ep93xx SPI controller struct + * @msg: current message + * @t: transfer to process + * + * This function processes one SPI transfer given in @t. 
Function waits until + * transfer is complete (may sleep) and updates @msg->status based on whether + * transfer was succesfully processed or not. + */ +static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, + struct spi_message *msg, + struct spi_transfer *t) +{ + struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); + + msg->state = t; + + /* + * Handle any transfer specific settings if needed. We use + * temporary chip settings here and restore original later when + * the transfer is finished. + */ + if (t->speed_hz || t->bits_per_word) { + struct ep93xx_spi_chip tmp_chip = *chip; + + if (t->speed_hz) { + int err; + + err = ep93xx_spi_calc_divisors(espi, &tmp_chip, + t->speed_hz); + if (err) { + dev_err(&espi->pdev->dev, + "failed to adjust speed\n"); + msg->status = err; + return; + } + } + + if (t->bits_per_word) + tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word); + + /* + * Set up temporary new hw settings for this transfer. + */ + ep93xx_spi_chip_setup(espi, &tmp_chip); + } + + espi->rx = 0; + espi->tx = 0; + + /* + * Now everything is set up for the current transfer. We prime the TX + * FIFO, enable interrupts, and wait for the transfer to complete. + */ + if (ep93xx_spi_read_write(espi)) { + ep93xx_spi_enable_interrupts(espi); + wait_for_completion(&espi->wait); + } + + /* + * In case of error during transmit, we bail out from processing + * the message. + */ + if (msg->status) + return; + + /* + * After this transfer is finished, perform any possible + * post-transfer actions requested by the protocol driver. + */ + if (t->delay_usecs) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(t->delay_usecs)); + } + if (t->cs_change) { + if (!list_is_last(&t->transfer_list, &msg->transfers)) { + /* + * In case protocol driver is asking us to drop the + * chipselect briefly, we let the scheduler to handle + * any "delay" here. + */ + ep93xx_spi_cs_control(msg->spi, false); + cond_resched(); + ep93xx_spi_cs_control(msg->spi, true); + } + } + + if (t->speed_hz || t->bits_per_word) + ep93xx_spi_chip_setup(espi, chip); +} + +/* + * ep93xx_spi_process_message() - process one SPI message + * @espi: ep93xx SPI controller struct + * @msg: message to process + * + * This function processes a single SPI message. We go through all transfers in + * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is + * asserted during the whole message (unless per transfer cs_change is set). + * + * @msg->status contains %0 in case of success or negative error code in case of + * failure. + */ +static void ep93xx_spi_process_message(struct ep93xx_spi *espi, + struct spi_message *msg) +{ + unsigned long timeout; + struct spi_transfer *t; + int err; + + /* + * Enable the SPI controller and its clock. + */ + err = ep93xx_spi_enable(espi); + if (err) { + dev_err(&espi->pdev->dev, "failed to enable SPI controller\n"); + msg->status = err; + return; + } + + /* + * Just to be sure: flush any data from RX FIFO. + */ + timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); + while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) { + if (time_after(jiffies, timeout)) { + dev_warn(&espi->pdev->dev, + "timeout while flushing RX FIFO\n"); + msg->status = -ETIMEDOUT; + return; + } + ep93xx_spi_read_u16(espi, SSPDR); + } + + /* + * We explicitly handle FIFO level. This way we don't have to check TX + * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. 
+ */ + espi->fifo_level = 0; + + /* + * Update SPI controller registers according to spi device and assert + * the chipselect. + */ + ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi)); + ep93xx_spi_cs_control(msg->spi, true); + + list_for_each_entry(t, &msg->transfers, transfer_list) { + ep93xx_spi_process_transfer(espi, msg, t); + if (msg->status) + break; + } + + /* + * Now the whole message is transferred (or failed for some reason). We + * deselect the device and disable the SPI controller. + */ + ep93xx_spi_cs_control(msg->spi, false); + ep93xx_spi_disable(espi); +} + +#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work)) + +/** + * ep93xx_spi_work() - EP93xx SPI workqueue worker function + * @work: work struct + * + * Workqueue worker function. This function is called when there are new + * SPI messages to be processed. Message is taken out from the queue and then + * passed to ep93xx_spi_process_message(). + * + * After message is transferred, protocol driver is notified by calling + * @msg->complete(). In case of error, @msg->status is set to negative error + * number, otherwise it contains zero (and @msg->actual_length is updated). + */ +static void ep93xx_spi_work(struct work_struct *work) +{ + struct ep93xx_spi *espi = work_to_espi(work); + struct spi_message *msg; + + spin_lock_irq(&espi->lock); + if (!espi->running || espi->current_msg || + list_empty(&espi->msg_queue)) { + spin_unlock_irq(&espi->lock); + return; + } + msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); + list_del_init(&msg->queue); + espi->current_msg = msg; + spin_unlock_irq(&espi->lock); + + ep93xx_spi_process_message(espi, msg); + + /* + * Update the current message and re-schedule ourselves if there are + * more messages in the queue. + */ + spin_lock_irq(&espi->lock); + espi->current_msg = NULL; + if (espi->running && !list_empty(&espi->msg_queue)) + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irq(&espi->lock); + + /* notify the protocol driver that we are done with this message */ + msg->complete(msg->context); +} + +static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) +{ + struct ep93xx_spi *espi = dev_id; + u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR); + + /* + * If we got ROR (receive overrun) interrupt we know that something is + * wrong. Just abort the message. + */ + if (unlikely(irq_status & SSPIIR_RORIS)) { + /* clear the overrun interrupt */ + ep93xx_spi_write_u8(espi, SSPICR, 0); + dev_warn(&espi->pdev->dev, + "receive overrun, aborting the message\n"); + espi->current_msg->status = -EIO; + } else { + /* + * Interrupt is either RX (RIS) or TX (TIS). For both cases we + * simply execute next data transfer. + */ + if (ep93xx_spi_read_write(espi)) { + /* + * In normal case, there still is some processing left + * for current transfer. Let's wait for the next + * interrupt then. + */ + return IRQ_HANDLED; + } + } + + /* + * Current transfer is finished, either with error or with success. In + * any case we disable interrupts and notify the worker to handle + * any post-processing of the message. 
+ */ + ep93xx_spi_disable_interrupts(espi); + complete(&espi->wait); + return IRQ_HANDLED; +} + +static int __init ep93xx_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct ep93xx_spi_info *info; + struct ep93xx_spi *espi; + struct resource *res; + int error; + + info = pdev->dev.platform_data; + + master = spi_alloc_master(&pdev->dev, sizeof(*espi)); + if (!master) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + return -ENOMEM; + } + + master->setup = ep93xx_spi_setup; + master->transfer = ep93xx_spi_transfer; + master->cleanup = ep93xx_spi_cleanup; + master->bus_num = pdev->id; + master->num_chipselect = info->num_chipselect; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + platform_set_drvdata(pdev, master); + + espi = spi_master_get_devdata(master); + + espi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(espi->clk)) { + dev_err(&pdev->dev, "unable to get spi clock\n"); + error = PTR_ERR(espi->clk); + goto fail_release_master; + } + + spin_lock_init(&espi->lock); + init_completion(&espi->wait); + + /* + * Calculate maximum and minimum supported clock rates + * for the controller. + */ + espi->max_rate = clk_get_rate(espi->clk) / 2; + espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); + espi->pdev = pdev; + + espi->irq = platform_get_irq(pdev, 0); + if (espi->irq < 0) { + error = -EBUSY; + dev_err(&pdev->dev, "failed to get irq resources\n"); + goto fail_put_clock; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "unable to get iomem resource\n"); + error = -ENODEV; + goto fail_put_clock; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (!res) { + dev_err(&pdev->dev, "unable to request iomem resources\n"); + error = -EBUSY; + goto fail_put_clock; + } + + espi->regs_base = ioremap(res->start, resource_size(res)); + if (!espi->regs_base) { + dev_err(&pdev->dev, "failed to map resources\n"); + error = -ENODEV; + goto fail_free_mem; + } + + error = request_irq(espi->irq, ep93xx_spi_interrupt, 0, + "ep93xx-spi", espi); + if (error) { + dev_err(&pdev->dev, "failed to request irq\n"); + goto fail_unmap_regs; + } + + espi->wq = create_singlethread_workqueue("ep93xx_spid"); + if (!espi->wq) { + dev_err(&pdev->dev, "unable to create workqueue\n"); + goto fail_free_irq; + } + INIT_WORK(&espi->msg_work, ep93xx_spi_work); + INIT_LIST_HEAD(&espi->msg_queue); + espi->running = true; + + /* make sure that the hardware is disabled */ + ep93xx_spi_write_u8(espi, SSPCR1, 0); + + error = spi_register_master(master); + if (error) { + dev_err(&pdev->dev, "failed to register SPI master\n"); + goto fail_free_queue; + } + + dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", + (unsigned long)res->start, espi->irq); + + return 0; + +fail_free_queue: + destroy_workqueue(espi->wq); +fail_free_irq: + free_irq(espi->irq, espi); +fail_unmap_regs: + iounmap(espi->regs_base); +fail_free_mem: + release_mem_region(res->start, resource_size(res)); +fail_put_clock: + clk_put(espi->clk); +fail_release_master: + spi_master_put(master); + platform_set_drvdata(pdev, NULL); + + return error; +} + +static int __exit ep93xx_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct ep93xx_spi *espi = spi_master_get_devdata(master); + struct resource *res; + + spin_lock_irq(&espi->lock); + espi->running = false; + spin_unlock_irq(&espi->lock); + + destroy_workqueue(espi->wq); + + /* + * Complete remaining messages with 
%-ESHUTDOWN status. + */ + spin_lock_irq(&espi->lock); + while (!list_empty(&espi->msg_queue)) { + struct spi_message *msg; + + msg = list_first_entry(&espi->msg_queue, + struct spi_message, queue); + list_del_init(&msg->queue); + msg->status = -ESHUTDOWN; + spin_unlock_irq(&espi->lock); + msg->complete(msg->context); + spin_lock_irq(&espi->lock); + } + spin_unlock_irq(&espi->lock); + + free_irq(espi->irq, espi); + iounmap(espi->regs_base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + clk_put(espi->clk); + platform_set_drvdata(pdev, NULL); + + spi_unregister_master(master); + return 0; +} + +static struct platform_driver ep93xx_spi_driver = { + .driver = { + .name = "ep93xx-spi", + .owner = THIS_MODULE, + }, + .remove = __exit_p(ep93xx_spi_remove), +}; + +static int __init ep93xx_spi_init(void) +{ + return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe); +} +module_init(ep93xx_spi_init); + +static void __exit ep93xx_spi_exit(void) +{ + platform_driver_unregister(&ep93xx_spi_driver); +} +module_exit(ep93xx_spi_exit); + +MODULE_DESCRIPTION("EP93xx SPI Controller driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ep93xx-spi"); diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c new file mode 100644 index 00000000000..77d9e7ee8b2 --- /dev/null +++ b/drivers/spi/mpc512x_psc_spi.c @@ -0,0 +1,578 @@ +/* + * MPC512x PSC in SPI mode driver. + * + * Copyright (C) 2007,2008 Freescale Semiconductor Inc. + * Original port from 52xx driver: + * Hongjun Chen <hong-jun.chen@freescale.com> + * + * Fork of mpc52xx_psc_spi.c: + * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/spi/spi.h> +#include <linux/fsl_devices.h> +#include <asm/mpc52xx_psc.h> + +struct mpc512x_psc_spi { + void (*cs_control)(struct spi_device *spi, bool on); + u32 sysclk; + + /* driver internal data */ + struct mpc52xx_psc __iomem *psc; + struct mpc512x_psc_fifo __iomem *fifo; + unsigned int irq; + u8 bits_per_word; + u8 busy; + u32 mclk; + u8 eofbyte; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct list_head queue; + spinlock_t lock; /* Message queue lock */ + + struct completion done; +}; + +/* controller state */ +struct mpc512x_psc_spi_cs { + int bits_per_word; + int speed_hz; +}; + +/* set clock freq, clock ramp, bits per work + * if t is NULL then reset the values to the default values + */ +static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + + cs->speed_hz = (t && t->speed_hz) + ? t->speed_hz : spi->max_speed_hz; + cs->bits_per_word = (t && t->bits_per_word) + ? 
t->bits_per_word : spi->bits_per_word; + cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; + return 0; +} + +static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sicr = in_be32(&psc->sicr); + + /* Set clock phase and polarity */ + if (spi->mode & SPI_CPHA) + sicr |= 0x00001000; + else + sicr &= ~0x00001000; + + if (spi->mode & SPI_CPOL) + sicr |= 0x00002000; + else + sicr &= ~0x00002000; + + if (spi->mode & SPI_LSB_FIRST) + sicr |= 0x10000000; + else + sicr &= ~0x10000000; + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + if (cs->speed_hz) + bclkdiv = (mps->mclk / cs->speed_hz) - 1; + else + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + mps->bits_per_word = cs->bits_per_word; + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); +} + +static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); + +} + +/* extract and scale size field in txsz or rxsz */ +#define MPC512x_PSC_FIFO_SZ(sz) ((sz & 0x7ff) << 2); + +#define EOFBYTE 1 + +static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + size_t len = t->len; + u8 *tx_buf = (u8 *)t->tx_buf; + u8 *rx_buf = (u8 *)t->rx_buf; + + if (!tx_buf && !rx_buf && t->len) + return -EINVAL; + + /* Zero MR2 */ + in_8(&psc->mode); + out_8(&psc->mode, 0x0); + + while (len) { + int count; + int i; + u8 data; + size_t fifosz; + int rxcount; + + /* + * The number of bytes that can be sent at a time + * depends on the fifo size. + */ + fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz)); + count = min(fifosz, len); + + for (i = count; i > 0; i--) { + data = tx_buf ? 
*tx_buf++ : 0; + if (len == EOFBYTE) + setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); + out_8(&fifo->txdata_8, data); + len--; + } + + INIT_COMPLETION(mps->done); + + /* interrupt on tx fifo empty */ + out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); + out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY); + + /* enable transmiter/receiver */ + out_8(&psc->command, + MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); + + wait_for_completion(&mps->done); + + mdelay(1); + + /* rx fifo should have count bytes in it */ + rxcount = in_be32(&fifo->rxcnt); + if (rxcount != count) + mdelay(1); + + rxcount = in_be32(&fifo->rxcnt); + if (rxcount != count) { + dev_warn(&spi->dev, "expected %d bytes in rx fifo " + "but got %d\n", count, rxcount); + } + + rxcount = min(rxcount, count); + for (i = rxcount; i > 0; i--) { + data = in_8(&fifo->rxdata_8); + if (rx_buf) + *rx_buf++ = data; + } + while (in_be32(&fifo->rxcnt)) { + in_8(&fifo->rxdata_8); + } + + out_8(&psc->command, + MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + } + /* disable transmiter/receiver and fifo interrupt */ + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + out_be32(&fifo->tximr, 0); + return 0; +} + +static void mpc512x_psc_spi_work(struct work_struct *work) +{ + struct mpc512x_psc_spi *mps = container_of(work, + struct mpc512x_psc_spi, + work); + + spin_lock_irq(&mps->lock); + mps->busy = 1; + while (!list_empty(&mps->queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + unsigned cs_change; + int status; + + m = container_of(mps->queue.next, struct spi_message, queue); + list_del_init(&m->queue); + spin_unlock_irq(&mps->lock); + + spi = m->spi; + cs_change = 1; + status = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = mpc512x_psc_spi_transfer_setup(spi, t); + if (status < 0) + break; + } + + if (cs_change) + mpc512x_psc_spi_activate_cs(spi); + cs_change = t->cs_change; + + status = mpc512x_psc_spi_transfer_rxtx(spi, t); + if (status) + break; + m->actual_length += t->len; + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (cs_change) + mpc512x_psc_spi_deactivate_cs(spi); + } + + m->status = status; + m->complete(m->context); + + if (status || !cs_change) + mpc512x_psc_spi_deactivate_cs(spi); + + mpc512x_psc_spi_transfer_setup(spi, NULL); + + spin_lock_irq(&mps->lock); + } + mps->busy = 0; + spin_unlock_irq(&mps->lock); +} + +static int mpc512x_psc_spi_setup(struct spi_device *spi) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + unsigned long flags; + + if (spi->bits_per_word % 8) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + cs->bits_per_word = spi->bits_per_word; + cs->speed_hz = spi->max_speed_hz; + + spin_lock_irqsave(&mps->lock, flags); + if (!mps->busy) + mpc512x_psc_spi_deactivate_cs(spi); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static int mpc512x_psc_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mps->lock, flags); + list_add_tail(&m->queue, &mps->queue); + queue_work(mps->workqueue, &mps->work); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static void mpc512x_psc_spi_cleanup(struct spi_device 
*spi) +{ + kfree(spi->controller_state); +} + +static int mpc512x_psc_spi_port_config(struct spi_master *master, + struct mpc512x_psc_spi *mps) +{ + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + struct clk *spiclk; + int ret = 0; + char name[32]; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sprintf(name, "psc%d_mclk", master->bus_num); + spiclk = clk_get(&master->dev, name); + clk_enable(spiclk); + mps->mclk = clk_get_rate(spiclk); + clk_put(spiclk); + + /* Reset the PSC into a known state */ + out_8(&psc->command, MPC52xx_PSC_RST_RX); + out_8(&psc->command, MPC52xx_PSC_RST_TX); + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + + /* Disable psc interrupts all useful interrupts are in fifo */ + out_be16(&psc->isr_imr.imr, 0); + + /* Disable fifo interrupts, will be enabled later */ + out_be32(&fifo->tximr, 0); + out_be32(&fifo->rximr, 0); + + /* Setup fifo slice address and size */ + /*out_be32(&fifo->txsz, 0x0fe00004);*/ + /*out_be32(&fifo->rxsz, 0x0ff00004);*/ + + sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */ + 0x00800000 | /* GenClk = 1 -- internal clk */ + 0x00008000 | /* SPI = 1 */ + 0x00004000 | /* MSTR = 1 -- SPI master */ + 0x00000800; /* UseEOF = 1 -- SS low until EOF */ + + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + + /* Set 2ms DTL delay */ + out_8(&psc->ctur, 0x00); + out_8(&psc->ctlr, 0x82); + + /* we don't use the alarms */ + out_be32(&fifo->rxalarm, 0xfff); + out_be32(&fifo->txalarm, 0); + + /* Enable FIFO slices for Rx/Tx */ + out_be32(&fifo->rxcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + out_be32(&fifo->txcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + + mps->bits_per_word = 8; + + return ret; +} + +static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) +{ + struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + + /* clear interrupt and wake up the work queue */ + if (in_be32(&fifo->txisr) & + in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) { + out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); + out_be32(&fifo->tximr, 0); + complete(&mps->done); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* bus_num is used only for the case dev->platform_data == NULL */ +static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, + u32 size, unsigned int irq, + s16 bus_num) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc512x_psc_spi *mps; + struct spi_master *master; + int ret; + void *tempp; + + master = spi_alloc_master(dev, sizeof *mps); + if (master == NULL) + return -ENOMEM; + + dev_set_drvdata(dev, master); + mps = spi_master_get_devdata(master); + mps->irq = irq; + + if (pdata == NULL) { + dev_err(dev, "probe called without platform data, no " + "cs_control function will be called\n"); + mps->cs_control = NULL; + mps->sysclk = 0; + master->bus_num = bus_num; + master->num_chipselect = 255; + } else { + mps->cs_control = pdata->cs_control; + mps->sysclk = pdata->sysclk; + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + } + + master->setup = mpc512x_psc_spi_setup; + master->transfer = mpc512x_psc_spi_transfer; + master->cleanup = mpc512x_psc_spi_cleanup; + master->dev.of_node = dev->of_node; + + tempp = 
ioremap(regaddr, size); + if (!tempp) { + dev_err(dev, "could not ioremap I/O port range\n"); + ret = -EFAULT; + goto free_master; + } + mps->psc = tempp; + mps->fifo = + (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc)); + + ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED, + "mpc512x-psc-spi", mps); + if (ret) + goto free_master; + + ret = mpc512x_psc_spi_port_config(master, mps); + if (ret < 0) + goto free_irq; + + spin_lock_init(&mps->lock); + init_completion(&mps->done); + INIT_WORK(&mps->work, mpc512x_psc_spi_work); + INIT_LIST_HEAD(&mps->queue); + + mps->workqueue = + create_singlethread_workqueue(dev_name(master->dev.parent)); + if (mps->workqueue == NULL) { + ret = -EBUSY; + goto free_irq; + } + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + return ret; + +unreg_master: + destroy_workqueue(mps->workqueue); +free_irq: + free_irq(mps->irq, mps); +free_master: + if (mps->psc) + iounmap(mps->psc); + spi_master_put(master); + + return ret; +} + +static int __devexit mpc512x_psc_spi_do_remove(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); + + flush_workqueue(mps->workqueue); + destroy_workqueue(mps->workqueue); + spi_unregister_master(master); + free_irq(mps->irq, mps); + if (mps->psc) + iounmap(mps->psc); + + return 0; +} + +static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op, + const struct of_device_id *match) +{ + const u32 *regaddr_p; + u64 regaddr64, size64; + s16 id = -1; + + regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); + if (!regaddr_p) { + dev_err(&op->dev, "Invalid PSC address\n"); + return -EINVAL; + } + regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); + + /* get PSC id (0..11, used by port_config) */ + if (op->dev.platform_data == NULL) { + const u32 *psc_nump; + + psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); + if (!psc_nump || *psc_nump > 11) { + dev_err(&op->dev, "mpc512x_psc_spi: Device node %s " + "has invalid cell-index property\n", + op->dev.of_node->full_name); + return -EINVAL; + } + id = *psc_nump; + } + + return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, + irq_of_parse_and_map(op->dev.of_node, 0), id); +} + +static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op) +{ + return mpc512x_psc_spi_do_remove(&op->dev); +} + +static struct of_device_id mpc512x_psc_spi_of_match[] = { + { .compatible = "fsl,mpc5121-psc-spi", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); + +static struct of_platform_driver mpc512x_psc_spi_of_driver = { + .probe = mpc512x_psc_spi_of_probe, + .remove = __devexit_p(mpc512x_psc_spi_of_remove), + .driver = { + .name = "mpc512x-psc-spi", + .owner = THIS_MODULE, + .of_match_table = mpc512x_psc_spi_of_match, + }, +}; + +static int __init mpc512x_psc_spi_init(void) +{ + return of_register_platform_driver(&mpc512x_psc_spi_of_driver); +} +module_init(mpc512x_psc_spi_init); + +static void __exit mpc512x_psc_spi_exit(void) +{ + of_unregister_platform_driver(&mpc512x_psc_spi_of_driver); +} +module_exit(mpc512x_psc_spi_exit); + +MODULE_AUTHOR("John Rigby"); +MODULE_DESCRIPTION("MPC512x PSC SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 1b74d5ca03f..8a904c1c848 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/errno.h> 
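/*
 * Illustrative sketch only -- not part of the patch above.  It shows, in
 * isolation, how the mpc512x_psc_spi driver packs its bit-clock divider into
 * the PSC CCR register: bclkdiv = mclk / speed_hz - 1, with the low byte
 * placed in CCR[23:16] and the high byte in CCR[15:8], while CCR[31:24] is
 * preserved.  The clock numbers used below are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t psc_spi_ccr(uint32_t ccr, uint32_t mclk, uint32_t speed_hz)
{
	uint16_t bclkdiv;

	if (!speed_hz)
		speed_hz = 1000000;	/* driver default: 1 MHz */
	bclkdiv = (mclk / speed_hz) - 1;

	ccr &= 0xFF000000;		/* preserve CCR[31:24], as the driver does */
	ccr |= ((uint32_t)(bclkdiv & 0xff) << 16) |	 /* low byte  -> CCR[23:16] */
	       ((uint32_t)((bclkdiv >> 8) & 0xff) << 8); /* high byte -> CCR[15:8]  */
	return ccr;
}

int main(void)
{
	/* e.g. 66 MHz PSC mclk and an 8 MHz transfer: bclkdiv = 7, CCR = 0x00070000 */
	printf("CCR = 0x%08x\n", (unsigned int)psc_spi_ccr(0, 66000000, 8000000));
	return 0;
}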
#include <linux/interrupt.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/workqueue.h> #include <linux/completion.h> @@ -23,6 +24,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/fsl_devices.h> +#include <linux/slab.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> @@ -313,11 +315,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) struct mpc52xx_psc __iomem *psc = mps->psc; struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; u32 mclken_div; - int ret = 0; + int ret; /* default sysclk is 512MHz */ mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; - mpc52xx_set_psc_clkdiv(psc_id, mclken_div); + ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div); + if (ret) + return ret; /* Reset the PSC into a known state */ out_8(&psc->command, MPC52xx_PSC_RST_RX); @@ -341,7 +345,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) mps->bits_per_word = 8; - return ret; + return 0; } static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) @@ -359,7 +363,7 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) } /* bus_num is used only for the case dev->platform_data == NULL */ -static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, +static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, u32 size, unsigned int irq, s16 bus_num) { struct fsl_spi_platform_data *pdata = dev->platform_data; @@ -394,6 +398,7 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, master->setup = mpc52xx_psc_spi_setup; master->transfer = mpc52xx_psc_spi_transfer; master->cleanup = mpc52xx_psc_spi_cleanup; + master->dev.of_node = dev->of_node; mps->psc = ioremap(regaddr, size); if (!mps->psc) { @@ -410,8 +415,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, goto free_master; ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); - if (ret < 0) + if (ret < 0) { + dev_err(dev, "can't configure PSC! 
Is it capable of SPI?\n"); goto free_irq; + } spin_lock_init(&mps->lock); init_completion(&mps->done); @@ -443,58 +450,52 @@ free_master: return ret; } -static int __exit mpc52xx_psc_spi_do_remove(struct device *dev) -{ - struct spi_master *master = dev_get_drvdata(dev); - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); - - flush_workqueue(mps->workqueue); - destroy_workqueue(mps->workqueue); - spi_unregister_master(master); - free_irq(mps->irq, mps); - if (mps->psc) - iounmap(mps->psc); - - return 0; -} - -static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, +static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op, const struct of_device_id *match) { const u32 *regaddr_p; u64 regaddr64, size64; s16 id = -1; - regaddr_p = of_get_address(op->node, 0, &size64, NULL); + regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); if (!regaddr_p) { - printk(KERN_ERR "Invalid PSC address\n"); + dev_err(&op->dev, "Invalid PSC address\n"); return -EINVAL; } - regaddr64 = of_translate_address(op->node, regaddr_p); + regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); /* get PSC id (1..6, used by port_config) */ if (op->dev.platform_data == NULL) { const u32 *psc_nump; - psc_nump = of_get_property(op->node, "cell-index", NULL); + psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); if (!psc_nump || *psc_nump > 5) { - printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid " - "cell-index property\n", op->node->full_name); + dev_err(&op->dev, "Invalid cell-index property\n"); return -EINVAL; } id = *psc_nump + 1; } return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, - irq_of_parse_and_map(op->node, 0), id); + irq_of_parse_and_map(op->dev.of_node, 0), id); } -static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) +static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op) { - return mpc52xx_psc_spi_do_remove(&op->dev); + struct spi_master *master = dev_get_drvdata(&op->dev); + struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); + + flush_workqueue(mps->workqueue); + destroy_workqueue(mps->workqueue); + spi_unregister_master(master); + free_irq(mps->irq, mps); + if (mps->psc) + iounmap(mps->psc); + + return 0; } -static struct of_device_id mpc52xx_psc_spi_of_match[] = { +static const struct of_device_id mpc52xx_psc_spi_of_match[] = { { .compatible = "fsl,mpc5200-psc-spi", }, { .compatible = "mpc5200-psc-spi", }, /* old */ {} @@ -503,14 +504,12 @@ static struct of_device_id mpc52xx_psc_spi_of_match[] = { MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); static struct of_platform_driver mpc52xx_psc_spi_of_driver = { - .owner = THIS_MODULE, - .name = "mpc52xx-psc-spi", - .match_table = mpc52xx_psc_spi_of_match, .probe = mpc52xx_psc_spi_of_probe, - .remove = __exit_p(mpc52xx_psc_spi_of_remove), + .remove = __devexit_p(mpc52xx_psc_spi_of_remove), .driver = { .name = "mpc52xx-psc-spi", .owner = THIS_MODULE, + .of_match_table = mpc52xx_psc_spi_of_match, }, }; diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c new file mode 100644 index 00000000000..84439f65560 --- /dev/null +++ b/drivers/spi/mpc52xx_spi.c @@ -0,0 +1,580 @@ +/* + * MPC52xx SPI bus driver. + * + * Copyright (C) 2008 Secret Lab Technologies Ltd. + * + * This file is released under the GPLv2 + * + * This is the driver for the MPC5200's dedicated SPI controller. + * + * Note: this driver does not support the MPC5200 PSC in SPI mode. 
For + * that driver see drivers/spi/mpc52xx_psc_spi.c + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/of_platform.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <linux/io.h> +#include <linux/of_gpio.h> +#include <linux/slab.h> +#include <asm/time.h> +#include <asm/mpc52xx.h> + +MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); +MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver"); +MODULE_LICENSE("GPL"); + +/* Register offsets */ +#define SPI_CTRL1 0x00 +#define SPI_CTRL1_SPIE (1 << 7) +#define SPI_CTRL1_SPE (1 << 6) +#define SPI_CTRL1_MSTR (1 << 4) +#define SPI_CTRL1_CPOL (1 << 3) +#define SPI_CTRL1_CPHA (1 << 2) +#define SPI_CTRL1_SSOE (1 << 1) +#define SPI_CTRL1_LSBFE (1 << 0) + +#define SPI_CTRL2 0x01 +#define SPI_BRR 0x04 + +#define SPI_STATUS 0x05 +#define SPI_STATUS_SPIF (1 << 7) +#define SPI_STATUS_WCOL (1 << 6) +#define SPI_STATUS_MODF (1 << 4) + +#define SPI_DATA 0x09 +#define SPI_PORTDATA 0x0d +#define SPI_DATADIR 0x10 + +/* FSM state return values */ +#define FSM_STOP 0 /* Nothing more for the state machine to */ + /* do. If something interesting happens */ + /* then an IRQ will be received */ +#define FSM_POLL 1 /* need to poll for completion, an IRQ is */ + /* not expected */ +#define FSM_CONTINUE 2 /* Keep iterating the state machine */ + +/* Driver internal data */ +struct mpc52xx_spi { + struct spi_master *master; + void __iomem *regs; + int irq0; /* MODF irq */ + int irq1; /* SPIF irq */ + unsigned int ipb_freq; + + /* Statistics; not used now, but will be reintroduced for debugfs */ + int msg_count; + int wcol_count; + int wcol_ticks; + u32 wcol_tx_timestamp; + int modf_count; + int byte_count; + + struct list_head queue; /* queue of pending messages */ + spinlock_t lock; + struct work_struct work; + + /* Details of current transfer (length, and buffer pointers) */ + struct spi_message *message; /* current message */ + struct spi_transfer *transfer; /* current transfer */ + int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data); + int len; + int timestamp; + u8 *rx_buf; + const u8 *tx_buf; + int cs_change; + int gpio_cs_count; + unsigned int *gpio_cs; +}; + +/* + * CS control function + */ +static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value) +{ + int cs; + + if (ms->gpio_cs_count > 0) { + cs = ms->message->spi->chip_select; + gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1); + } else + out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08); +} + +/* + * Start a new transfer. This is called both by the idle state + * for the first transfer in a message, and by the wait state when the + * previous transfer in a message is complete. 
+ */ +static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms) +{ + ms->rx_buf = ms->transfer->rx_buf; + ms->tx_buf = ms->transfer->tx_buf; + ms->len = ms->transfer->len; + + /* Activate the chip select */ + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 1); + ms->cs_change = ms->transfer->cs_change; + + /* Write out the first byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); +} + +/* Forward declaration of state handlers */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); +static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); + +/* + * IDLE state + * + * No transfers are in progress; if another transfer is pending then retrieve + * it and kick it off. Otherwise, stop processing the state machine + */ +static int +mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + struct spi_device *spi; + int spr, sppr; + u8 ctrl1; + + if (status && (irq != NO_IRQ)) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + /* Check if there is another transfer waiting. */ + if (list_empty(&ms->queue)) + return FSM_STOP; + + /* get the head of the queue */ + ms->message = list_first_entry(&ms->queue, struct spi_message, queue); + list_del_init(&ms->message->queue); + + /* Setup the controller parameters */ + ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; + spi = ms->message->spi; + if (spi->mode & SPI_CPHA) + ctrl1 |= SPI_CTRL1_CPHA; + if (spi->mode & SPI_CPOL) + ctrl1 |= SPI_CTRL1_CPOL; + if (spi->mode & SPI_LSB_FIRST) + ctrl1 |= SPI_CTRL1_LSBFE; + out_8(ms->regs + SPI_CTRL1, ctrl1); + + /* Setup the controller speed */ + /* minimum divider is '2'. Also, add '1' to force rounding the + * divider up. */ + sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1; + spr = 0; + if (sppr < 1) + sppr = 1; + while (((sppr - 1) & ~0x7) != 0) { + sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */ + spr++; + } + sppr--; /* sppr quantity in register is offset by 1 */ + if (spr > 7) { + /* Don't overrun limits of SPI baudrate register */ + spr = 7; + sppr = 7; + } + out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */ + + ms->cs_change = 1; + ms->transfer = container_of(ms->message->transfers.next, + struct spi_transfer, transfer_list); + + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + + return FSM_CONTINUE; +} + +/* + * TRANSFER state + * + * In the middle of a transfer. If the SPI core has completed processing + * a byte, then read out the received data and write out the next byte + * (unless this transfer is finished; in which case go on to the wait + * state) + */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data) +{ + if (!status) + return ms->irq0 ? FSM_STOP : FSM_POLL; + + if (status & SPI_STATUS_WCOL) { + /* The SPI controller is stoopid. At slower speeds, it may + * raise the SPIF flag before the state machine is actually + * finished, which causes a collision (internal to the state + * machine only). The manual recommends inserting a delay + * between receiving the interrupt and sending the next byte, + * but it can also be worked around simply by retrying the + * transfer which is what we do here. 
*/ + ms->wcol_count++; + ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp; + ms->wcol_tx_timestamp = get_tbl(); + data = 0; + if (ms->tx_buf) + data = *(ms->tx_buf - 1); + out_8(ms->regs + SPI_DATA, data); /* try again */ + return FSM_CONTINUE; + } else if (status & SPI_STATUS_MODF) { + ms->modf_count++; + dev_err(&ms->master->dev, "mode fault\n"); + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = -EIO; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* Read data out of the spi device */ + ms->byte_count++; + if (ms->rx_buf) + *ms->rx_buf++ = data; + + /* Is the transfer complete? */ + ms->len--; + if (ms->len == 0) { + ms->timestamp = get_tbl(); + ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec; + ms->state = mpc52xx_spi_fsmstate_wait; + return FSM_CONTINUE; + } + + /* Write out the next byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); + + return FSM_CONTINUE; +} + +/* + * WAIT state + * + * A transfer has completed; need to wait for the delay period to complete + * before starting the next transfer + */ +static int +mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + if (status && irq) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + if (((int)get_tbl()) - ms->timestamp < 0) + return FSM_POLL; + + ms->message->actual_length += ms->transfer->len; + + /* Check if there is another transfer in this message. If there + * aren't then deactivate CS, notify sender, and drop back to idle + * to start the next message. */ + if (ms->transfer->transfer_list.next == &ms->message->transfers) { + ms->msg_count++; + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = 0; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* There is another transfer; kick it off */ + + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 0); + + ms->transfer = container_of(ms->transfer->transfer_list.next, + struct spi_transfer, transfer_list); + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + return FSM_CONTINUE; +} + +/** + * mpc52xx_spi_fsm_process - Finite State Machine iteration function + * @irq: irq number that triggered the FSM or 0 for polling + * @ms: pointer to mpc52xx_spi driver data + */ +static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms) +{ + int rc = FSM_CONTINUE; + u8 status, data; + + while (rc == FSM_CONTINUE) { + /* Interrupt cleared by read of STATUS followed by + * read of DATA registers */ + status = in_8(ms->regs + SPI_STATUS); + data = in_8(ms->regs + SPI_DATA); + rc = ms->state(irq, ms, status, data); + } + + if (rc == FSM_POLL) + schedule_work(&ms->work); +} + +/** + * mpc52xx_spi_irq - IRQ handler + */ +static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms) +{ + struct mpc52xx_spi *ms = _ms; + spin_lock(&ms->lock); + mpc52xx_spi_fsm_process(irq, ms); + spin_unlock(&ms->lock); + return IRQ_HANDLED; +} + +/** + * mpc52xx_spi_wq - Workqueue function for polling the state machine + */ +static void mpc52xx_spi_wq(struct work_struct *work) +{ + struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work); + unsigned long flags; + + spin_lock_irqsave(&ms->lock, flags); + mpc52xx_spi_fsm_process(0, ms); + spin_unlock_irqrestore(&ms->lock, flags); +} + +/* + * spi_master ops + */ + +static int mpc52xx_spi_setup(struct spi_device *spi) +{ + 
if (spi->bits_per_word % 8) + return -EINVAL; + + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) + return -EINVAL; + + if (spi->chip_select >= spi->master->num_chipselect) + return -EINVAL; + + return 0; +} + +static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&ms->lock, flags); + list_add_tail(&m->queue, &ms->queue); + spin_unlock_irqrestore(&ms->lock, flags); + schedule_work(&ms->work); + + return 0; +} + +/* + * OF Platform Bus Binding + */ +static int __devinit mpc52xx_spi_probe(struct platform_device *op, + const struct of_device_id *match) +{ + struct spi_master *master; + struct mpc52xx_spi *ms; + void __iomem *regs; + u8 ctrl1; + int rc, i = 0; + int gpio_cs; + + /* MMIO registers */ + dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); + regs = of_iomap(op->dev.of_node, 0); + if (!regs) + return -ENODEV; + + /* initialize the device */ + ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; + out_8(regs + SPI_CTRL1, ctrl1); + out_8(regs + SPI_CTRL2, 0x0); + out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */ + out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */ + + /* Clear the status register and re-read it to check for a MODF + * failure. This driver cannot currently handle multiple masters + * on the SPI bus. This fault will also occur if the SPI signals + * are not connected to any pins (port_config setting) */ + in_8(regs + SPI_STATUS); + out_8(regs + SPI_CTRL1, ctrl1); + + in_8(regs + SPI_DATA); + if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) { + dev_err(&op->dev, "mode fault; is port_config correct?\n"); + rc = -EIO; + goto err_init; + } + + dev_dbg(&op->dev, "allocating spi_master struct\n"); + master = spi_alloc_master(&op->dev, sizeof *ms); + if (!master) { + rc = -ENOMEM; + goto err_alloc; + } + + master->bus_num = -1; + master->setup = mpc52xx_spi_setup; + master->transfer = mpc52xx_spi_transfer; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + master->dev.of_node = op->dev.of_node; + + dev_set_drvdata(&op->dev, master); + + ms = spi_master_get_devdata(master); + ms->master = master; + ms->regs = regs; + ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0); + ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1); + ms->state = mpc52xx_spi_fsmstate_idle; + ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); + ms->gpio_cs_count = of_gpio_count(op->dev.of_node); + if (ms->gpio_cs_count > 0) { + master->num_chipselect = ms->gpio_cs_count; + ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int), + GFP_KERNEL); + if (!ms->gpio_cs) { + rc = -ENOMEM; + goto err_alloc; + } + + for (i = 0; i < ms->gpio_cs_count; i++) { + gpio_cs = of_get_gpio(op->dev.of_node, i); + if (gpio_cs < 0) { + dev_err(&op->dev, + "could not parse the gpio field " + "in oftree\n"); + rc = -ENODEV; + goto err_gpio; + } + + rc = gpio_request(gpio_cs, dev_name(&op->dev)); + if (rc) { + dev_err(&op->dev, + "can't request spi cs gpio #%d " + "on gpio line %d\n", i, gpio_cs); + goto err_gpio; + } + + gpio_direction_output(gpio_cs, 1); + ms->gpio_cs[i] = gpio_cs; + } + } else { + master->num_chipselect = 1; + } + + spin_lock_init(&ms->lock); + INIT_LIST_HEAD(&ms->queue); + INIT_WORK(&ms->work, mpc52xx_spi_wq); + + /* Decide if interrupts can be used */ + if (ms->irq0 && ms->irq1) { + rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0, + "mpc5200-spi-modf", ms); + rc |= 
request_irq(ms->irq1, mpc52xx_spi_irq, 0, + "mpc5200-spi-spif", ms); + if (rc) { + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + ms->irq0 = ms->irq1 = 0; + } + } else { + /* operate in polled mode */ + ms->irq0 = ms->irq1 = 0; + } + + if (!ms->irq0) + dev_info(&op->dev, "using polled mode\n"); + + dev_dbg(&op->dev, "registering spi_master struct\n"); + rc = spi_register_master(master); + if (rc) + goto err_register; + + dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); + + return rc; + + err_register: + dev_err(&ms->master->dev, "initialization failed\n"); + spi_master_put(master); + err_gpio: + while (i-- > 0) + gpio_free(ms->gpio_cs[i]); + + kfree(ms->gpio_cs); + err_alloc: + err_init: + iounmap(regs); + return rc; +} + +static int __devexit mpc52xx_spi_remove(struct platform_device *op) +{ + struct spi_master *master = dev_get_drvdata(&op->dev); + struct mpc52xx_spi *ms = spi_master_get_devdata(master); + int i; + + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + + for (i = 0; i < ms->gpio_cs_count; i++) + gpio_free(ms->gpio_cs[i]); + + kfree(ms->gpio_cs); + spi_unregister_master(master); + spi_master_put(master); + iounmap(ms->regs); + + return 0; +} + +static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { + { .compatible = "fsl,mpc5200-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); + +static struct of_platform_driver mpc52xx_spi_of_driver = { + .driver = { + .name = "mpc52xx-spi", + .owner = THIS_MODULE, + .of_match_table = mpc52xx_spi_match, + }, + .probe = mpc52xx_spi_probe, + .remove = __devexit_p(mpc52xx_spi_remove), +}; + +static int __init mpc52xx_spi_init(void) +{ + return of_register_platform_driver(&mpc52xx_spi_of_driver); +} +module_init(mpc52xx_spi_init); + +static void __exit mpc52xx_spi_exit(void) +{ + of_unregister_platform_driver(&mpc52xx_spi_of_driver); +} +module_exit(mpc52xx_spi_exit); + diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index eee4b6e0af2..abb1ffbf3d2 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -32,15 +32,19 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/slab.h> #include <linux/spi/spi.h> -#include <mach/dma.h> -#include <mach/clock.h> - +#include <plat/dma.h> +#include <plat/clock.h> +#include <plat/mcspi.h> #define OMAP2_MCSPI_MAX_FREQ 48000000 +/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */ +#define OMAP2_MCSPI_MAX_CTRL 4 + #define OMAP2_MCSPI_REVISION 0x00 #define OMAP2_MCSPI_SYSCONFIG 0x10 #define OMAP2_MCSPI_SYSSTATUS 0x14 @@ -59,37 +63,40 @@ /* per-register bitmasks: */ -#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0) -#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1) +#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4) +#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2) +#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0) +#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1) -#define OMAP2_MCSPI_SYSSTATUS_RESETDONE (1 << 0) +#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0) -#define OMAP2_MCSPI_MODULCTRL_SINGLE (1 << 0) -#define OMAP2_MCSPI_MODULCTRL_MS (1 << 2) -#define OMAP2_MCSPI_MODULCTRL_STEST (1 << 3) +#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) +#define OMAP2_MCSPI_MODULCTRL_MS BIT(2) +#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) -#define OMAP2_MCSPI_CHCONF_PHA (1 << 0) -#define OMAP2_MCSPI_CHCONF_POL (1 << 1) +#define OMAP2_MCSPI_CHCONF_PHA BIT(0) +#define OMAP2_MCSPI_CHCONF_POL BIT(1) #define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2) -#define OMAP2_MCSPI_CHCONF_EPOL (1 << 6) +#define OMAP2_MCSPI_CHCONF_EPOL BIT(6) 
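/*
 * Illustrative sketch only -- not part of the patch.  It reproduces, as a
 * standalone function, the SPI_BRR divider search from
 * mpc52xx_spi_fsmstate_idle() above: pick SPPR (0..7) and SPR (0..7) so the
 * bus clock does not exceed the requested speed.  The printed rate assumes
 * the MPC5200 relation f_sck ~= ipb / ((SPPR + 1) * 2^(SPR + 1)); the IPB and
 * speed numbers below are made-up example values.
 */
#include <stdio.h>

static unsigned char mpc52xx_spi_brr(unsigned int ipb_freq, unsigned int speed_hz)
{
	int sppr, spr = 0;

	/* minimum divider is 2; the '+ 1' rounds the divider up */
	sppr = ((ipb_freq / speed_hz) + 1) >> 1;
	if (sppr < 1)
		sppr = 1;
	while (((sppr - 1) & ~0x7) != 0) {
		sppr = (sppr + 1) >> 1;	/* halve, rounding up */
		spr++;
	}
	sppr--;				/* the register field is offset by one */
	if (spr > 7) {			/* clamp to the slowest supported rate */
		spr = 7;
		sppr = 7;
	}
	return (sppr << 4) | spr;
}

int main(void)
{
	unsigned int ipb = 132000000, speed = 1000000;	/* 132 MHz IPB bus, 1 MHz SPI */
	unsigned char brr = mpc52xx_spi_brr(ipb, speed);

	printf("SPI_BRR = 0x%02x -> ~%u Hz\n", (unsigned int)brr,
	       ipb / (((brr >> 4) + 1) * (2u << (brr & 0x7))));
	return 0;
}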
#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7) -#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY (0x01 << 12) -#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY (0x02 << 12) +#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12) +#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13) #define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12) -#define OMAP2_MCSPI_CHCONF_DMAW (1 << 14) -#define OMAP2_MCSPI_CHCONF_DMAR (1 << 15) -#define OMAP2_MCSPI_CHCONF_DPE0 (1 << 16) -#define OMAP2_MCSPI_CHCONF_DPE1 (1 << 17) -#define OMAP2_MCSPI_CHCONF_IS (1 << 18) -#define OMAP2_MCSPI_CHCONF_TURBO (1 << 19) -#define OMAP2_MCSPI_CHCONF_FORCE (1 << 20) +#define OMAP2_MCSPI_CHCONF_DMAW BIT(14) +#define OMAP2_MCSPI_CHCONF_DMAR BIT(15) +#define OMAP2_MCSPI_CHCONF_DPE0 BIT(16) +#define OMAP2_MCSPI_CHCONF_DPE1 BIT(17) +#define OMAP2_MCSPI_CHCONF_IS BIT(18) +#define OMAP2_MCSPI_CHCONF_TURBO BIT(19) +#define OMAP2_MCSPI_CHCONF_FORCE BIT(20) -#define OMAP2_MCSPI_CHSTAT_RXS (1 << 0) -#define OMAP2_MCSPI_CHSTAT_TXS (1 << 1) -#define OMAP2_MCSPI_CHSTAT_EOT (1 << 2) +#define OMAP2_MCSPI_CHSTAT_RXS BIT(0) +#define OMAP2_MCSPI_CHSTAT_TXS BIT(1) +#define OMAP2_MCSPI_CHSTAT_EOT BIT(2) -#define OMAP2_MCSPI_CHCTRL_EN (1 << 0) +#define OMAP2_MCSPI_CHCTRL_EN BIT(0) +#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0) /* We have 2 DMA channels per CS, one for RX and one for TX */ struct omap2_mcspi_dma { @@ -106,7 +113,7 @@ struct omap2_mcspi_dma { /* use PIO for small transfers, avoiding DMA setup/teardown overhead and * cache operations; better heuristics consider wordsize and bitrate. */ -#define DMA_MIN_BYTES 8 +#define DMA_MIN_BYTES 160 struct omap2_mcspi { @@ -128,8 +135,23 @@ struct omap2_mcspi_cs { void __iomem *base; unsigned long phys; int word_len; + struct list_head node; + /* Context save and restore shadow register */ + u32 chconf0; +}; + +/* used for context save and restore, structure members to be updated whenever + * corresponding registers are modified. + */ +struct omap2_mcspi_regs { + u32 sysconfig; + u32 modulctrl; + u32 wakeupenable; + struct list_head cs; }; +static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL]; + static struct workqueue_struct *omap2_mcspi_wq; #define MOD_REG_BIT(val, mask, set) do { \ @@ -169,12 +191,28 @@ static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx) return __raw_readl(cs->base + idx); } +static inline u32 mcspi_cached_chconf0(const struct spi_device *spi) +{ + struct omap2_mcspi_cs *cs = spi->controller_state; + + return cs->chconf0; +} + +static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val) +{ + struct omap2_mcspi_cs *cs = spi->controller_state; + + cs->chconf0 = val; + mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); + mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); +} + static void omap2_mcspi_set_dma_req(const struct spi_device *spi, int is_read, int enable) { u32 l, rw; - l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); + l = mcspi_cached_chconf0(spi); if (is_read) /* 1 is read, 0 write */ rw = OMAP2_MCSPI_CHCONF_DMAR; @@ -182,7 +220,7 @@ static void omap2_mcspi_set_dma_req(const struct spi_device *spi, rw = OMAP2_MCSPI_CHCONF_DMAW; MOD_REG_BIT(l, rw, enable); - mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l); + mcspi_write_chconf0(spi, l); } static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) @@ -191,15 +229,17 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) l = enable ? 
OMAP2_MCSPI_CHCTRL_EN : 0; mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l); + /* Flash post-writes */ + mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0); } static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active) { u32 l; - l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); + l = mcspi_cached_chconf0(spi); MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active); - mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l); + mcspi_write_chconf0(spi, l); } static void omap2_mcspi_set_master_mode(struct spi_master *master) @@ -214,6 +254,59 @@ static void omap2_mcspi_set_master_mode(struct spi_master *master) MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0); MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1); mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l); + + omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l; +} + +static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) +{ + struct spi_master *spi_cntrl; + struct omap2_mcspi_cs *cs; + spi_cntrl = mcspi->master; + + /* McSPI: context restore */ + mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, + omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); + + mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG, + omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig); + + mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, + omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); + + list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs, + node) + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); +} +static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) +{ + clk_disable(mcspi->ick); + clk_disable(mcspi->fck); +} + +static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) +{ + if (clk_enable(mcspi->ick)) + return -ENODEV; + if (clk_enable(mcspi->fck)) + return -ENODEV; + + omap2_mcspi_restore_ctx(mcspi); + + return 0; +} + +static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(__raw_readl(reg) & bit)) { + if (time_after(jiffies, timeout)) + return -1; + cpu_relax(); + } + return 0; } static unsigned @@ -225,11 +318,17 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) unsigned int count, c; unsigned long base, tx_reg, rx_reg; int word_len, data_type, element_count; + int elements; + u32 l; u8 * rx; const u8 * tx; + void __iomem *chstat_reg; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + l = mcspi_cached_chconf0(spi); + + chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; count = xfer->len; c = count; @@ -268,8 +367,12 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) } if (rx != NULL) { + elements = element_count - 1; + if (l & OMAP2_MCSPI_CHCONF_TURBO) + elements--; + omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, element_count, 1, + data_type, elements, 1, OMAP_DMA_SYNC_ELEMENT, mcspi_dma->dma_rx_sync_dev, 1); @@ -294,27 +397,68 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) if (tx != NULL) { wait_for_completion(&mcspi_dma->dma_tx_completion); - dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE); + dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE); + + /* for TX_ONLY mode, be sure all words have shifted out */ + if (rx == NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) + dev_err(&spi->dev, "TXS timed out\n"); + else if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_EOT) < 0) + dev_err(&spi->dev, "EOT timed 
out\n"); + } } if (rx != NULL) { wait_for_completion(&mcspi_dma->dma_rx_completion); - dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); - } - return count; -} + dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE); + omap2_mcspi_set_enable(spi, 0); -static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) -{ - unsigned long timeout; + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + + if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) + & OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); + if (word_len <= 8) + ((u8 *)xfer->rx_buf)[elements++] = w; + else if (word_len <= 16) + ((u16 *)xfer->rx_buf)[elements++] = w; + else /* word_len <= 32 */ + ((u32 *)xfer->rx_buf)[elements++] = w; + } else { + dev_err(&spi->dev, + "DMA RX penultimate word empty"); + count -= (word_len <= 8) ? 2 : + (word_len <= 16) ? 4 : + /* word_len <= 32 */ 8; + omap2_mcspi_set_enable(spi, 1); + return count; + } + } - timeout = jiffies + msecs_to_jiffies(1000); - while (!(__raw_readl(reg) & bit)) { - if (time_after(jiffies, timeout)) - return -1; - cpu_relax(); + if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) + & OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); + if (word_len <= 8) + ((u8 *)xfer->rx_buf)[elements] = w; + else if (word_len <= 16) + ((u16 *)xfer->rx_buf)[elements] = w; + else /* word_len <= 32 */ + ((u32 *)xfer->rx_buf)[elements] = w; + } else { + dev_err(&spi->dev, "DMA RX last word empty"); + count -= (word_len <= 8) ? 1 : + (word_len <= 16) ? 2 : + /* word_len <= 32 */ 4; + } + omap2_mcspi_set_enable(spi, 1); } - return 0; + return count; } static unsigned @@ -335,8 +479,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) c = count; word_len = cs->word_len; - l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); - l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; + l = mcspi_cached_chconf0(spi); /* We store the pre-calculated register addresses on stack to speed * up the transfer loop. 
*/ @@ -359,10 +502,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %02x\n", + dev_vdbg(&spi->dev, "write-%d %02x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -371,17 +512,27 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_cs_reg(spi, - OMAP2_MCSPI_CHCONF0, l); + + if (c == 1 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %02x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %02x\n", + dev_vdbg(&spi->dev, "read-%d %02x\n", word_len, *(rx - 1)); -#endif } } while (c); } else if (word_len <= 16) { @@ -398,10 +549,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %04x\n", + dev_vdbg(&spi->dev, "write-%d %04x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -410,17 +559,27 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_cs_reg(spi, - OMAP2_MCSPI_CHCONF0, l); + + if (c == 2 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %04x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", + dev_vdbg(&spi->dev, "read-%d %04x\n", word_len, *(rx - 1)); -#endif } } while (c); } else if (word_len <= 32) { @@ -437,10 +596,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %04x\n", + dev_vdbg(&spi->dev, "write-%d %08x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -449,17 +606,27 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_cs_reg(spi, - OMAP2_MCSPI_CHCONF0, l); + + if (c == 4 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %08x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = 
__raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", + dev_vdbg(&spi->dev, "read-%d %08x\n", word_len, *(rx - 1)); -#endif } } while (c); } @@ -472,8 +639,15 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } else if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_EOT) < 0) dev_err(&spi->dev, "EOT timed out\n"); + + /* disable chan to purge rx datas received in TX_ONLY transfer, + * otherwise these rx datas will affect the direct following + * RX_ONLY transfer. + */ + omap2_mcspi_set_enable(spi, 0); } out: + omap2_mcspi_set_enable(spi, 1); return count - c; } @@ -483,24 +657,30 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, { struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi *mcspi; + struct spi_master *spi_cntrl; u32 l = 0, div = 0; u8 word_len = spi->bits_per_word; + u32 speed_hz = spi->max_speed_hz; mcspi = spi_master_get_devdata(spi->master); + spi_cntrl = mcspi->master; if (t != NULL && t->bits_per_word) word_len = t->bits_per_word; cs->word_len = word_len; - if (spi->max_speed_hz) { + if (t && t->speed_hz) + speed_hz = t->speed_hz; + + if (speed_hz) { while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div)) - > spi->max_speed_hz) + > speed_hz) div++; } else div = 15; - l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); + l = mcspi_cached_chconf0(spi); /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS * REVISIT: this controller could support SPI_3WIRE mode. @@ -532,7 +712,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, else l &= ~OMAP2_MCSPI_CHCONF_PHA; - mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l); + mcspi_write_chconf0(spi, l); dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", OMAP2_MCSPI_MAX_FREQ / (1 << div), @@ -625,7 +805,11 @@ static int omap2_mcspi_setup(struct spi_device *spi) return -ENOMEM; cs->base = mcspi->base + spi->chip_select * 0x14; cs->phys = mcspi->phys + spi->chip_select * 0x14; + cs->chconf0 = 0; spi->controller_state = cs; + /* Link this to context save list */ + list_add_tail(&cs->node, + &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs); } if (mcspi_dma->dma_rx_channel == -1 @@ -635,11 +819,11 @@ static int omap2_mcspi_setup(struct spi_device *spi) return ret; } - clk_enable(mcspi->ick); - clk_enable(mcspi->fck); + if (omap2_mcspi_enable_clocks(mcspi)) + return -ENODEV; + ret = omap2_mcspi_setup_transfer(spi, NULL); - clk_disable(mcspi->fck); - clk_disable(mcspi->ick); + omap2_mcspi_disable_clocks(mcspi); return ret; } @@ -648,19 +832,29 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) { struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; + struct omap2_mcspi_cs *cs; mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - kfree(spi->controller_state); + if (spi->controller_state) { + /* Unlink controller state from context save list */ + cs = spi->controller_state; + list_del(&cs->node); - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; + kfree(spi->controller_state); } - if (mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; + + if (spi->chip_select < spi->master->num_chipselect) { + mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + if (mcspi_dma->dma_rx_channel != -1) { + omap_free_dma(mcspi_dma->dma_rx_channel); + mcspi_dma->dma_rx_channel = -1; + } + if (mcspi_dma->dma_tx_channel != -1) { + 
omap_free_dma(mcspi_dma->dma_tx_channel); + mcspi_dma->dma_tx_channel = -1; + } } } @@ -671,8 +865,8 @@ static void omap2_mcspi_work(struct work_struct *work) mcspi = container_of(work, struct omap2_mcspi, work); spin_lock_irq(&mcspi->lock); - clk_enable(mcspi->ick); - clk_enable(mcspi->fck); + if (omap2_mcspi_enable_clocks(mcspi)) + goto out; /* We only enable one channel at a time -- the one whose message is * at the head of the queue -- although this controller would gladly @@ -686,6 +880,7 @@ static void omap2_mcspi_work(struct work_struct *work) struct spi_transfer *t = NULL; int cs_active = 0; struct omap2_mcspi_cs *cs; + struct omap2_mcspi_device_config *cd; int par_override = 0; int status = 0; u32 chconf; @@ -698,6 +893,7 @@ static void omap2_mcspi_work(struct work_struct *work) spi = m->spi; cs = spi->controller_state; + cd = spi->controller_data; omap2_mcspi_set_enable(spi, 1); list_for_each_entry(t, &m->transfers, transfer_list) { @@ -719,13 +915,22 @@ static void omap2_mcspi_work(struct work_struct *work) cs_active = 1; } - chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); + chconf = mcspi_cached_chconf0(spi); chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; + chconf &= ~OMAP2_MCSPI_CHCONF_TURBO; + if (t->tx_buf == NULL) chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; else if (t->rx_buf == NULL) chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; - mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf); + + if (cd && cd->turbo_mode && t->tx_buf == NULL) { + /* Turbo mode is for more than one word */ + if (t->len > ((cs->word_len + 7) >> 3)) + chconf |= OMAP2_MCSPI_CHCONF_TURBO; + } + + mcspi_write_chconf0(spi, chconf); if (t->len) { unsigned count; @@ -774,9 +979,9 @@ static void omap2_mcspi_work(struct work_struct *work) spin_lock_irq(&mcspi->lock); } - clk_disable(mcspi->fck); - clk_disable(mcspi->ick); + omap2_mcspi_disable_clocks(mcspi); +out: spin_unlock_irq(&mcspi->lock); } @@ -820,11 +1025,6 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) if (m->is_dma_mapped || len < DMA_MIN_BYTES) continue; - /* Do DMA mapping "early" for better error reporting and - * dcache use. Note that if dma_unmap_single() ever starts - * to do real work on ARM, we'd need to clean up mappings - * for previous transfers on *ALL* exits of this loop... 
- */ if (tx_buf != NULL) { t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, len, DMA_TO_DEVICE); @@ -841,7 +1041,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) dev_dbg(&spi->dev, "dma %cX %d bytes error\n", 'R', len); if (tx_buf != NULL) - dma_unmap_single(NULL, t->tx_dma, + dma_unmap_single(&spi->dev, t->tx_dma, len, DMA_TO_DEVICE); return -EINVAL; } @@ -863,8 +1063,8 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) struct spi_master *master = mcspi->master; u32 tmp; - clk_enable(mcspi->ick); - clk_enable(mcspi->fck); + if (omap2_mcspi_enable_clocks(mcspi)) + return -1; mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, OMAP2_MCSPI_SYSCONFIG_SOFTRESET); @@ -872,14 +1072,18 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS); } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); - mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, - /* (3 << 8) | (2 << 3) | */ - OMAP2_MCSPI_SYSCONFIG_AUTOIDLE); + tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | + OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | + OMAP2_MCSPI_SYSCONFIG_SMARTIDLE; + mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp); + omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp; - omap2_mcspi_set_master_mode(master); + tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; + mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); + omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp; - clk_disable(mcspi->fck); - clk_disable(mcspi->ick); + omap2_mcspi_set_master_mode(master); + omap2_mcspi_disable_clocks(mcspi); return 0; } @@ -907,7 +1111,8 @@ static u8 __initdata spi2_txdma_id[] = { OMAP24XX_DMA_SPI2_TX1, }; -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ + || defined(CONFIG_ARCH_OMAP4) static u8 __initdata spi3_rxdma_id[] = { OMAP24XX_DMA_SPI3_RX0, OMAP24XX_DMA_SPI3_RX1, @@ -919,7 +1124,7 @@ static u8 __initdata spi3_txdma_id[] = { }; #endif -#ifdef CONFIG_ARCH_OMAP3 +#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) static u8 __initdata spi4_rxdma_id[] = { OMAP34XX_DMA_SPI4_RX0, }; @@ -949,14 +1154,15 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) txdma_id = spi2_txdma_id; num_chipselect = 2; break; -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ + || defined(CONFIG_ARCH_OMAP4) case 3: rxdma_id = spi3_rxdma_id; txdma_id = spi3_txdma_id; num_chipselect = 2; break; #endif -#ifdef CONFIG_ARCH_OMAP3 +#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) case 4: rxdma_id = spi4_rxdma_id; txdma_id = spi4_txdma_id; @@ -1012,6 +1218,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) spin_lock_init(&mcspi->lock); INIT_LIST_HEAD(&mcspi->msg_queue); + INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); mcspi->ick = clk_get(&pdev->dev, "ick"); if (IS_ERR(mcspi->ick)) { @@ -1093,10 +1300,49 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev) /* work with hotplug and coldplug */ MODULE_ALIAS("platform:omap2_mcspi"); +#ifdef CONFIG_SUSPEND +/* + * When SPI wake up from off-mode, CS is in activate state. If it was in + * unactive state when driver was suspend, then force it to unactive state at + * wake up. 
+ */ +static int omap2_mcspi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi_cs *cs; + + omap2_mcspi_enable_clocks(mcspi); + list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs, + node) { + if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) { + + /* + * We need to toggle CS state for OMAP take this + * change in account. + */ + MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1); + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); + MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0); + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); + } + } + omap2_mcspi_disable_clocks(mcspi); + return 0; +} +#else +#define omap2_mcspi_resume NULL +#endif + +static const struct dev_pm_ops omap2_mcspi_pm_ops = { + .resume = omap2_mcspi_resume, +}; + static struct platform_driver omap2_mcspi_driver = { .driver = { .name = "omap2_mcspi", .owner = THIS_MODULE, + .pm = &omap2_mcspi_pm_ops }, .remove = __exit_p(omap2_mcspi_remove), }; diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c new file mode 100644 index 00000000000..9bd1c92ad96 --- /dev/null +++ b/drivers/spi/omap_spi_100k.c @@ -0,0 +1,637 @@ +/* + * OMAP7xx SPI 100k controller driver + * Author: Fabrice Crohas <fcrohas@gmail.com> + * from original omap1_mcspi driver + * + * Copyright (C) 2005, 2006 Nokia Corporation + * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and + * Juha Yrj�l� <juha.yrjola@nokia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/slab.h> + +#include <linux/spi/spi.h> + +#include <plat/clock.h> + +#define OMAP1_SPI100K_MAX_FREQ 48000000 + +#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12) + +#define SPI_SETUP1 0x00 +#define SPI_SETUP2 0x02 +#define SPI_CTRL 0x04 +#define SPI_STATUS 0x06 +#define SPI_TX_LSB 0x08 +#define SPI_TX_MSB 0x0a +#define SPI_RX_LSB 0x0c +#define SPI_RX_MSB 0x0e + +#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5) +#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4) +#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1) +#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0) + +#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0) +#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0) +#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5) +#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5) +#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10) +#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10) + +#define SPI_CTRL_SEN(x) ((x) << 7) +#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2) +#define SPI_CTRL_WR (1UL << 1) +#define SPI_CTRL_RD (1UL << 0) + +#define SPI_STATUS_WE (1UL << 1) +#define SPI_STATUS_RD (1UL << 0) + +#define WRITE 0 +#define READ 1 + + +/* use PIO for small transfers, avoiding DMA setup/teardown overhead and + * cache operations; better heuristics consider wordsize and bitrate. + */ +#define DMA_MIN_BYTES 8 + +#define SPI_RUNNING 0 +#define SPI_SHUTDOWN 1 + +struct omap1_spi100k { + struct work_struct work; + + /* lock protects queue and registers */ + spinlock_t lock; + struct list_head msg_queue; + struct spi_master *master; + struct clk *ick; + struct clk *fck; + + /* Virtual base address of the controller */ + void __iomem *base; + + /* State of the SPI */ + unsigned int state; +}; + +struct omap1_spi100k_cs { + void __iomem *base; + int word_len; +}; + +static struct workqueue_struct *omap1_spi100k_wq; + +#define MOD_REG_BIT(val, mask, set) do { \ + if (set) \ + val |= mask; \ + else \ + val &= ~mask; \ +} while (0) + +static void spi100k_enable_clock(struct spi_master *master) +{ + unsigned int val; + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + /* enable SPI */ + val = readw(spi100k->base + SPI_SETUP1); + val |= SPI_SETUP1_CLOCK_ENABLE; + writew(val, spi100k->base + SPI_SETUP1); +} + +static void spi100k_disable_clock(struct spi_master *master) +{ + unsigned int val; + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + /* disable SPI */ + val = readw(spi100k->base + SPI_SETUP1); + val &= ~SPI_SETUP1_CLOCK_ENABLE; + writew(val, spi100k->base + SPI_SETUP1); +} + +static void spi100k_write_data(struct spi_master *master, int len, int data) +{ + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + /* write 16-bit word, shifting 8-bit data if necessary */ + if (len <= 8) { + data <<= 8; + len = 16; + } + + spi100k_enable_clock(master); + writew( data , spi100k->base + SPI_TX_MSB); + + writew(SPI_CTRL_SEN(0) | + SPI_CTRL_WORD_SIZE(len) | + SPI_CTRL_WR, + spi100k->base + SPI_CTRL); + + /* Wait for bit ack send change */ + while((readw(spi100k->base + SPI_STATUS) & 
SPI_STATUS_WE) != SPI_STATUS_WE); + udelay(1000); + + spi100k_disable_clock(master); +} + +static int spi100k_read_data(struct spi_master *master, int len) +{ + int dataH,dataL; + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + /* Always do at least 16 bits */ + if (len <= 8) + len = 16; + + spi100k_enable_clock(master); + writew(SPI_CTRL_SEN(0) | + SPI_CTRL_WORD_SIZE(len) | + SPI_CTRL_RD, + spi100k->base + SPI_CTRL); + + while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD); + udelay(1000); + + dataL = readw(spi100k->base + SPI_RX_LSB); + dataH = readw(spi100k->base + SPI_RX_MSB); + spi100k_disable_clock(master); + + return dataL; +} + +static void spi100k_open(struct spi_master *master) +{ + /* get control of SPI */ + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + writew(SPI_SETUP1_INT_READ_ENABLE | + SPI_SETUP1_INT_WRITE_ENABLE | + SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1); + + /* configure clock and interrupts */ + writew(SPI_SETUP2_ACTIVE_EDGE_FALLING | + SPI_SETUP2_NEGATIVE_LEVEL | + SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2); +} + +static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable) +{ + if (enable) + writew(0x05fc, spi100k->base + SPI_CTRL); + else + writew(0x05fd, spi100k->base + SPI_CTRL); +} + +static unsigned +omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) +{ + struct omap1_spi100k *spi100k; + struct omap1_spi100k_cs *cs = spi->controller_state; + unsigned int count, c; + int word_len; + + spi100k = spi_master_get_devdata(spi->master); + count = xfer->len; + c = count; + word_len = cs->word_len; + + if (word_len <= 8) { + u8 *rx; + const u8 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c-=1; + if (xfer->tx_buf != NULL) + spi100k_write_data(spi->master, word_len, *tx++); + if (xfer->rx_buf != NULL) + *rx++ = spi100k_read_data(spi->master, word_len); + } while(c); + } else if (word_len <= 16) { + u16 *rx; + const u16 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c-=2; + if (xfer->tx_buf != NULL) + spi100k_write_data(spi->master,word_len, *tx++); + if (xfer->rx_buf != NULL) + *rx++ = spi100k_read_data(spi->master,word_len); + } while(c); + } else if (word_len <= 32) { + u32 *rx; + const u32 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c-=4; + if (xfer->tx_buf != NULL) + spi100k_write_data(spi->master,word_len, *tx); + if (xfer->rx_buf != NULL) + *rx = spi100k_read_data(spi->master,word_len); + } while(c); + } + return count - c; +} + +/* called only when no transfer is active to this device */ +static int omap1_spi100k_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master); + struct omap1_spi100k_cs *cs = spi->controller_state; + u8 word_len = spi->bits_per_word; + + if (t != NULL && t->bits_per_word) + word_len = t->bits_per_word; + if (!word_len) + word_len = 8; + + if (spi->bits_per_word > 32) + return -EINVAL; + cs->word_len = word_len; + + /* SPI init before transfer */ + writew(0x3e , spi100k->base + SPI_SETUP1); + writew(0x00 , spi100k->base + SPI_STATUS); + writew(0x3e , spi100k->base + SPI_CTRL); + + return 0; +} + +/* the spi->mode bits understood by this driver: */ +#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) + +static int omap1_spi100k_setup(struct spi_device *spi) +{ + int ret; + struct omap1_spi100k *spi100k; + struct omap1_spi100k_cs *cs = spi->controller_state; + + if (spi->bits_per_word < 4 || 
spi->bits_per_word > 32) { + dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", + spi->bits_per_word); + return -EINVAL; + } + + spi100k = spi_master_get_devdata(spi->master); + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + cs->base = spi100k->base + spi->chip_select * 0x14; + spi->controller_state = cs; + } + + spi100k_open(spi->master); + + clk_enable(spi100k->ick); + clk_enable(spi100k->fck); + + ret = omap1_spi100k_setup_transfer(spi, NULL); + + clk_disable(spi100k->ick); + clk_disable(spi100k->fck); + + return ret; +} + +static void omap1_spi100k_work(struct work_struct *work) +{ + struct omap1_spi100k *spi100k; + int status = 0; + + spi100k = container_of(work, struct omap1_spi100k, work); + spin_lock_irq(&spi100k->lock); + + clk_enable(spi100k->ick); + clk_enable(spi100k->fck); + + /* We only enable one channel at a time -- the one whose message is + * at the head of the queue -- although this controller would gladly + * arbitrate among multiple channels. This corresponds to "single + * channel" master mode. As a side effect, we need to manage the + * chipselect with the FORCE bit ... CS != channel enable. + */ + while (!list_empty(&spi100k->msg_queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + int cs_active = 0; + struct omap1_spi100k_cs *cs; + int par_override = 0; + + m = container_of(spi100k->msg_queue.next, struct spi_message, + queue); + + list_del_init(&m->queue); + spin_unlock_irq(&spi100k->lock); + + spi = m->spi; + cs = spi->controller_state; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { + status = -EINVAL; + break; + } + if (par_override || t->speed_hz || t->bits_per_word) { + par_override = 1; + status = omap1_spi100k_setup_transfer(spi, t); + if (status < 0) + break; + if (!t->speed_hz && !t->bits_per_word) + par_override = 0; + } + + if (!cs_active) { + omap1_spi100k_force_cs(spi100k, 1); + cs_active = 1; + } + + if (t->len) { + unsigned count; + + count = omap1_spi100k_txrx_pio(spi, t); + m->actual_length += count; + + if (count != t->len) { + status = -EIO; + break; + } + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + + /* ignore the "leave it on after last xfer" hint */ + + if (t->cs_change) { + omap1_spi100k_force_cs(spi100k, 0); + cs_active = 0; + } + } + + /* Restore defaults if they were overriden */ + if (par_override) { + par_override = 0; + status = omap1_spi100k_setup_transfer(spi, NULL); + } + + if (cs_active) + omap1_spi100k_force_cs(spi100k, 0); + + m->status = status; + m->complete(m->context); + + spin_lock_irq(&spi100k->lock); + } + + clk_disable(spi100k->ick); + clk_disable(spi100k->fck); + spin_unlock_irq(&spi100k->lock); + + if (status < 0) + printk(KERN_WARNING "spi transfer failed with %d\n", status); +} + +static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct omap1_spi100k *spi100k; + unsigned long flags; + struct spi_transfer *t; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spi100k = spi_master_get_devdata(spi->master); + + /* Don't accept new work if we're shutting down */ + if (spi100k->state == SPI_SHUTDOWN) + return -ESHUTDOWN; + + /* reject invalid messages and transfers */ + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + list_for_each_entry(t, &m->transfers, transfer_list) { + const void *tx_buf = t->tx_buf; + void *rx_buf = t->rx_buf; + unsigned len = t->len; + + if (t->speed_hz > 
OMAP1_SPI100K_MAX_FREQ + || (len && !(rx_buf || tx_buf)) + || (t->bits_per_word && + ( t->bits_per_word < 4 + || t->bits_per_word > 32))) { + dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", + t->speed_hz, + len, + tx_buf ? "tx" : "", + rx_buf ? "rx" : "", + t->bits_per_word); + return -EINVAL; + } + + if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) { + dev_dbg(&spi->dev, "%d Hz max exceeds %d\n", + t->speed_hz, + OMAP1_SPI100K_MAX_FREQ/(1<<16)); + return -EINVAL; + } + + } + + spin_lock_irqsave(&spi100k->lock, flags); + list_add_tail(&m->queue, &spi100k->msg_queue); + queue_work(omap1_spi100k_wq, &spi100k->work); + spin_unlock_irqrestore(&spi100k->lock, flags); + + return 0; +} + +static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k) +{ + return 0; +} + +static int __devinit omap1_spi100k_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct omap1_spi100k *spi100k; + int status = 0; + + if (!pdev->id) + return -EINVAL; + + master = spi_alloc_master(&pdev->dev, sizeof *spi100k); + if (master == NULL) { + dev_dbg(&pdev->dev, "master allocation failed\n"); + return -ENOMEM; + } + + if (pdev->id != -1) + master->bus_num = pdev->id; + + master->setup = omap1_spi100k_setup; + master->transfer = omap1_spi100k_transfer; + master->cleanup = NULL; + master->num_chipselect = 2; + master->mode_bits = MODEBITS; + + dev_set_drvdata(&pdev->dev, master); + + spi100k = spi_master_get_devdata(master); + spi100k->master = master; + + /* + * The memory region base address is taken as the platform_data. + * You should allocate this with ioremap() before initializing + * the SPI. + */ + spi100k->base = (void __iomem *) pdev->dev.platform_data; + + INIT_WORK(&spi100k->work, omap1_spi100k_work); + + spin_lock_init(&spi100k->lock); + INIT_LIST_HEAD(&spi100k->msg_queue); + spi100k->ick = clk_get(&pdev->dev, "ick"); + if (IS_ERR(spi100k->ick)) { + dev_dbg(&pdev->dev, "can't get spi100k_ick\n"); + status = PTR_ERR(spi100k->ick); + goto err1; + } + + spi100k->fck = clk_get(&pdev->dev, "fck"); + if (IS_ERR(spi100k->fck)) { + dev_dbg(&pdev->dev, "can't get spi100k_fck\n"); + status = PTR_ERR(spi100k->fck); + goto err2; + } + + if (omap1_spi100k_reset(spi100k) < 0) + goto err3; + + status = spi_register_master(master); + if (status < 0) + goto err3; + + spi100k->state = SPI_RUNNING; + + return status; + +err3: + clk_put(spi100k->fck); +err2: + clk_put(spi100k->ick); +err1: + spi_master_put(master); + return status; +} + +static int __exit omap1_spi100k_remove(struct platform_device *pdev) +{ + struct spi_master *master; + struct omap1_spi100k *spi100k; + struct resource *r; + unsigned limit = 500; + unsigned long flags; + int status = 0; + + master = dev_get_drvdata(&pdev->dev); + spi100k = spi_master_get_devdata(master); + + spin_lock_irqsave(&spi100k->lock, flags); + + spi100k->state = SPI_SHUTDOWN; + while (!list_empty(&spi100k->msg_queue) && limit--) { + spin_unlock_irqrestore(&spi100k->lock, flags); + msleep(10); + spin_lock_irqsave(&spi100k->lock, flags); + } + + if (!list_empty(&spi100k->msg_queue)) + status = -EBUSY; + + spin_unlock_irqrestore(&spi100k->lock, flags); + + if (status != 0) + return status; + + clk_put(spi100k->fck); + clk_put(spi100k->ick); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + spi_unregister_master(master); + + return 0; +} + +static struct platform_driver omap1_spi100k_driver = { + .driver = { + .name = "omap1_spi100k", + .owner = THIS_MODULE, + }, + .remove = __exit_p(omap1_spi100k_remove), +}; + + +static int 
__init omap1_spi100k_init(void) +{ + omap1_spi100k_wq = create_singlethread_workqueue( + omap1_spi100k_driver.driver.name); + + if (omap1_spi100k_wq == NULL) + return -1; + + return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe); +} + +static void __exit omap1_spi100k_exit(void) +{ + platform_driver_unregister(&omap1_spi100k_driver); + + destroy_workqueue(omap1_spi100k_wq); +} + +module_init(omap1_spi100k_init); +module_exit(omap1_spi100k_exit); + +MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver"); +MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c index aa90ddb3706..160d3266205 100644 --- a/drivers/spi/omap_uwire.c +++ b/drivers/spi/omap_uwire.c @@ -41,6 +41,7 @@ #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> @@ -51,8 +52,8 @@ #include <asm/io.h> #include <asm/mach-types.h> -#include <mach/mux.h> -#include <mach/omap730.h> /* OMAP730_IO_CONF registers */ +#include <plat/mux.h> +#include <plat/omap7xx.h> /* OMAP7XX_IO_CONF registers */ /* FIXME address is now a platform device resource, @@ -213,7 +214,7 @@ static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t) unsigned bits = ust->bits_per_word; unsigned bytes; u16 val, w; - int status = 0;; + int status = 0; if (!t->tx_buf && !t->rx_buf) return 0; @@ -504,7 +505,7 @@ static int __init uwire_probe(struct platform_device *pdev) } clk_enable(uwire->ck); - if (cpu_is_omap730()) + if (cpu_is_omap7xx()) uwire_idx_shift = 1; else uwire_idx_shift = 2; @@ -514,6 +515,8 @@ static int __init uwire_probe(struct platform_device *pdev) /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->flags = SPI_MASTER_HALF_DUPLEX; + master->bus_num = 2; /* "official" */ master->num_chipselect = 4; master->setup = uwire_setup; @@ -571,8 +574,8 @@ static int __init omap_uwire_init(void) } if (machine_is_omap_perseus2()) { /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */ - int val = omap_readl(OMAP730_IO_CONF_9) & ~0x00EEE000; - omap_writel(val | 0x00AAA000, OMAP730_IO_CONF_9); + int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000; + omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9); } return platform_driver_probe(&uwire_driver, uwire_probe); diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index 3aea50da7b2..0b677dc041a 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c @@ -404,7 +404,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) goto msg_rejected; } - if ((t != NULL) && t->bits_per_word) + if (t->bits_per_word) bits_per_word = t->bits_per_word; if ((bits_per_word != 8) && (bits_per_word != 16)) { @@ -415,7 +415,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) goto msg_rejected; } /*make sure buffer length is even when working in 16 bit mode*/ - if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) { + if ((t->bits_per_word == 16) && (t->len & 1)) { dev_err(&spi->dev, "message rejected : " "odd data length (%d) while in 16 bit mode\n", diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index d949dbf1141..95928833855 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -23,21 +23,18 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/spi/pxa2xx_spi.h> 
#include <linux/dma-mapping.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <linux/delay.h> -#include <linux/clk.h> #include <linux/gpio.h> +#include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/delay.h> -#include <mach/dma.h> -#include <mach/regs-ssp.h> -#include <mach/ssp.h> -#include <mach/pxa2xx_spi.h> MODULE_AUTHOR("Stephen Street"); MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); @@ -46,8 +43,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); #define MAX_BUSES 3 -#define RX_THRESH_DFLT 8 -#define TX_THRESH_DFLT 8 #define TIMOUT_DFLT 1000 #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) @@ -168,7 +163,10 @@ struct chip_data { u8 enable_dma; u8 bits_per_word; u32 speed_hz; - int gpio_cs; + union { + int gpio_cs; + unsigned int frm; + }; int gpio_cs_inverted; int (*write)(struct driver_data *drv_data); int (*read)(struct driver_data *drv_data); @@ -181,6 +179,11 @@ static void cs_assert(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; + if (drv_data->ssp_type == CE4100_SSP) { + write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); + return; + } + if (chip->cs_control) { chip->cs_control(PXA2XX_CS_ASSERT); return; @@ -194,6 +197,9 @@ static void cs_deassert(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; + if (drv_data->ssp_type == CE4100_SSP) + return; + if (chip->cs_control) { chip->cs_control(PXA2XX_CS_DEASSERT); return; @@ -203,6 +209,25 @@ static void cs_deassert(struct driver_data *drv_data) gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); } +static void write_SSSR_CS(struct driver_data *drv_data, u32 val) +{ + void __iomem *reg = drv_data->ioaddr; + + if (drv_data->ssp_type == CE4100_SSP) + val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; + + write_SSSR(val, reg); +} + +static int pxa25x_ssp_comp(struct driver_data *drv_data) +{ + if (drv_data->ssp_type == PXA25x_SSP) + return 1; + if (drv_data->ssp_type == CE4100_SSP) + return 1; + return 0; +} + static int flush(struct driver_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; @@ -214,7 +239,7 @@ static int flush(struct driver_data *drv_data) read_SSDR(reg); } } while ((read_SSSR(reg) & SSSR_BSY) && --limit); - write_SSSR(SSSR_ROR, reg); + write_SSSR_CS(drv_data, SSSR_ROR); return limit; } @@ -224,7 +249,7 @@ static int null_writer(struct driver_data *drv_data) void __iomem *reg = drv_data->ioaddr; u8 n_bytes = drv_data->n_bytes; - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; @@ -252,7 +277,7 @@ static int u8_writer(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; @@ -279,7 +304,7 @@ static int u16_writer(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; @@ -306,7 +331,7 @@ static int u32_writer(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; @@ -507,9 +532,9 @@ static void dma_error_stop(struct driver_data *drv_data, const char *msg) /* Stop and reset */ 
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; - write_SSSR(drv_data->clear_sr, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); - if (drv_data->ssp_type != PXA25x_SSP) + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); flush(drv_data); write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); @@ -529,7 +554,7 @@ static void dma_transfer_complete(struct driver_data *drv_data) /* Clear and disable interrupts on SSP and DMA channels*/ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); - write_SSSR(drv_data->clear_sr, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; @@ -622,7 +647,7 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data) /* Clear and disable timeout interrupt, do the rest in * dma_transfer_complete */ - if (drv_data->ssp_type != PXA25x_SSP) + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); /* finish this transfer, start the next */ @@ -635,14 +660,26 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data) return IRQ_NONE; } +static void reset_sccr1(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + struct chip_data *chip = drv_data->cur_chip; + u32 sccr1_reg; + + sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; + sccr1_reg &= ~SSCR1_RFT; + sccr1_reg |= chip->threshold; + write_SSCR1(sccr1_reg, reg); +} + static void int_error_stop(struct driver_data *drv_data, const char* msg) { void __iomem *reg = drv_data->ioaddr; /* Stop and reset SSP */ - write_SSSR(drv_data->clear_sr, reg); - write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); - if (drv_data->ssp_type != PXA25x_SSP) + write_SSSR_CS(drv_data, drv_data->clear_sr); + reset_sccr1(drv_data); + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); flush(drv_data); write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); @@ -658,9 +695,9 @@ static void int_transfer_complete(struct driver_data *drv_data) void __iomem *reg = drv_data->ioaddr; /* Stop SSP */ - write_SSSR(drv_data->clear_sr, reg); - write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); - if (drv_data->ssp_type != PXA25x_SSP) + write_SSSR_CS(drv_data, drv_data->clear_sr); + reset_sccr1(drv_data); + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); /* Update total byte transfered return count actual bytes read */ @@ -714,24 +751,34 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) } if (drv_data->tx == drv_data->tx_end) { - write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); - /* PXA25x_SSP has no timeout, read trailing bytes */ - if (drv_data->ssp_type == PXA25x_SSP) { - if (!wait_ssp_rx_stall(reg)) - { - int_error_stop(drv_data, "interrupt_transfer: " - "rx stall failed"); - return IRQ_HANDLED; - } - if (!drv_data->read(drv_data)) - { - int_error_stop(drv_data, - "interrupt_transfer: " - "trailing byte read failed"); - return IRQ_HANDLED; + u32 bytes_left; + u32 sccr1_reg; + + sccr1_reg = read_SSCR1(reg); + sccr1_reg &= ~SSCR1_TIE; + + /* + * PXA25x_SSP has no timeout, set up rx threshould for the + * remaing RX bytes. 
+ */ + if (pxa25x_ssp_comp(drv_data)) { + + sccr1_reg &= ~SSCR1_RFT; + + bytes_left = drv_data->rx_end - drv_data->rx; + switch (drv_data->n_bytes) { + case 4: + bytes_left >>= 1; + case 2: + bytes_left >>= 1; } - int_transfer_complete(drv_data); + + if (bytes_left > RX_THRESH_DFLT) + bytes_left = RX_THRESH_DFLT; + + sccr1_reg |= SSCR1_RxTresh(bytes_left); } + write_SSCR1(sccr1_reg, reg); } /* We did something */ @@ -742,14 +789,26 @@ static irqreturn_t ssp_int(int irq, void *dev_id) { struct driver_data *drv_data = dev_id; void __iomem *reg = drv_data->ioaddr; + u32 sccr1_reg = read_SSCR1(reg); + u32 mask = drv_data->mask_sr; + u32 status; + + status = read_SSSR(reg); + + /* Ignore possible writes if we don't need to write */ + if (!(sccr1_reg & SSCR1_TIE)) + mask &= ~SSSR_TFS; + + if (!(status & mask)) + return IRQ_NONE; if (!drv_data->cur_msg) { write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); - if (drv_data->ssp_type != PXA25x_SSP) + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); - write_SSSR(drv_data->clear_sr, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); dev_err(&drv_data->pdev->dev, "bad message state " "in interrupt handler\n"); @@ -862,7 +921,7 @@ static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate) { unsigned long ssp_clk = clk_get_rate(ssp->clk); - if (ssp->type == PXA25x_SSP) + if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; else return ((ssp_clk / rate - 1) & 0xfff) << 8; @@ -1088,7 +1147,7 @@ static void pump_transfers(unsigned long data) /* Clear status */ cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; - write_SSSR(drv_data->clear_sr, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); } /* see if we need to reload the config registers */ @@ -1098,7 +1157,7 @@ static void pump_transfers(unsigned long data) /* stop the SSP, and update the other bits */ write_SSCR0(cr0 & ~SSCR0_SSE, reg); - if (drv_data->ssp_type != PXA25x_SSP) + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(chip->timeout, reg); /* first set CR1 without interrupt and service enables */ write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); @@ -1106,7 +1165,7 @@ static void pump_transfers(unsigned long data) write_SSCR0(cr0, reg); } else { - if (drv_data->ssp_type != PXA25x_SSP) + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(chip->timeout, reg); } @@ -1233,14 +1292,13 @@ static int setup(struct spi_device *spi) uint tx_thres = TX_THRESH_DFLT; uint rx_thres = RX_THRESH_DFLT; - if (drv_data->ssp_type != PXA25x_SSP + if (!pxa25x_ssp_comp(drv_data) && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " "b/w not 4-32 for type non-PXA25x_SSP\n", drv_data->ssp_type, spi->bits_per_word); return -EINVAL; - } - else if (drv_data->ssp_type == PXA25x_SSP + } else if (pxa25x_ssp_comp(drv_data) && (spi->bits_per_word < 4 || spi->bits_per_word > 16)) { dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " @@ -1259,7 +1317,17 @@ static int setup(struct spi_device *spi) return -ENOMEM; } - chip->gpio_cs = -1; + if (drv_data->ssp_type == CE4100_SSP) { + if (spi->chip_select > 4) { + dev_err(&spi->dev, "failed setup: " + "cs number must not be > 4.\n"); + kfree(chip); + return -EINVAL; + } + + chip->frm = spi->chip_select; + } else + chip->gpio_cs = -1; chip->enable_dma = 0; chip->timeout = TIMOUT_DFLT; chip->dma_burst_size = drv_data->master_info->enable_dma ? 
@@ -1315,16 +1383,16 @@ static int setup(struct spi_device *spi) | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); /* NOTE: PXA25x_SSP _could_ use external clocking ... */ - if (drv_data->ssp_type != PXA25x_SSP) + if (!pxa25x_ssp_comp(drv_data)) dev_dbg(&spi->dev, "%ld Hz actual, %s\n", - clk_get_rate(ssp->clk) - / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), - chip->enable_dma ? "DMA" : "PIO"); + clk_get_rate(ssp->clk) + / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), + chip->enable_dma ? "DMA" : "PIO"); else dev_dbg(&spi->dev, "%ld Hz actual, %s\n", - clk_get_rate(ssp->clk) / 2 - / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), - chip->enable_dma ? "DMA" : "PIO"); + clk_get_rate(ssp->clk) / 2 + / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)), + chip->enable_dma ? "DMA" : "PIO"); if (spi->bits_per_word <= 8) { chip->n_bytes = 1; @@ -1350,23 +1418,27 @@ static int setup(struct spi_device *spi) spi_set_ctldata(spi, chip); + if (drv_data->ssp_type == CE4100_SSP) + return 0; + return setup_cs(spi, chip, chip_info); } static void cleanup(struct spi_device *spi) { struct chip_data *chip = spi_get_ctldata(spi); + struct driver_data *drv_data = spi_master_get_devdata(spi->master); if (!chip) return; - if (gpio_is_valid(chip->gpio_cs)) + if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs)) gpio_free(chip->gpio_cs); kfree(chip); } -static int __init init_queue(struct driver_data *drv_data) +static int __devinit init_queue(struct driver_data *drv_data) { INIT_LIST_HEAD(&drv_data->queue); spin_lock_init(&drv_data->lock); @@ -1454,7 +1526,7 @@ static int destroy_queue(struct driver_data *drv_data) return 0; } -static int __init pxa2xx_spi_probe(struct platform_device *pdev) +static int __devinit pxa2xx_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pxa2xx_spi_master *platform_info; @@ -1465,7 +1537,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) platform_info = dev->platform_data; - ssp = ssp_request(pdev->id, pdev->name); + ssp = pxa_ssp_request(pdev->id, pdev->name); if (ssp == NULL) { dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); return -ENODEV; @@ -1475,7 +1547,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); if (!master) { dev_err(&pdev->dev, "cannot alloc spi_master\n"); - ssp_free(ssp); + pxa_ssp_free(ssp); return -ENOMEM; } drv_data = spi_master_get_devdata(master); @@ -1484,6 +1556,10 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) drv_data->pdev = pdev; drv_data->ssp = ssp; + master->dev.parent = &pdev->dev; +#ifdef CONFIG_OF + master->dev.of_node = pdev->dev.of_node; +#endif /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; @@ -1500,7 +1576,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) drv_data->ioaddr = ssp->mmio_base; drv_data->ssdr_physical = ssp->phys_base + SSDR; - if (ssp->type == PXA25x_SSP) { + if (pxa25x_ssp_comp(drv_data)) { drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; drv_data->dma_cr1 = 0; drv_data->clear_sr = SSSR_ROR; @@ -1512,7 +1588,8 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; } - status = request_irq(ssp->irq, ssp_int, 0, dev_name(dev), drv_data); + status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), + drv_data); if (status < 0) { dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); goto out_error_master_alloc; @@ 
-1557,11 +1634,11 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT), drv_data->ioaddr); - write_SSCR0(SSCR0_SerClkDiv(2) + write_SSCR0(SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8), drv_data->ioaddr); - if (drv_data->ssp_type != PXA25x_SSP) + if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, drv_data->ioaddr); write_SSPSP(0, drv_data->ioaddr); @@ -1604,7 +1681,7 @@ out_error_irq_alloc: out_error_master_alloc: spi_master_put(master); - ssp_free(ssp); + pxa_ssp_free(ssp); return status; } @@ -1648,7 +1725,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) free_irq(ssp->irq, drv_data); /* Release SSP */ - ssp_free(ssp); + pxa_ssp_free(ssp); /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); @@ -1668,10 +1745,9 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev) } #ifdef CONFIG_PM - -static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state) +static int pxa2xx_spi_suspend(struct device *dev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct driver_data *drv_data = dev_get_drvdata(dev); struct ssp_device *ssp = drv_data->ssp; int status = 0; @@ -1684,9 +1760,9 @@ static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state) return 0; } -static int pxa2xx_spi_resume(struct platform_device *pdev) +static int pxa2xx_spi_resume(struct device *dev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct driver_data *drv_data = dev_get_drvdata(dev); struct ssp_device *ssp = drv_data->ssp; int status = 0; @@ -1703,33 +1779,37 @@ static int pxa2xx_spi_resume(struct platform_device *pdev) /* Start the queue running */ status = start_queue(drv_data); if (status != 0) { - dev_err(&pdev->dev, "problem starting queue (%d)\n", status); + dev_err(dev, "problem starting queue (%d)\n", status); return status; } return 0; } -#else -#define pxa2xx_spi_suspend NULL -#define pxa2xx_spi_resume NULL -#endif /* CONFIG_PM */ + +static const struct dev_pm_ops pxa2xx_spi_pm_ops = { + .suspend = pxa2xx_spi_suspend, + .resume = pxa2xx_spi_resume, +}; +#endif static struct platform_driver driver = { .driver = { - .name = "pxa2xx-spi", - .owner = THIS_MODULE, + .name = "pxa2xx-spi", + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &pxa2xx_spi_pm_ops, +#endif }, + .probe = pxa2xx_spi_probe, .remove = pxa2xx_spi_remove, .shutdown = pxa2xx_spi_shutdown, - .suspend = pxa2xx_spi_suspend, - .resume = pxa2xx_spi_resume, }; static int __init pxa2xx_spi_init(void) { - return platform_driver_probe(&driver, pxa2xx_spi_probe); + return platform_driver_register(&driver); } -module_init(pxa2xx_spi_init); +subsys_initcall(pxa2xx_spi_init); static void __exit pxa2xx_spi_exit(void) { diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c new file mode 100644 index 00000000000..351d8a375b5 --- /dev/null +++ b/drivers/spi/pxa2xx_spi_pci.c @@ -0,0 +1,201 @@ +/* + * CE4100's SPI device is more or less the same one as found on PXA + * + */ +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/spi/pxa2xx_spi.h> + +struct awesome_struct { + struct ssp_device ssp; + struct platform_device spi_pdev; + struct pxa2xx_spi_master spi_pdata; +}; + +static DEFINE_MUTEX(ssp_lock); +static LIST_HEAD(ssp_list); + +struct ssp_device *pxa_ssp_request(int port, const char *label) +{ + struct ssp_device *ssp = NULL; + + mutex_lock(&ssp_lock); + + list_for_each_entry(ssp, 
&ssp_list, node) { + if (ssp->port_id == port && ssp->use_count == 0) { + ssp->use_count++; + ssp->label = label; + break; + } + } + + mutex_unlock(&ssp_lock); + + if (&ssp->node == &ssp_list) + return NULL; + + return ssp; +} +EXPORT_SYMBOL_GPL(pxa_ssp_request); + +void pxa_ssp_free(struct ssp_device *ssp) +{ + mutex_lock(&ssp_lock); + if (ssp->use_count) { + ssp->use_count--; + ssp->label = NULL; + } else + dev_err(&ssp->pdev->dev, "device already free\n"); + mutex_unlock(&ssp_lock); +} +EXPORT_SYMBOL_GPL(pxa_ssp_free); + +static void plat_dev_release(struct device *dev) +{ + struct awesome_struct *as = container_of(dev, + struct awesome_struct, spi_pdev.dev); + + of_device_node_put(&as->spi_pdev.dev); +} + +static int __devinit ce4100_spi_probe(struct pci_dev *dev, + const struct pci_device_id *ent) +{ + int ret; + resource_size_t phys_beg; + resource_size_t phys_len; + struct awesome_struct *spi_info; + struct platform_device *pdev; + struct pxa2xx_spi_master *spi_pdata; + struct ssp_device *ssp; + + ret = pci_enable_device(dev); + if (ret) + return ret; + + phys_beg = pci_resource_start(dev, 0); + phys_len = pci_resource_len(dev, 0); + + if (!request_mem_region(phys_beg, phys_len, + "CE4100 SPI")) { + dev_err(&dev->dev, "Can't request register space.\n"); + ret = -EBUSY; + return ret; + } + + spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); + if (!spi_info) { + ret = -ENOMEM; + goto err_kz; + } + ssp = &spi_info->ssp; + pdev = &spi_info->spi_pdev; + spi_pdata = &spi_info->spi_pdata; + + pdev->name = "pxa2xx-spi"; + pdev->id = dev->devfn; + pdev->dev.parent = &dev->dev; + pdev->dev.platform_data = &spi_info->spi_pdata; + +#ifdef CONFIG_OF + pdev->dev.of_node = dev->dev.of_node; +#endif + pdev->dev.release = plat_dev_release; + + spi_pdata->num_chipselect = dev->devfn; + + ssp->phys_base = pci_resource_start(dev, 0); + ssp->mmio_base = ioremap(phys_beg, phys_len); + if (!ssp->mmio_base) { + dev_err(&pdev->dev, "failed to ioremap() registers\n"); + ret = -EIO; + goto err_remap; + } + ssp->irq = dev->irq; + ssp->port_id = pdev->id; + ssp->type = PXA25x_SSP; + + mutex_lock(&ssp_lock); + list_add(&ssp->node, &ssp_list); + mutex_unlock(&ssp_lock); + + pci_set_drvdata(dev, spi_info); + + ret = platform_device_register(pdev); + if (ret) + goto err_dev_add; + + return ret; + +err_dev_add: + pci_set_drvdata(dev, NULL); + mutex_lock(&ssp_lock); + list_del(&ssp->node); + mutex_unlock(&ssp_lock); + iounmap(ssp->mmio_base); + +err_remap: + kfree(spi_info); + +err_kz: + release_mem_region(phys_beg, phys_len); + + return ret; +} + +static void __devexit ce4100_spi_remove(struct pci_dev *dev) +{ + struct awesome_struct *spi_info; + struct platform_device *pdev; + struct ssp_device *ssp; + + spi_info = pci_get_drvdata(dev); + + ssp = &spi_info->ssp; + pdev = &spi_info->spi_pdev; + + platform_device_unregister(pdev); + + iounmap(ssp->mmio_base); + release_mem_region(pci_resource_start(dev, 0), + pci_resource_len(dev, 0)); + + mutex_lock(&ssp_lock); + list_del(&ssp->node); + mutex_unlock(&ssp_lock); + + pci_set_drvdata(dev, NULL); + pci_disable_device(dev); + kfree(spi_info); +} + +static struct pci_device_id ce4100_spi_devices[] __devinitdata = { + + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, + { }, +}; +MODULE_DEVICE_TABLE(pci, ce4100_spi_devices); + +static struct pci_driver ce4100_spi_driver = { + .name = "ce4100_spi", + .id_table = ce4100_spi_devices, + .probe = ce4100_spi_probe, + .remove = __devexit_p(ce4100_spi_remove), +}; + +static int __init ce4100_spi_init(void) +{ + return 
pci_register_driver(&ce4100_spi_driver); +} +module_init(ce4100_spi_init); + +static void __exit ce4100_spi_exit(void) +{ + pci_unregister_driver(&ce4100_spi_driver); +} +module_exit(ce4100_spi_exit); + +MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>"); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 70845ccd85c..34bb17f0301 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -23,13 +23,13 @@ #include <linux/init.h> #include <linux/cache.h> #include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/slab.h> +#include <linux/mod_devicetable.h> #include <linux/spi/spi.h> +#include <linux/of_spi.h> +#include <linux/pm_runtime.h> - -/* SPI bustype and spi_master class are registered after board init code - * provides the SPI device tables, ensuring that both are present by the - * time controller driver registration causes spi_devices to "enumerate". - */ static void spidev_release(struct device *dev) { struct spi_device *spi = to_spi_device(dev); @@ -39,7 +39,7 @@ static void spidev_release(struct device *dev) spi->master->cleanup(spi); spi_master_put(spi->master); - kfree(dev); + kfree(spi); } static ssize_t @@ -59,9 +59,36 @@ static struct device_attribute spi_dev_attrs[] = { * and the sysfs version makes coldplug work too. */ +static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, + const struct spi_device *sdev) +{ + while (id->name[0]) { + if (!strcmp(sdev->modalias, id->name)) + return id; + id++; + } + return NULL; +} + +const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) +{ + const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); + + return spi_match_id(sdrv->id_table, sdev); +} +EXPORT_SYMBOL_GPL(spi_get_device_id); + static int spi_match_device(struct device *dev, struct device_driver *drv) { const struct spi_device *spi = to_spi_device(dev); + const struct spi_driver *sdrv = to_spi_driver(drv); + + /* Attempt an OF style match */ + if (of_driver_match_device(dev, drv)) + return 1; + + if (sdrv->id_table) + return !!spi_match_id(sdrv->id_table, spi); return strcmp(spi->modalias, drv->name) == 0; } @@ -70,13 +97,12 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) { const struct spi_device *spi = to_spi_device(dev); - add_uevent_var(env, "MODALIAS=%s", spi->modalias); + add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); return 0; } -#ifdef CONFIG_PM - -static int spi_suspend(struct device *dev, pm_message_t message) +#ifdef CONFIG_PM_SLEEP +static int spi_legacy_suspend(struct device *dev, pm_message_t message) { int value = 0; struct spi_driver *drv = to_spi_driver(dev->driver); @@ -91,7 +117,7 @@ static int spi_suspend(struct device *dev, pm_message_t message) return value; } -static int spi_resume(struct device *dev) +static int spi_legacy_resume(struct device *dev) { int value = 0; struct spi_driver *drv = to_spi_driver(dev->driver); @@ -106,18 +132,94 @@ static int spi_resume(struct device *dev) return value; } +static int spi_pm_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_suspend(dev); + else + return spi_legacy_suspend(dev, PMSG_SUSPEND); +} + +static int spi_pm_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + + if (pm) + return pm_generic_resume(dev); + else + return spi_legacy_resume(dev); +} + +static int spi_pm_freeze(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_freeze(dev); + else + return spi_legacy_suspend(dev, PMSG_FREEZE); +} + +static int spi_pm_thaw(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_thaw(dev); + else + return spi_legacy_resume(dev); +} + +static int spi_pm_poweroff(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_poweroff(dev); + else + return spi_legacy_suspend(dev, PMSG_HIBERNATE); +} + +static int spi_pm_restore(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_restore(dev); + else + return spi_legacy_resume(dev); +} #else -#define spi_suspend NULL -#define spi_resume NULL +#define spi_pm_suspend NULL +#define spi_pm_resume NULL +#define spi_pm_freeze NULL +#define spi_pm_thaw NULL +#define spi_pm_poweroff NULL +#define spi_pm_restore NULL #endif +static const struct dev_pm_ops spi_pm = { + .suspend = spi_pm_suspend, + .resume = spi_pm_resume, + .freeze = spi_pm_freeze, + .thaw = spi_pm_thaw, + .poweroff = spi_pm_poweroff, + .restore = spi_pm_restore, + SET_RUNTIME_PM_OPS( + pm_generic_runtime_suspend, + pm_generic_runtime_resume, + pm_generic_runtime_idle + ) +}; + struct bus_type spi_bus_type = { .name = "spi", .dev_attrs = spi_dev_attrs, .match = spi_match_device, .uevent = spi_uevent, - .suspend = spi_suspend, - .resume = spi_resume, + .pm = &spi_pm, }; EXPORT_SYMBOL_GPL(spi_bus_type); @@ -171,11 +273,16 @@ EXPORT_SYMBOL_GPL(spi_register_driver); struct boardinfo { struct list_head list; - unsigned n_board_info; - struct spi_board_info board_info[0]; + struct spi_board_info board_info; }; static LIST_HEAD(board_list); +static LIST_HEAD(spi_master_list); + +/* + * Used to protect add/del opertion for board_info list and + * spi_master list, and their matching process + */ static DEFINE_MUTEX(board_lock); /** @@ -232,6 +339,7 @@ int spi_add_device(struct spi_device *spi) { static DEFINE_MUTEX(spi_add_lock); struct device *dev = spi->master->dev.parent; + struct device *d; int status; /* Chipselects are numbered 0..max; validate. 
*/ @@ -253,10 +361,11 @@ int spi_add_device(struct spi_device *spi) */ mutex_lock(&spi_add_lock); - if (bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)) - != NULL) { + d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); + if (d != NULL) { dev_err(dev, "chipselect %d already in use\n", spi->chip_select); + put_device(d); status = -EBUSY; goto done; } @@ -267,16 +376,16 @@ int spi_add_device(struct spi_device *spi) */ status = spi_setup(spi); if (status < 0) { - dev_err(dev, "can't %s %s, status %d\n", - "setup", dev_name(&spi->dev), status); + dev_err(dev, "can't setup %s, status %d\n", + dev_name(&spi->dev), status); goto done; } /* Device may be bound to an active driver when this returns */ status = device_add(&spi->dev); if (status < 0) - dev_err(dev, "can't %s %s, status %d\n", - "add", dev_name(&spi->dev), status); + dev_err(dev, "can't add %s, status %d\n", + dev_name(&spi->dev), status); else dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); @@ -338,6 +447,20 @@ struct spi_device *spi_new_device(struct spi_master *master, } EXPORT_SYMBOL_GPL(spi_new_device); +static void spi_match_master_to_boardinfo(struct spi_master *master, + struct spi_board_info *bi) +{ + struct spi_device *dev; + + if (master->bus_num != bi->bus_num) + return; + + dev = spi_new_device(master, bi); + if (!dev) + dev_err(master->dev.parent, "can't create new device for %s\n", + bi->modalias); +} + /** * spi_register_board_info - register SPI devices for a given board * @info: array of chip descriptors @@ -360,43 +483,25 @@ EXPORT_SYMBOL_GPL(spi_new_device); int __init spi_register_board_info(struct spi_board_info const *info, unsigned n) { - struct boardinfo *bi; + struct boardinfo *bi; + int i; - bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL); + bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); if (!bi) return -ENOMEM; - bi->n_board_info = n; - memcpy(bi->board_info, info, n * sizeof *info); - - mutex_lock(&board_lock); - list_add_tail(&bi->list, &board_list); - mutex_unlock(&board_lock); - return 0; -} - -/* FIXME someone should add support for a __setup("spi", ...) 
that - * creates board info from kernel command lines - */ -static void scan_boardinfo(struct spi_master *master) -{ - struct boardinfo *bi; + for (i = 0; i < n; i++, bi++, info++) { + struct spi_master *master; - mutex_lock(&board_lock); - list_for_each_entry(bi, &board_list, list) { - struct spi_board_info *chip = bi->board_info; - unsigned n; - - for (n = bi->n_board_info; n > 0; n--, chip++) { - if (chip->bus_num != master->bus_num) - continue; - /* NOTE: this relies on spi_new_device to - * issue diagnostics when given bogus inputs - */ - (void) spi_new_device(master, chip); - } + memcpy(&bi->board_info, info, sizeof(*info)); + mutex_lock(&board_lock); + list_add_tail(&bi->list, &board_list); + list_for_each_entry(master, &spi_master_list, list) + spi_match_master_to_boardinfo(master, &bi->board_info); + mutex_unlock(&board_lock); } - mutex_unlock(&board_lock); + + return 0; } /*-------------------------------------------------------------------------*/ @@ -479,6 +584,7 @@ int spi_register_master(struct spi_master *master) { static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); struct device *dev = master->dev.parent; + struct boardinfo *bi; int status = -ENODEV; int dynamic = 0; @@ -500,6 +606,10 @@ int spi_register_master(struct spi_master *master) dynamic = 1; } + spin_lock_init(&master->bus_lock_spinlock); + mutex_init(&master->bus_lock_mutex); + master->bus_lock_flag = 0; + /* register the device, then userspace will see it. * registration fails if the bus ID is in use. */ @@ -510,20 +620,25 @@ int spi_register_master(struct spi_master *master) dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), dynamic ? " (dynamic)" : ""); - /* populate children from any spi device tables */ - scan_boardinfo(master); + mutex_lock(&board_lock); + list_add_tail(&master->list, &spi_master_list); + list_for_each_entry(bi, &board_list, list) + spi_match_master_to_boardinfo(master, &bi->board_info); + mutex_unlock(&board_lock); + status = 0; + + /* Register devices from the device tree */ + of_register_spi_devices(master); done: return status; } EXPORT_SYMBOL_GPL(spi_register_master); -static int __unregister(struct device *dev, void *master_dev) +static int __unregister(struct device *dev, void *null) { - /* note: before about 2.6.14-rc1 this would corrupt memory: */ - if (dev != master_dev) - spi_unregister_device(to_spi_device(dev)); + spi_unregister_device(to_spi_device(dev)); return 0; } @@ -541,8 +656,11 @@ void spi_unregister_master(struct spi_master *master) { int dummy; - dummy = device_for_each_child(master->dev.parent, &master->dev, - __unregister); + mutex_lock(&board_lock); + list_del(&master->list); + mutex_unlock(&board_lock); + + dummy = device_for_each_child(&master->dev, NULL, __unregister); device_unregister(&master->dev); } EXPORT_SYMBOL_GPL(spi_unregister_master); @@ -615,7 +733,7 @@ int spi_setup(struct spi_device *spi) */ bad_bits = spi->mode & ~spi->master->mode_bits; if (bad_bits) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", + dev_err(&spi->dev, "setup: unsupported mode bits %x\n", bad_bits); return -EINVAL; } @@ -639,6 +757,129 @@ int spi_setup(struct spi_device *spi) } EXPORT_SYMBOL_GPL(spi_setup); +static int __spi_async(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + + /* Half-duplex links include original MicroWire, and ones with + * only one data pin like SPI_3WIRE (switches direction) or where + * either MOSI or MISO is missing. They can also be caused by + * software limitations. 
+ */ + if ((master->flags & SPI_MASTER_HALF_DUPLEX) + || (spi->mode & SPI_3WIRE)) { + struct spi_transfer *xfer; + unsigned flags = master->flags; + + list_for_each_entry(xfer, &message->transfers, transfer_list) { + if (xfer->rx_buf && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) + return -EINVAL; + } + } + + message->spi = spi; + message->status = -EINPROGRESS; + return master->transfer(spi, message); +} + +/** + * spi_async - asynchronous SPI transfer + * @spi: device with which data will be exchanged + * @message: describes the data transfers, including completion callback + * Context: any (irqs may be blocked, etc) + * + * This call may be used in_irq and other contexts which can't sleep, + * as well as from task contexts which can sleep. + * + * The completion callback is invoked in a context which can't sleep. + * Before that invocation, the value of message->status is undefined. + * When the callback is issued, message->status holds either zero (to + * indicate complete success) or a negative error code. After that + * callback returns, the driver which issued the transfer request may + * deallocate the associated memory; it's no longer in use by any SPI + * core or controller driver code. + * + * Note that although all messages to a spi_device are handled in + * FIFO order, messages may go to different devices in other orders. + * Some device might be higher priority, or have various "hard" access + * time requirements, for example. + * + * On detection of any fault during the transfer, processing of + * the entire message is aborted, and the device is deselected. + * Until returning from the associated message completion callback, + * no other spi_message queued to that device will be processed. + * (This rule applies equally to all the synchronous transfer calls, + * which are wrappers around this core asynchronous primitive.) + */ +int spi_async(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + int ret; + unsigned long flags; + + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + + if (master->bus_lock_flag) + ret = -EBUSY; + else + ret = __spi_async(spi, message); + + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(spi_async); + +/** + * spi_async_locked - version of spi_async with exclusive bus usage + * @spi: device with which data will be exchanged + * @message: describes the data transfers, including completion callback + * Context: any (irqs may be blocked, etc) + * + * This call may be used in_irq and other contexts which can't sleep, + * as well as from task contexts which can sleep. + * + * The completion callback is invoked in a context which can't sleep. + * Before that invocation, the value of message->status is undefined. + * When the callback is issued, message->status holds either zero (to + * indicate complete success) or a negative error code. After that + * callback returns, the driver which issued the transfer request may + * deallocate the associated memory; it's no longer in use by any SPI + * core or controller driver code. + * + * Note that although all messages to a spi_device are handled in + * FIFO order, messages may go to different devices in other orders. + * Some device might be higher priority, or have various "hard" access + * time requirements, for example. 
+ * + * On detection of any fault during the transfer, processing of + * the entire message is aborted, and the device is deselected. + * Until returning from the associated message completion callback, + * no other spi_message queued to that device will be processed. + * (This rule applies equally to all the synchronous transfer calls, + * which are wrappers around this core asynchronous primitive.) + */ +int spi_async_locked(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + int ret; + unsigned long flags; + + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + + ret = __spi_async(spi, message); + + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + return ret; + +} +EXPORT_SYMBOL_GPL(spi_async_locked); + /*-------------------------------------------------------------------------*/ @@ -652,6 +893,32 @@ static void spi_complete(void *arg) complete(arg); } +static int __spi_sync(struct spi_device *spi, struct spi_message *message, + int bus_locked) +{ + DECLARE_COMPLETION_ONSTACK(done); + int status; + struct spi_master *master = spi->master; + + message->complete = spi_complete; + message->context = &done; + + if (!bus_locked) + mutex_lock(&master->bus_lock_mutex); + + status = spi_async_locked(spi, message); + + if (!bus_locked) + mutex_unlock(&master->bus_lock_mutex); + + if (status == 0) { + wait_for_completion(&done); + status = message->status; + } + message->context = NULL; + return status; +} + /** * spi_sync - blocking/synchronous SPI data transfers * @spi: device with which data will be exchanged @@ -675,21 +942,86 @@ static void spi_complete(void *arg) */ int spi_sync(struct spi_device *spi, struct spi_message *message) { - DECLARE_COMPLETION_ONSTACK(done); - int status; - - message->complete = spi_complete; - message->context = &done; - status = spi_async(spi, message); - if (status == 0) { - wait_for_completion(&done); - status = message->status; - } - message->context = NULL; - return status; + return __spi_sync(spi, message, 0); } EXPORT_SYMBOL_GPL(spi_sync); +/** + * spi_sync_locked - version of spi_sync with exclusive bus usage + * @spi: device with which data will be exchanged + * @message: describes the data transfers + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. Low-overhead controller + * drivers may DMA directly into and out of the message buffers. + * + * This call should be used by drivers that require exclusive access to the + * SPI bus. It has to be preceeded by a spi_bus_lock call. The SPI bus must + * be released by a spi_bus_unlock call when the exclusive access is over. + * + * It returns zero on success, else a negative error code. + */ +int spi_sync_locked(struct spi_device *spi, struct spi_message *message) +{ + return __spi_sync(spi, message, 1); +} +EXPORT_SYMBOL_GPL(spi_sync_locked); + +/** + * spi_bus_lock - obtain a lock for exclusive SPI bus usage + * @master: SPI bus master that should be locked for exclusive bus access + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. + * + * This call should be used by drivers that require exclusive access to the + * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the + * exclusive access is over. Data transfer must be done by spi_sync_locked + * and spi_async_locked calls when the SPI bus lock is held. 
+ * + * It returns zero on success, else a negative error code. + */ +int spi_bus_lock(struct spi_master *master) +{ + unsigned long flags; + + mutex_lock(&master->bus_lock_mutex); + + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + master->bus_lock_flag = 1; + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + /* mutex remains locked until spi_bus_unlock is called */ + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bus_lock); + +/** + * spi_bus_unlock - release the lock for exclusive SPI bus usage + * @master: SPI bus master that was locked for exclusive bus access + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. + * + * This call releases an SPI bus lock previously obtained by an spi_bus_lock + * call. + * + * It returns zero on success, else a negative error code. + */ +int spi_bus_unlock(struct spi_master *master) +{ + master->bus_lock_flag = 0; + + mutex_unlock(&master->bus_lock_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bus_unlock); + /* portable code must never pass more than 32 bytes */ #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index 73e24ef5a2f..3f223511127 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c @@ -1,7 +1,7 @@ /* * Blackfin On-Chip SPI Driver * - * Copyright 2004-2007 Analog Devices Inc. + * Copyright 2004-2010 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/slab.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> @@ -40,13 +41,16 @@ MODULE_LICENSE("GPL"); #define RUNNING_STATE ((void *)1) #define DONE_STATE ((void *)2) #define ERROR_STATE ((void *)-1) -#define QUEUE_RUNNING 0 -#define QUEUE_STOPPED 1 -/* Value to send if no TX value is supplied */ -#define SPI_IDLE_TXVAL 0x0000 +struct bfin_spi_master_data; -struct driver_data { +struct bfin_spi_transfer_ops { + void (*write) (struct bfin_spi_master_data *); + void (*read) (struct bfin_spi_master_data *); + void (*duplex) (struct bfin_spi_master_data *); +}; + +struct bfin_spi_master_data { /* Driver model hookup */ struct platform_device *pdev; @@ -68,7 +72,7 @@ struct driver_data { spinlock_t lock; struct list_head queue; int busy; - int run; + bool running; /* Message Transfer pump */ struct tasklet_struct pump_transfers; @@ -76,7 +80,7 @@ struct driver_data { /* Current message transfer state info */ struct spi_message *cur_msg; struct spi_transfer *cur_transfer; - struct chip_data *cur_chip; + struct bfin_spi_slave_data *cur_chip; size_t len_in_bytes; size_t len; void *tx; @@ -91,38 +95,37 @@ struct driver_data { dma_addr_t rx_dma; dma_addr_t tx_dma; + int irq_requested; + int spi_irq; + size_t rx_map_len; size_t tx_map_len; u8 n_bytes; + u16 ctrl_reg; + u16 flag_reg; + int cs_change; - void (*write) (struct driver_data *); - void (*read) (struct driver_data *); - void (*duplex) (struct driver_data *); + const struct bfin_spi_transfer_ops *ops; }; -struct chip_data { +struct bfin_spi_slave_data { u16 ctl_reg; u16 baud; u16 flag; u8 chip_select_num; - u8 n_bytes; - u8 width; /* 0 or 1 */ u8 enable_dma; - u8 bits_per_word; /* 8 or 16 */ - u8 cs_change_per_word; u16 cs_chg_udelay; /* Some devices require > 255usec delay */ u32 cs_gpio; u16 idle_tx_val; - void (*write) (struct driver_data *); - void (*read) (struct driver_data *); - void (*duplex) (struct driver_data 
*); + u8 pio_interrupt; /* use spi data irq */ + const struct bfin_spi_transfer_ops *ops; }; #define DEFINE_SPI_REG(reg, off) \ -static inline u16 read_##reg(struct driver_data *drv_data) \ +static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ { return bfin_read16(drv_data->regs_base + off); } \ -static inline void write_##reg(struct driver_data *drv_data, u16 v) \ +static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ { bfin_write16(drv_data->regs_base + off, v); } DEFINE_SPI_REG(CTRL, 0x00) @@ -133,7 +136,7 @@ DEFINE_SPI_REG(RDBR, 0x10) DEFINE_SPI_REG(BAUD, 0x14) DEFINE_SPI_REG(SHAW, 0x18) -static void bfin_spi_enable(struct driver_data *drv_data) +static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) { u16 cr; @@ -141,7 +144,7 @@ static void bfin_spi_enable(struct driver_data *drv_data) write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); } -static void bfin_spi_disable(struct driver_data *drv_data) +static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) { u16 cr; @@ -164,7 +167,7 @@ static u16 hz_to_spi_baud(u32 speed_hz) return spi_baud; } -static int bfin_spi_flush(struct driver_data *drv_data) +static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; @@ -178,13 +181,12 @@ static int bfin_spi_flush(struct driver_data *drv_data) } /* Chip select operation functions for cs_change flag */ -static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip) +static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num)) { + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { u16 flag = read_FLAG(drv_data); - flag |= chip->flag; - flag &= ~(chip->flag << 8); + flag &= ~chip->flag; write_FLAG(drv_data, flag); } else { @@ -192,13 +194,13 @@ static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *c } } -static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip) +static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num)) { + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { u16 flag = read_FLAG(drv_data); - flag &= ~chip->flag; - flag |= (chip->flag << 8); + flag |= chip->flag; write_FLAG(drv_data, flag); } else { @@ -210,16 +212,43 @@ static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data udelay(chip->cs_chg_udelay); } +/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ +static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag |= (chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + +static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag &= ~(chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + /* stop controller and re-config current chip*/ -static void bfin_spi_restore_state(struct driver_data *drv_data) +static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) { - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; /* Clear status and disable clock */ write_STAT(drv_data, BIT_STAT_CLR); bfin_spi_disable(drv_data); 
dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); + SSYNC(); + /* Load the registers */ write_CTRL(drv_data, chip->ctl_reg); write_BAUD(drv_data, chip->baud); @@ -229,49 +258,12 @@ static void bfin_spi_restore_state(struct driver_data *drv_data) } /* used to kick off transfer in rx mode and read unwanted RX data */ -static inline void bfin_spi_dummy_read(struct driver_data *drv_data) +static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) { (void) read_RDBR(drv_data); } -static void bfin_spi_null_writer(struct driver_data *drv_data) -{ - u8 n_bytes = drv_data->n_bytes; - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - write_TDBR(drv_data, tx_val); - drv_data->tx += n_bytes; - /* wait until transfer finished. - checking SPIF or TXS may not guarantee transfer completion */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - /* discard RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_null_reader(struct driver_data *drv_data) -{ - u8 n_bytes = drv_data->n_bytes; - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, tx_val); - drv_data->rx += n_bytes; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_u8_writer(struct driver_data *drv_data) +static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); @@ -287,25 +279,7 @@ static void bfin_spi_u8_writer(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_writer(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - /* make sure transfer finished before deactiving CS */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u8_reader(struct driver_data *drv_data) +static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; @@ -320,25 +294,7 @@ static void bfin_spi_u8_reader(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_reader(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - u16 tx_val = chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u8_duplex(struct driver_data *drv_data) +static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -351,24 +307,13 @@ static void bfin_spi_u8_duplex(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_duplex(struct driver_data *drv_data) -{ - struct chip_data *chip = 
drv_data->cur_chip; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { + .write = bfin_spi_u8_writer, + .read = bfin_spi_u8_reader, + .duplex = bfin_spi_u8_duplex, +}; -static void bfin_spi_u16_writer(struct driver_data *drv_data) +static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); @@ -385,26 +330,7 @@ static void bfin_spi_u16_writer(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_writer(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - /* make sure transfer finished before deactiving CS */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u16_reader(struct driver_data *drv_data) +static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; @@ -420,26 +346,7 @@ static void bfin_spi_u16_reader(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_reader(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - u16 tx_val = chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u16_duplex(struct driver_data *drv_data) +static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -454,27 +361,14 @@ static void bfin_spi_u16_duplex(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_duplex(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - bfin_spi_cs_deactive(drv_data, chip); - } -} +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { + .write = bfin_spi_u16_writer, + .read = bfin_spi_u16_reader, + .duplex = bfin_spi_u16_duplex, +}; -/* test if ther is more transfer to be done */ -static void *bfin_spi_next_transfer(struct driver_data *drv_data) +/* test if there is more transfer to be done */ +static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct spi_transfer 
*trans = drv_data->cur_transfer; @@ -493,9 +387,9 @@ static void *bfin_spi_next_transfer(struct driver_data *drv_data) * caller already set message->status; * dma and pio irqs are blocked give finished message back */ -static void bfin_spi_giveback(struct driver_data *drv_data) +static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) { - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_transfer *last_transfer; unsigned long flags; struct spi_message *msg; @@ -524,10 +418,83 @@ static void bfin_spi_giveback(struct driver_data *drv_data) msg->complete(msg->context); } +/* spi data irq handler */ +static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) +{ + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; + struct spi_message *msg = drv_data->cur_msg; + int n_bytes = drv_data->n_bytes; + + /* wait until transfer finished. */ + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + + if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || + (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { + /* last read */ + if (drv_data->rx) { + dev_dbg(&drv_data->pdev->dev, "last read\n"); + if (n_bytes == 2) + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + else if (n_bytes == 1) + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + drv_data->rx += n_bytes; + } + + msg->actual_length += drv_data->len_in_bytes; + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + /* Move to next transfer */ + msg->state = bfin_spi_next_transfer(drv_data); + + disable_irq_nosync(drv_data->spi_irq); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + return IRQ_HANDLED; + } + + if (drv_data->rx && drv_data->tx) { + /* duplex */ + dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); + if (drv_data->n_bytes == 2) { + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + } else if (drv_data->n_bytes == 1) { + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + } + } else if (drv_data->rx) { + /* read */ + dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); + if (drv_data->n_bytes == 2) + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + else if (drv_data->n_bytes == 1) + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, chip->idle_tx_val); + } else if (drv_data->tx) { + /* write */ + dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); + bfin_spi_dummy_read(drv_data); + if (drv_data->n_bytes == 2) + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + else if (drv_data->n_bytes == 1) + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + } + + if (drv_data->tx) + drv_data->tx += n_bytes; + if (drv_data->rx) + drv_data->rx += n_bytes; + + return IRQ_HANDLED; +} + static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) { - struct driver_data *drv_data = dev_id; - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_message *msg = drv_data->cur_msg; unsigned long timeout; unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); @@ -537,11 +504,16 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", dmastat, spistat); - clear_dma_irqstat(drv_data->dma_channel); + if (drv_data->rx != NULL) { + u16 cr = 
read_CTRL(drv_data); + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ + write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ + write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ + } - /* Wait for DMA to complete */ - while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN) - cpu_relax(); + clear_dma_irqstat(drv_data->dma_channel); /* * wait for the last transaction shifted out. HRM states: @@ -550,8 +522,8 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) * register until it goes low for 2 successive reads */ if (drv_data->tx != NULL) { - while ((read_STAT(drv_data) & TXS) || - (read_STAT(drv_data) & TXS)) + while ((read_STAT(drv_data) & BIT_STAT_TXS) || + (read_STAT(drv_data) & BIT_STAT_TXS)) cpu_relax(); } @@ -560,14 +532,14 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) dmastat, read_STAT(drv_data)); timeout = jiffies + HZ; - while (!(read_STAT(drv_data) & SPIF)) + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) if (!time_before(jiffies, timeout)) { dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); break; } else cpu_relax(); - if ((dmastat & DMA_ERR) && (spistat & RBSY)) { + if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { msg->state = ERROR_STATE; dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); } else { @@ -587,20 +559,20 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) dev_dbg(&drv_data->pdev->dev, "disable dma channel irq%d\n", drv_data->dma_channel); - dma_disable_irq(drv_data->dma_channel); + dma_disable_irq_nosync(drv_data->dma_channel); return IRQ_HANDLED; } static void bfin_spi_pump_transfers(unsigned long data) { - struct driver_data *drv_data = (struct driver_data *)data; + struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; - struct chip_data *chip = NULL; - u8 width; - u16 cr, dma_width, dma_config; + struct bfin_spi_slave_data *chip = NULL; + unsigned int bits_per_word; + u16 cr, cr_width, dma_width, dma_config; u32 tranf_success = 1; u8 full_duplex = 0; @@ -638,7 +610,7 @@ static void bfin_spi_pump_transfers(unsigned long data) udelay(previous->delay_usecs); } - /* Setup the transfer state based on the type of transfer */ + /* Flush any existing transfers that may be sitting in the hardware */ if (bfin_spi_flush(drv_data) == 0) { dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); message->status = -EIO; @@ -678,52 +650,31 @@ static void bfin_spi_pump_transfers(unsigned long data) drv_data->cs_change = transfer->cs_change; /* Bits per word setup */ - switch (transfer->bits_per_word) { - case 8: + bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; + if (bits_per_word == 8) { drv_data->n_bytes = 1; - width = CFG_SPI_WORDSIZE8; - drv_data->read = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; - drv_data->write = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; - drv_data->duplex = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; - break; - - case 16: + drv_data->len = transfer->len; + cr_width = 0; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; + } else if (bits_per_word == 16) { drv_data->n_bytes = 2; - width = CFG_SPI_WORDSIZE16; - drv_data->read = chip->cs_change_per_word ? 
- bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; - drv_data->write = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; - drv_data->duplex = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; - break; - - default: - /* No change, the same as default setting */ - drv_data->n_bytes = chip->n_bytes; - width = chip->width; - drv_data->write = drv_data->tx ? chip->write : bfin_spi_null_writer; - drv_data->read = drv_data->rx ? chip->read : bfin_spi_null_reader; - drv_data->duplex = chip->duplex ? chip->duplex : bfin_spi_null_writer; - break; - } - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); - cr |= (width << 8); - write_CTRL(drv_data, cr); - - if (width == CFG_SPI_WORDSIZE16) { drv_data->len = (transfer->len) >> 1; + cr_width = BIT_CTL_WORDSIZE; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; } else { - drv_data->len = transfer->len; + dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); + message->status = -EINVAL; + bfin_spi_giveback(drv_data); + return; } + cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); + cr |= cr_width; + write_CTRL(drv_data, cr); + dev_dbg(&drv_data->pdev->dev, - "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", - drv_data->write, chip->write, bfin_spi_null_writer); + "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", + drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); - /* speed and width has been set on per message */ message->state = RUNNING_STATE; dma_config = 0; @@ -734,13 +685,11 @@ static void bfin_spi_pump_transfers(unsigned long data) write_BAUD(drv_data, chip->baud); write_STAT(drv_data, BIT_STAT_CLR); - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); - if (drv_data->cs_change) - bfin_spi_cs_active(drv_data, chip); + bfin_spi_cs_active(drv_data, chip); dev_dbg(&drv_data->pdev->dev, "now pumping a transfer: width is %d, len is %d\n", - width, transfer->len); + cr_width, transfer->len); /* * Try to map dma buffer and do a dma transfer. If successful use, @@ -759,7 +708,7 @@ static void bfin_spi_pump_transfers(unsigned long data) /* config dma channel */ dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); set_dma_x_count(drv_data->dma_channel, drv_data->len); - if (width == CFG_SPI_WORDSIZE16) { + if (cr_width == BIT_CTL_WORDSIZE) { set_dma_x_modify(drv_data->dma_channel, 2); dma_width = WDSIZE_16; } else { @@ -845,73 +794,100 @@ static void bfin_spi_pump_transfers(unsigned long data) dma_enable_irq(drv_data->dma_channel); local_irq_restore(flags); - } else { - /* IO mode write then read */ - dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); - - /* we always use SPI_WRITE mode. SPI_READ mode - seems to have problems with setting up the - output value in TDBR prior to the transfer. */ - write_CTRL(drv_data, (cr | CFG_SPI_WRITE)); - - if (full_duplex) { - /* full duplex mode */ - BUG_ON((drv_data->tx_end - drv_data->tx) != - (drv_data->rx_end - drv_data->rx)); - dev_dbg(&drv_data->pdev->dev, - "IO duplex: cr is 0x%x\n", cr); - - drv_data->duplex(drv_data); + return; + } - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->tx != NULL) { - /* write only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO write: cr is 0x%x\n", cr); + /* + * We always use SPI_WRITE mode (transfer starts with TDBR write). + * SPI_READ mode (transfer starts with RDBR read) seems to have + * problems with setting up the output value in TDBR prior to the + * start of the transfer. 
+ */ + write_CTRL(drv_data, cr | BIT_CTL_TXMOD); - drv_data->write(drv_data); + if (chip->pio_interrupt) { + /* SPI irq should have been disabled by now */ - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->rx != NULL) { - /* read only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO read: cr is 0x%x\n", cr); + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); - drv_data->read(drv_data); - if (drv_data->rx != drv_data->rx_end) - tranf_success = 0; + /* start transfer */ + if (drv_data->tx == NULL) + write_TDBR(drv_data, chip->idle_tx_val); + else { + if (bits_per_word == 8) + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + else + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + drv_data->tx += drv_data->n_bytes; } - if (!tranf_success) { - dev_dbg(&drv_data->pdev->dev, - "IO write error!\n"); - message->state = ERROR_STATE; - } else { - /* Update total byte transfered */ - message->actual_length += drv_data->len_in_bytes; - /* Move to next transfer of this msg */ - message->state = bfin_spi_next_transfer(drv_data); - if (drv_data->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - } - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); + /* once TDBR is empty, interrupt is triggered */ + enable_irq(drv_data->spi_irq); + return; + } + + /* IO mode */ + dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); + + if (full_duplex) { + /* full duplex mode */ + BUG_ON((drv_data->tx_end - drv_data->tx) != + (drv_data->rx_end - drv_data->rx)); + dev_dbg(&drv_data->pdev->dev, + "IO duplex: cr is 0x%x\n", cr); + + drv_data->ops->duplex(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->tx != NULL) { + /* write only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO write: cr is 0x%x\n", cr); + + drv_data->ops->write(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->rx != NULL) { + /* read only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO read: cr is 0x%x\n", cr); + + drv_data->ops->read(drv_data); + if (drv_data->rx != drv_data->rx_end) + tranf_success = 0; } + + if (!tranf_success) { + dev_dbg(&drv_data->pdev->dev, + "IO write error!\n"); + message->state = ERROR_STATE; + } else { + /* Update total byte transfered */ + message->actual_length += drv_data->len_in_bytes; + /* Move to next transfer of this msg */ + message->state = bfin_spi_next_transfer(drv_data); + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + } + + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); } /* pop a msg from queue and kick off real transfer */ static void bfin_spi_pump_messages(struct work_struct *work) { - struct driver_data *drv_data; + struct bfin_spi_master_data *drv_data; unsigned long flags; - drv_data = container_of(work, struct driver_data, pump_messages); + drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); /* Lock queue and check for queue work */ spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { + if (list_empty(&drv_data->queue) || !drv_data->running) { /* pumper kicked off but no work to do */ drv_data->busy = 0; spin_unlock_irqrestore(&drv_data->lock, flags); @@ -961,12 +937,12 @@ static void bfin_spi_pump_messages(struct work_struct *work) */ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) { - struct driver_data *drv_data = 
spi_master_get_devdata(spi->master); + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->run == QUEUE_STOPPED) { + if (!drv_data->running) { spin_unlock_irqrestore(&drv_data->lock, flags); return -ESHUTDOWN; } @@ -978,7 +954,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) dev_dbg(&spi->dev, "adding an msg in transfer() \n"); list_add_tail(&msg->queue, &drv_data->queue); - if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) + if (drv_data->running && !drv_data->busy) queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); @@ -1002,147 +978,187 @@ static u16 ssel[][MAX_SPI_SSEL] = { P_SPI2_SSEL6, P_SPI2_SSEL7}, }; -/* first setup for new devices */ +/* setup for devices (may be called multiple times -- not just first setup) */ static int bfin_spi_setup(struct spi_device *spi) { - struct bfin5xx_spi_chip *chip_info = NULL; - struct chip_data *chip; - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - int ret; - - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) - return -EINVAL; + struct bfin5xx_spi_chip *chip_info; + struct bfin_spi_slave_data *chip = NULL; + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); + u16 bfin_ctl_reg; + int ret = -EINVAL; /* Only alloc (or use chip_info) on first setup */ + chip_info = NULL; chip = spi_get_ctldata(spi); if (chip == NULL) { - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) - return -ENOMEM; + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, "cannot allocate chip data\n"); + ret = -ENOMEM; + goto error; + } chip->enable_dma = 0; chip_info = spi->controller_data; } + /* Let people set non-standard bits directly */ + bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | + BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; + /* chip_info isn't always needed */ if (chip_info) { /* Make sure people stop trying to set fields via ctl_reg * when they should actually be using common SPI framework. - * Currently we let through: WOM EMISO PSSE GM SZ TIMOD. + * Currently we let through: WOM EMISO PSSE GM SZ. * Not sure if a user actually needs/uses any of these, * but let's assume (for now) they do. 
*/ - if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) { + if (chip_info->ctl_reg & ~bfin_ctl_reg) { dev_err(&spi->dev, "do not set bits in ctl_reg " "that the SPI framework manages\n"); - return -EINVAL; + goto error; } - chip->enable_dma = chip_info->enable_dma != 0 && drv_data->master_info->enable_dma; chip->ctl_reg = chip_info->ctl_reg; - chip->bits_per_word = chip_info->bits_per_word; - chip->cs_change_per_word = chip_info->cs_change_per_word; chip->cs_chg_udelay = chip_info->cs_chg_udelay; - chip->cs_gpio = chip_info->cs_gpio; chip->idle_tx_val = chip_info->idle_tx_val; + chip->pio_interrupt = chip_info->pio_interrupt; + spi->bits_per_word = chip_info->bits_per_word; + } else { + /* force a default base state */ + chip->ctl_reg &= bfin_ctl_reg; + } + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { + dev_err(&spi->dev, "%d bits_per_word is not supported\n", + spi->bits_per_word); + goto error; } /* translate common spi framework into our register */ + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { + dev_err(&spi->dev, "unsupported spi modes detected\n"); + goto error; + } if (spi->mode & SPI_CPOL) - chip->ctl_reg |= CPOL; + chip->ctl_reg |= BIT_CTL_CPOL; if (spi->mode & SPI_CPHA) - chip->ctl_reg |= CPHA; + chip->ctl_reg |= BIT_CTL_CPHA; if (spi->mode & SPI_LSB_FIRST) - chip->ctl_reg |= LSBF; + chip->ctl_reg |= BIT_CTL_LSBF; /* we dont support running in slave mode (yet?) */ - chip->ctl_reg |= MSTR; + chip->ctl_reg |= BIT_CTL_MASTER; /* + * Notice: for blackfin, the speed_hz is the value of register + * SPI_BAUD, not the real baudrate + */ + chip->baud = hz_to_spi_baud(spi->max_speed_hz); + chip->chip_select_num = spi->chip_select; + if (chip->chip_select_num < MAX_CTRL_CS) { + if (!(spi->mode & SPI_CPHA)) + dev_warn(&spi->dev, "Warning: SPI CPHA not set:" + " Slave Select not under software control!\n" + " See Documentation/blackfin/bfin-spi-notes.txt"); + + chip->flag = (1 << spi->chip_select) << 8; + } else + chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; + + if (chip->enable_dma && chip->pio_interrupt) { + dev_err(&spi->dev, "enable_dma is set, " + "do not set pio_interrupt\n"); + goto error; + } + /* * if any one SPI chip is registered and wants DMA, request the * DMA channel for it */ if (chip->enable_dma && !drv_data->dma_requested) { /* register dma irq handler */ - if (request_dma(drv_data->dma_channel, "BFIN_SPI_DMA") < 0) { - dev_dbg(&spi->dev, + ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); + if (ret) { + dev_err(&spi->dev, "Unable to request BlackFin SPI DMA channel\n"); - return -ENODEV; + goto error; } - if (set_dma_callback(drv_data->dma_channel, - bfin_spi_dma_irq_handler, drv_data) < 0) { - dev_dbg(&spi->dev, "Unable to set dma callback\n"); - return -EPERM; + drv_data->dma_requested = 1; + + ret = set_dma_callback(drv_data->dma_channel, + bfin_spi_dma_irq_handler, drv_data); + if (ret) { + dev_err(&spi->dev, "Unable to set dma callback\n"); + goto error; } dma_disable_irq(drv_data->dma_channel); - drv_data->dma_requested = 1; } - /* - * Notice: for blackfin, the speed_hz is the value of register - * SPI_BAUD, not the real baudrate - */ - chip->baud = hz_to_spi_baud(spi->max_speed_hz); - chip->flag = 1 << (spi->chip_select); - chip->chip_select_num = spi->chip_select; - - if (chip->chip_select_num == 0) { - ret = gpio_request(chip->cs_gpio, spi->modalias); + if (chip->pio_interrupt && !drv_data->irq_requested) { + ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, + IRQF_DISABLED, "BFIN_SPI", drv_data); if 
(ret) { - if (drv_data->dma_requested) - free_dma(drv_data->dma_channel); - return ret; + dev_err(&spi->dev, "Unable to register spi IRQ\n"); + goto error; } - gpio_direction_output(chip->cs_gpio, 1); + drv_data->irq_requested = 1; + /* we use write mode, spi irq has to be disabled here */ + disable_irq(drv_data->spi_irq); } - switch (chip->bits_per_word) { - case 8: - chip->n_bytes = 1; - chip->width = CFG_SPI_WORDSIZE8; - chip->read = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; - chip->write = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; - chip->duplex = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; - break; - - case 16: - chip->n_bytes = 2; - chip->width = CFG_SPI_WORDSIZE16; - chip->read = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; - chip->write = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; - chip->duplex = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; - break; - - default: - dev_err(&spi->dev, "%d bits_per_word is not supported\n", - chip->bits_per_word); - if (chip_info) - kfree(chip); - return -ENODEV; + if (chip->chip_select_num >= MAX_CTRL_CS) { + /* Only request on first setup */ + if (spi_get_ctldata(spi) == NULL) { + ret = gpio_request(chip->cs_gpio, spi->modalias); + if (ret) { + dev_err(&spi->dev, "gpio_request() error\n"); + goto pin_error; + } + gpio_direction_output(chip->cs_gpio, 1); + } } dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", - spi->modalias, chip->width, chip->enable_dma); + spi->modalias, spi->bits_per_word, chip->enable_dma); dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", chip->ctl_reg, chip->flag); spi_set_ctldata(spi, chip); dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); - if ((chip->chip_select_num > 0) - && (chip->chip_select_num <= spi->master->num_chipselect)) - peripheral_request(ssel[spi->master->bus_num] - [chip->chip_select_num-1], spi->modalias); + if (chip->chip_select_num < MAX_CTRL_CS) { + ret = peripheral_request(ssel[spi->master->bus_num] + [chip->chip_select_num-1], spi->modalias); + if (ret) { + dev_err(&spi->dev, "peripheral_request() error\n"); + goto pin_error; + } + } + bfin_spi_cs_enable(drv_data, chip); bfin_spi_cs_deactive(drv_data, chip); return 0; + + pin_error: + if (chip->chip_select_num >= MAX_CTRL_CS) + gpio_free(chip->cs_gpio); + else + peripheral_free(ssel[spi->master->bus_num] + [chip->chip_select_num - 1]); + error: + if (chip) { + if (drv_data->dma_requested) + free_dma(drv_data->dma_channel); + drv_data->dma_requested = 0; + + kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); + } + + return ret; } /* @@ -1151,28 +1167,30 @@ static int bfin_spi_setup(struct spi_device *spi) */ static void bfin_spi_cleanup(struct spi_device *spi) { - struct chip_data *chip = spi_get_ctldata(spi); + struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); if (!chip) return; - if ((chip->chip_select_num > 0) - && (chip->chip_select_num <= spi->master->num_chipselect)) + if (chip->chip_select_num < MAX_CTRL_CS) { peripheral_free(ssel[spi->master->bus_num] [chip->chip_select_num-1]); - - if (chip->chip_select_num == 0) + bfin_spi_cs_disable(drv_data, chip); + } else gpio_free(chip->cs_gpio); kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); } -static inline int 
bfin_spi_init_queue(struct driver_data *drv_data) +static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) { INIT_LIST_HEAD(&drv_data->queue); spin_lock_init(&drv_data->lock); - drv_data->run = QUEUE_STOPPED; + drv_data->running = false; drv_data->busy = 0; /* init transfer tasklet */ @@ -1189,18 +1207,18 @@ static inline int bfin_spi_init_queue(struct driver_data *drv_data) return 0; } -static inline int bfin_spi_start_queue(struct driver_data *drv_data) +static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { + if (drv_data->running || drv_data->busy) { spin_unlock_irqrestore(&drv_data->lock, flags); return -EBUSY; } - drv_data->run = QUEUE_RUNNING; + drv_data->running = true; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; drv_data->cur_chip = NULL; @@ -1211,7 +1229,7 @@ static inline int bfin_spi_start_queue(struct driver_data *drv_data) return 0; } -static inline int bfin_spi_stop_queue(struct driver_data *drv_data) +static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; unsigned limit = 500; @@ -1225,7 +1243,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ - drv_data->run = QUEUE_STOPPED; + drv_data->running = false; while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); @@ -1240,7 +1258,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) return status; } -static inline int bfin_spi_destroy_queue(struct driver_data *drv_data) +static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) { int status; @@ -1258,14 +1276,14 @@ static int __init bfin_spi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct bfin5xx_spi_master *platform_info; struct spi_master *master; - struct driver_data *drv_data = 0; + struct bfin_spi_master_data *drv_data; struct resource *res; int status = 0; platform_info = dev->platform_data; /* Allocate master with space for drv_data */ - master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); + master = spi_alloc_master(dev, sizeof(*drv_data)); if (!master) { dev_err(&pdev->dev, "can not alloc spi_master\n"); return -ENOMEM; @@ -1294,18 +1312,26 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_get_res; } - drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1)); + drv_data->regs_base = ioremap(res->start, resource_size(res)); if (drv_data->regs_base == NULL) { dev_err(dev, "Cannot map IO\n"); status = -ENXIO; goto out_error_ioremap; } - drv_data->dma_channel = platform_get_irq(pdev, 0); - if (drv_data->dma_channel < 0) { + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (res == NULL) { dev_err(dev, "No DMA channel specified\n"); status = -ENOENT; - goto out_error_no_dma_ch; + goto out_error_free_io; + } + drv_data->dma_channel = res->start; + + drv_data->spi_irq = platform_get_irq(pdev, 0); + if (drv_data->spi_irq < 0) { + dev_err(dev, "No spi pio irq specified\n"); + status = -ENOENT; + goto out_error_free_io; } /* Initial and start queue */ @@ -1327,6 +1353,12 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_queue_alloc; } + /* Reset SPI registers. 
If these registers were used by the boot loader, + * the sky may fall on your head if you enable the dma controller. + */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); + /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); status = spi_register_master(master); @@ -1342,7 +1374,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev) out_error_queue_alloc: bfin_spi_destroy_queue(drv_data); -out_error_no_dma_ch: +out_error_free_io: iounmap((void *) drv_data->regs_base); out_error_ioremap: out_error_get_res: @@ -1354,7 +1386,7 @@ out_error_get_res: /* stop hardware and remove the driver */ static int __devexit bfin_spi_remove(struct platform_device *pdev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; if (!drv_data) @@ -1374,6 +1406,11 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) free_dma(drv_data->dma_channel); } + if (drv_data->irq_requested) { + free_irq(drv_data->spi_irq, drv_data); + drv_data->irq_requested = 0; + } + /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); @@ -1388,26 +1425,32 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; status = bfin_spi_stop_queue(drv_data); if (status != 0) return status; - /* stop hardware */ - bfin_spi_disable(drv_data); + drv_data->ctrl_reg = read_CTRL(drv_data); + drv_data->flag_reg = read_FLAG(drv_data); + + /* + * reset SPI_CTL and SPI_FLG registers + */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); return 0; } static int bfin_spi_resume(struct platform_device *pdev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; - /* Enable the SPI interface */ - bfin_spi_enable(drv_data); + write_CTRL(drv_data, drv_data->ctrl_reg); + write_FLAG(drv_data, drv_data->flag_reg); /* Start the queue running */ status = bfin_spi_start_queue(drv_data); @@ -1438,7 +1481,7 @@ static int __init bfin_spi_init(void) { return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); } -module_init(bfin_spi_init); +subsys_initcall(bfin_spi_init); static void __exit bfin_spi_exit(void) { diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 2a5abc08e85..8b55724d5f3 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c @@ -23,6 +23,7 @@ #include <linux/delay.h> #include <linux/errno.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> @@ -258,6 +259,10 @@ static void bitbang_work(struct work_struct *work) struct spi_bitbang *bitbang = container_of(work, struct spi_bitbang, work); unsigned long flags; + int (*setup_transfer)(struct spi_device *, + struct spi_transfer *); + + setup_transfer = bitbang->setup_transfer; spin_lock_irqsave(&bitbang->lock, flags); bitbang->busy = 1; @@ -269,8 +274,7 @@ static void bitbang_work(struct work_struct *work) unsigned tmp; unsigned cs_change; int status; - int (*setup_transfer)(struct spi_device *, - struct spi_transfer *); + int do_setup = -1; m = container_of(bitbang->queue.next, struct spi_message, queue); 
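The reworked bfin_spi probe above takes its register window from an IORESOURCE_MEM entry, its DMA channel from an IORESOURCE_DMA entry, and its PIO-mode data interrupt from platform_get_irq(), so board code has to publish all three resources. A minimal board-file sketch follows; the device name, MMR address, DMA channel and IRQ numbers are illustrative placeholders, not values taken from this patch.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Board-file sketch (placeholder values): the three resources the
 * reworked probe looks up -- MMR block, DMA channel, SPI data IRQ.
 */
static struct resource bfin_spi0_resource[] = {
	{
		.start	= 0xFFC00500,		/* example MMR base */
		.end	= 0xFFC00500 + 0xFF,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 7,			/* example DMA channel */
		.end	= 7,
		.flags	= IORESOURCE_DMA,
	}, {
		.start	= 38,			/* example SPI data IRQ */
		.end	= 38,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_spi0_device = {
	.name		= "bfin-spi",		/* assumed driver name */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(bfin_spi0_resource),
	.resource	= bfin_spi0_resource,
	/* .dev.platform_data would point at a struct bfin5xx_spi_master */
};

With resources laid out like this, platform_get_resource(pdev, IORESOURCE_DMA, 0) and platform_get_irq(pdev, 0) in the probe above resolve to the DMA channel and the data IRQ respectively.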
@@ -287,22 +291,24 @@ static void bitbang_work(struct work_struct *work) tmp = 0; cs_change = 1; status = 0; - setup_transfer = NULL; list_for_each_entry (t, &m->transfers, transfer_list) { - /* override or restore speed and wordsize */ - if (t->speed_hz || t->bits_per_word) { - setup_transfer = bitbang->setup_transfer; + /* override speed or wordsize? */ + if (t->speed_hz || t->bits_per_word) + do_setup = 1; + + /* init (-1) or override (1) transfer params */ + if (do_setup != 0) { if (!setup_transfer) { status = -ENOPROTOOPT; break; } - } - if (setup_transfer) { status = setup_transfer(spi, t); if (status < 0) break; + if (do_setup == -1) + do_setup = 0; } /* set up default clock polarity, and activate chip; @@ -363,10 +369,6 @@ static void bitbang_work(struct work_struct *work) m->status = status; m->complete(m->context); - /* restore speed and wordsize */ - if (setup_transfer) - setup_transfer(spi, NULL); - /* normally deactivate chipselect ... unless no error and * cs_change has hinted that the next message will probably * be for this chip too. diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h new file mode 100644 index 00000000000..c16bf853c3e --- /dev/null +++ b/drivers/spi/spi_bitbang_txrx.h @@ -0,0 +1,97 @@ +/* + * Mix this utility code with some glue code to get one of several types of + * simple SPI master driver. Two do polled word-at-a-time I/O: + * + * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](), + * expanding the per-word routines from the inline templates below. + * + * - Drivers for controllers resembling bare shift registers. Provide + * chipselect() and txrx_word[](), with custom setup()/cleanup() methods + * that use your controller's clock and chipselect registers. + * + * Some hardware works well with requests at spi_transfer scope: + * + * - Drivers leveraging smarter hardware, with fifos or DMA; or for half + * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(), + * and custom setup()/cleanup() methods. + */ + +/* + * The code that knows what GPIO pins do what should have declared four + * functions, ideally as inlines, before including this header: + * + * void setsck(struct spi_device *, int is_on); + * void setmosi(struct spi_device *, int is_on); + * int getmiso(struct spi_device *); + * void spidelay(unsigned); + * + * setsck()'s is_on parameter is a zero/nonzero boolean. + * + * setmosi()'s is_on parameter is a zero/nonzero boolean. + * + * getmiso() is required to return 0 or 1 only. Any other value is invalid + * and will result in improper operation. + * + * A non-inlined routine would call bitbang_txrx_*() routines. The + * main loop could easily compile down to a handful of instructions, + * especially if the delay is a NOP (to run at peak speed). + * + * Since this is software, the timings may not be exactly what your board's + * chips need ... there may be several reasons you'd need to tweak timings + * in these routines, not just make to make it faster or slower to match a + * particular CPU clock rate. 
+ */ + +static inline u32 +bitbang_txrx_be_cpha0(struct spi_device *spi, + unsigned nsecs, unsigned cpol, unsigned flags, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on trailing edge */ + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, !cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on leading edge */ + word <<= 1; + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); + setsck(spi, cpol); + } + return word; +} + +static inline u32 +bitbang_txrx_be_cpha1(struct spi_device *spi, + unsigned nsecs, unsigned cpol, unsigned flags, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on leading edge */ + setsck(spi, !cpol); + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on trailing edge */ + word <<= 1; + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); + } + return word; +} diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c index c2184866fa9..0d4ceba3b59 100644 --- a/drivers/spi/spi_butterfly.c +++ b/drivers/spi/spi_butterfly.c @@ -149,15 +149,14 @@ static void butterfly_chipselect(struct spi_device *spi, int value) #define spidelay(X) do{}while(0) //#define spidelay ndelay -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 butterfly_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } /*----------------------------------------------------------------------*/ diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi_fsl_espi.c new file mode 100644 index 00000000000..a99e2333b94 --- /dev/null +++ b/drivers/spi/spi_fsl_espi.c @@ -0,0 +1,763 @@ +/* + * Freescale eSPI controller driver. + * + * Copyright 2010 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/spi/spi.h> +#include <linux/platform_device.h> +#include <linux/fsl_devices.h> +#include <linux/mm.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_spi.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <sysdev/fsl_soc.h> + +#include "spi_fsl_lib.h" + +/* eSPI Controller registers */ +struct fsl_espi_reg { + __be32 mode; /* 0x000 - eSPI mode register */ + __be32 event; /* 0x004 - eSPI event register */ + __be32 mask; /* 0x008 - eSPI mask register */ + __be32 command; /* 0x00c - eSPI command register */ + __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ + __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ + u8 res[8]; /* 0x018 - 0x01c reserved */ + __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ +}; + +struct fsl_espi_transfer { + const void *tx_buf; + void *rx_buf; + unsigned len; + unsigned n_tx; + unsigned n_rx; + unsigned actual_length; + int status; +}; + +/* eSPI Controller mode register definitions */ +#define SPMODE_ENABLE (1 << 31) +#define SPMODE_LOOP (1 << 30) +#define SPMODE_TXTHR(x) ((x) << 8) +#define SPMODE_RXTHR(x) ((x) << 0) + +/* eSPI Controller CS mode register definitions */ +#define CSMODE_CI_INACTIVEHIGH (1 << 31) +#define CSMODE_CP_BEGIN_EDGECLK (1 << 30) +#define CSMODE_REV (1 << 29) +#define CSMODE_DIV16 (1 << 28) +#define CSMODE_PM(x) ((x) << 24) +#define CSMODE_POL_1 (1 << 20) +#define CSMODE_LEN(x) ((x) << 16) +#define CSMODE_BEF(x) ((x) << 12) +#define CSMODE_AFT(x) ((x) << 8) +#define CSMODE_CG(x) ((x) << 3) + +/* Default mode/csmode for eSPI controller */ +#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) +#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ + | CSMODE_AFT(0) | CSMODE_CG(1)) + +/* SPIE register values */ +#define SPIE_NE 0x00000200 /* Not empty */ +#define SPIE_NF 0x00000100 /* Not full */ + +/* SPIM register values */ +#define SPIM_NE 0x00000200 /* Not empty */ +#define SPIM_NF 0x00000100 /* Not full */ +#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) +#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) + +/* SPCOM register values */ +#define SPCOM_CS(x) ((x) << 30) +#define SPCOM_TRANLEN(x) ((x) << 0) +#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ + +static void fsl_espi_change_mode(struct spi_device *spi) +{ + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct fsl_espi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->csmode[spi->chip_select]; + __be32 __iomem *espi_mode = ®_base->mode; + u32 tmp; + unsigned long flags; + + /* Turn off IRQs locally to minimize time that SPI is disabled. 
*/ + local_irq_save(flags); + + /* Turn off SPI unit prior changing mode */ + tmp = mpc8xxx_spi_read_reg(espi_mode); + mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); + mpc8xxx_spi_write_reg(mode, cs->hw_mode); + mpc8xxx_spi_write_reg(espi_mode, tmp); + + local_irq_restore(flags); +} + +static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) +{ + u32 data; + u16 data_h; + u16 data_l; + const u32 *tx = mpc8xxx_spi->tx; + + if (!tx) + return 0; + + data = *tx++ << mpc8xxx_spi->tx_shift; + data_l = data & 0xffff; + data_h = (data >> 16) & 0xffff; + swab16s(&data_l); + swab16s(&data_h); + data = data_h | data_l; + + mpc8xxx_spi->tx = tx; + return data; +} + +static int fsl_espi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + int bits_per_word = 0; + u8 pm; + u32 hz = 0; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* Make sure its a bit width we support [4..16] */ + if ((bits_per_word < 4) || (bits_per_word > 16)) + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + cs->rx_shift = 0; + cs->tx_shift = 0; + cs->get_rx = mpc8xxx_spi_rx_buf_u32; + cs->get_tx = mpc8xxx_spi_tx_buf_u32; + if (bits_per_word <= 8) { + cs->rx_shift = 8 - bits_per_word; + } else if (bits_per_word <= 16) { + cs->rx_shift = 16 - bits_per_word; + if (spi->mode & SPI_LSB_FIRST) + cs->get_tx = fsl_espi_tx_buf_lsb; + } else { + return -EINVAL; + } + + mpc8xxx_spi->rx_shift = cs->rx_shift; + mpc8xxx_spi->tx_shift = cs->tx_shift; + mpc8xxx_spi->get_rx = cs->get_rx; + mpc8xxx_spi->get_tx = cs->get_tx; + + bits_per_word = bits_per_word - 1; + + /* mask out bits we are going to set */ + cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); + + cs->hw_mode |= CSMODE_LEN(bits_per_word); + + if ((mpc8xxx_spi->spibrg / hz) > 64) { + cs->hw_mode |= CSMODE_DIV16; + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; + + WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. 
" + "Will use %d Hz instead.\n", dev_name(&spi->dev), + hz, mpc8xxx_spi->spibrg / 1024); + if (pm > 16) + pm = 16; + } else { + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } + if (pm) + pm--; + + cs->hw_mode |= CSMODE_PM(pm); + + fsl_espi_change_mode(spi); + return 0; +} + +static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, + unsigned int len) +{ + u32 word; + struct fsl_espi_reg *reg_base = mspi->reg_base; + + mspi->count = len; + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); + + /* transmit word */ + word = mspi->get_tx(mspi); + mpc8xxx_spi_write_reg(®_base->transmit, word); + + return 0; +} + +static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; + unsigned int len = t->len; + u8 bits_per_word; + int ret; + + bits_per_word = spi->bits_per_word; + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + mpc8xxx_spi->len = t->len; + len = roundup(len, 4) / 4; + + mpc8xxx_spi->tx = t->tx_buf; + mpc8xxx_spi->rx = t->rx_buf; + + INIT_COMPLETION(mpc8xxx_spi->done); + + /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ + if ((t->len - 1) > SPCOM_TRANLEN_MAX) { + dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" + " beyond the SPCOM[TRANLEN] field\n", t->len); + return -EINVAL; + } + mpc8xxx_spi_write_reg(®_base->command, + (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); + + ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); + if (ret) + return ret; + + wait_for_completion(&mpc8xxx_spi->done); + + /* disable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, 0); + + return mpc8xxx_spi->count; +} + +static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) +{ + if (cmd) { + cmd[1] = (u8)(addr >> 16); + cmd[2] = (u8)(addr >> 8); + cmd[3] = (u8)(addr >> 0); + } +} + +static inline unsigned int fsl_espi_cmd2addr(u8 *cmd) +{ + if (cmd) + return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; + + return 0; +} + +static void fsl_espi_do_trans(struct spi_message *m, + struct fsl_espi_transfer *tr) +{ + struct spi_device *spi = m->spi; + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct fsl_espi_transfer *espi_trans = tr; + struct spi_message message; + struct spi_transfer *t, *first, trans; + int status = 0; + + spi_message_init(&message); + memset(&trans, 0, sizeof(trans)); + + first = list_first_entry(&m->transfers, struct spi_transfer, + transfer_list); + list_for_each_entry(t, &m->transfers, transfer_list) { + if ((first->bits_per_word != t->bits_per_word) || + (first->speed_hz != t->speed_hz)) { + espi_trans->status = -EINVAL; + dev_err(mspi->dev, "bits_per_word/speed_hz should be" + " same for the same SPI transfer\n"); + return; + } + + trans.speed_hz = t->speed_hz; + trans.bits_per_word = t->bits_per_word; + trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); + } + + trans.len = espi_trans->len; + trans.tx_buf = espi_trans->tx_buf; + trans.rx_buf = espi_trans->rx_buf; + spi_message_add_tail(&trans, &message); + + list_for_each_entry(t, &message.transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = -EINVAL; + + status = fsl_espi_setup_transfer(spi, t); + if (status < 0) + break; + } + + if (t->len) + status = fsl_espi_bufs(spi, t); + + if (status) { + status = -EMSGSIZE; + break; + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + } + + espi_trans->status = status; + fsl_espi_setup_transfer(spi, NULL); +} + +static void 
fsl_espi_cmd_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct spi_transfer *t; + u8 *local_buf; + int i = 0; + struct fsl_espi_transfer *espi_trans = trans; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + espi_trans->n_tx; + fsl_espi_do_trans(m, espi_trans); + + espi_trans->actual_length = espi_trans->len; + kfree(local_buf); +} + +static void fsl_espi_rw_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct fsl_espi_transfer *espi_trans = trans; + unsigned int n_tx = espi_trans->n_tx; + unsigned int n_rx = espi_trans->n_rx; + struct spi_transfer *t; + u8 *local_buf; + u8 *rx_buf = rx_buff; + unsigned int trans_len; + unsigned int addr; + int i, pos, loop; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { + trans_len = n_rx - pos; + if (trans_len > SPCOM_TRANLEN_MAX - n_tx) + trans_len = SPCOM_TRANLEN_MAX - n_tx; + + i = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + if (pos > 0) { + addr = fsl_espi_cmd2addr(local_buf); + addr += pos; + fsl_espi_addr2cmd(addr, local_buf); + } + + espi_trans->n_tx = n_tx; + espi_trans->n_rx = trans_len; + espi_trans->len = trans_len + n_tx; + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + n_tx; + fsl_espi_do_trans(m, espi_trans); + + memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); + + if (loop > 0) + espi_trans->actual_length += espi_trans->len - n_tx; + else + espi_trans->actual_length += espi_trans->len; + } + + kfree(local_buf); +} + +static void fsl_espi_do_one_msg(struct spi_message *m) +{ + struct spi_transfer *t; + u8 *rx_buf = NULL; + unsigned int n_tx = 0; + unsigned int n_rx = 0; + struct fsl_espi_transfer espi_trans; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) + n_tx += t->len; + if (t->rx_buf) { + n_rx += t->len; + rx_buf = t->rx_buf; + } + } + + espi_trans.n_tx = n_tx; + espi_trans.n_rx = n_rx; + espi_trans.len = n_tx + n_rx; + espi_trans.actual_length = 0; + espi_trans.status = 0; + + if (!rx_buf) + fsl_espi_cmd_trans(m, &espi_trans, NULL); + else + fsl_espi_rw_trans(m, &espi_trans, rx_buf); + + m->actual_length = espi_trans.actual_length; + m->status = espi_trans.status; + m->complete(m->context); +} + +static int fsl_espi_setup(struct spi_device *spi) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + int retval; + u32 hw_mode; + u32 loop_mode; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (!spi->max_speed_hz) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + reg_base = mpc8xxx_spi->reg_base; + + hw_mode = cs->hw_mode; /* Save orginal settings */ + cs->hw_mode = mpc8xxx_spi_read_reg( + ®_base->csmode[spi->chip_select]); + /* mask out bits we are going to set */ + cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH + | CSMODE_REV); + + if (spi->mode & SPI_CPHA) + cs->hw_mode |= 
CSMODE_CP_BEGIN_EDGECLK; + if (spi->mode & SPI_CPOL) + cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; + if (!(spi->mode & SPI_LSB_FIRST)) + cs->hw_mode |= CSMODE_REV; + + /* Handle the loop mode */ + loop_mode = mpc8xxx_spi_read_reg(®_base->mode); + loop_mode &= ~SPMODE_LOOP; + if (spi->mode & SPI_LOOP) + loop_mode |= SPMODE_LOOP; + mpc8xxx_spi_write_reg(®_base->mode, loop_mode); + + retval = fsl_espi_setup_transfer(spi, NULL); + if (retval < 0) { + cs->hw_mode = hw_mode; /* Restore settings */ + return retval; + } + return 0; +} + +void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + struct fsl_espi_reg *reg_base = mspi->reg_base; + + /* We need handle RX first */ + if (events & SPIE_NE) { + u32 rx_data, tmp; + u8 rx_data_8; + + /* Spin until RX is done */ + while (SPIE_RXCNT(events) < min(4, mspi->len)) { + cpu_relax(); + events = mpc8xxx_spi_read_reg(®_base->event); + } + + if (mspi->len >= 4) { + rx_data = mpc8xxx_spi_read_reg(®_base->receive); + } else { + tmp = mspi->len; + rx_data = 0; + while (tmp--) { + rx_data_8 = in_8((u8 *)®_base->receive); + rx_data |= (rx_data_8 << (tmp * 8)); + } + + rx_data <<= (4 - mspi->len) * 8; + } + + mspi->len -= 4; + + if (mspi->rx) + mspi->get_rx(rx_data, mspi); + } + + if (!(events & SPIE_NF)) { + int ret; + + /* spin until TX is done */ + ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( + ®_base->event)) & SPIE_NF) == 0, 1000, 0); + if (!ret) { + dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); + return; + } + } + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= 1; + if (mspi->count) { + u32 word = mspi->get_tx(mspi); + + mpc8xxx_spi_write_reg(®_base->transmit, word); + } else { + complete(&mspi->done); + } +} + +static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) +{ + struct mpc8xxx_spi *mspi = context_data; + struct fsl_espi_reg *reg_base = mspi->reg_base; + irqreturn_t ret = IRQ_NONE; + u32 events; + + /* Get interrupt events(tx/rx) */ + events = mpc8xxx_spi_read_reg(®_base->event); + if (events) + ret = IRQ_HANDLED; + + dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); + + fsl_espi_cpu_irq(mspi, events); + + return ret; +} + +static void fsl_espi_remove(struct mpc8xxx_spi *mspi) +{ + iounmap(mspi->reg_base); +} + +static struct spi_master * __devinit fsl_espi_probe(struct device *dev, + struct resource *mem, unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + u32 regval; + int i, ret = 0; + + master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); + if (!master) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(dev, master); + + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; + + master->setup = fsl_espi_setup; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_espi_remove; + + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (!mpc8xxx_spi->reg_base) { + ret = -ENOMEM; + goto err_probe; + } + + reg_base = mpc8xxx_spi->reg_base; + + /* Register for SPI Interrupt */ + ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, + 0, "fsl_espi", mpc8xxx_spi); + if (ret) + goto free_irq; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + mpc8xxx_spi->rx_shift = 16; + mpc8xxx_spi->tx_shift = 24; + } + + /* SPI controller initializations */ + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + 
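As an aside on fsl_espi_rw_trans() above: a read longer than the SPCOM[TRANLEN] field allows is split into chunks, and the 24-bit address carried in command bytes 1..3 is advanced by the payload already read before the command is re-issued. The following standalone sketch replays just that arithmetic; it is not part of the patch, and the 0x10000 limit and the flash-style opcode are assumptions for illustration.

#include <stdio.h>

#define SPCOM_TRANLEN_MAX 0x10000u	/* assumed controller limit, for illustration only */

/* Pack/unpack a 24-bit address into command bytes 1..3, as the driver does. */
static void addr2cmd(unsigned int addr, unsigned char *cmd)
{
	cmd[1] = addr >> 16;
	cmd[2] = addr >> 8;
	cmd[3] = addr;
}

static unsigned int cmd2addr(const unsigned char *cmd)
{
	return cmd[1] << 16 | cmd[2] << 8 | cmd[3];
}

int main(void)
{
	/* hypothetical command: opcode + 24-bit address, like a flash read */
	unsigned char cmd[4] = { 0x03, 0x01, 0x00, 0x00 };
	unsigned int n_tx = sizeof(cmd);	/* command bytes resent with every chunk */
	unsigned int n_rx = 200000;		/* payload bytes the caller asked for */
	unsigned int base = cmd2addr(cmd);
	unsigned int pos, trans_len;

	for (pos = 0; pos < n_rx; pos += trans_len) {
		trans_len = n_rx - pos;
		if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
			trans_len = SPCOM_TRANLEN_MAX - n_tx;

		/* re-issue the command with the address advanced by what was read */
		addr2cmd(base + pos, cmd);
		printf("chunk: addr=0x%06x len=%u (frame %u bytes)\n",
		       cmd2addr(cmd), trans_len, n_tx + trans_len);
	}
	return 0;
}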
mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); + + /* Init eSPI CS mode register */ + for (i = 0; i < pdata->max_chipselect; i++) + mpc8xxx_spi_write_reg(®_base->csmode[i], CSMODE_INIT_VAL); + + /* Enable SPI interface */ + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; + + mpc8xxx_spi_write_reg(®_base->mode, regval); + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); + + return master; + +unreg_master: + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); +free_irq: + iounmap(mpc8xxx_spi->reg_base); +err_probe: + spi_master_put(master); +err: + return ERR_PTR(ret); +} + +static int of_fsl_espi_get_chipselects(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct fsl_spi_platform_data *pdata = dev->platform_data; + const u32 *prop; + int len; + + prop = of_get_property(np, "fsl,espi-num-chipselects", &len); + if (!prop || len < sizeof(*prop)) { + dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); + return -EINVAL; + } + + pdata->max_chipselect = *prop; + pdata->cs_control = NULL; + + return 0; +} + +static int __devinit of_fsl_espi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct spi_master *master; + struct resource mem; + struct resource irq; + int ret = -ENOMEM; + + ret = of_mpc8xxx_spi_probe(ofdev, ofid); + if (ret) + return ret; + + ret = of_fsl_espi_get_chipselects(dev); + if (ret) + goto err; + + ret = of_address_to_resource(np, 0, &mem); + if (ret) + goto err; + + ret = of_irq_to_resource(np, 0, &irq); + if (!ret) { + ret = -EINVAL; + goto err; + } + + master = fsl_espi_probe(dev, &mem, irq.start); + if (IS_ERR(master)) { + ret = PTR_ERR(master); + goto err; + } + + return 0; + +err: + return ret; +} + +static int __devexit of_fsl_espi_remove(struct platform_device *dev) +{ + return mpc8xxx_spi_remove(&dev->dev); +} + +static const struct of_device_id of_fsl_espi_match[] = { + { .compatible = "fsl,mpc8536-espi" }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_espi_match); + +static struct of_platform_driver fsl_espi_driver = { + .driver = { + .name = "fsl_espi", + .owner = THIS_MODULE, + .of_match_table = of_fsl_espi_match, + }, + .probe = of_fsl_espi_probe, + .remove = __devexit_p(of_fsl_espi_remove), +}; + +static int __init fsl_espi_init(void) +{ + return of_register_platform_driver(&fsl_espi_driver); +} +module_init(fsl_espi_init); + +static void __exit fsl_espi_exit(void) +{ + of_unregister_platform_driver(&fsl_espi_driver); +} +module_exit(fsl_espi_exit); + +MODULE_AUTHOR("Mingkai Hu"); +MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi_fsl_lib.c new file mode 100644 index 00000000000..5cd741fdb5c --- /dev/null +++ b/drivers/spi/spi_fsl_lib.c @@ -0,0 +1,237 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * Copyright 2010 Freescale Semiconductor, Inc. 
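Returning briefly to the receive path in fsl_espi_cpu_irq() above: when fewer than four bytes remain, the handler drains the RX register a byte at a time and left-justifies the result before handing it to get_rx(). A standalone sketch of that packing, not part of the patch (the sample bytes are made up):

#include <stdio.h>

/* Assemble up to 3 residual bytes the way fsl_espi_cpu_irq() does:
 * the first byte read lands in the most significant position, and the
 * partial word is then left-justified within 32 bits. */
static unsigned int pack_rx(const unsigned char *fifo_bytes, unsigned int len)
{
	unsigned int rx_data = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		rx_data |= (unsigned int)fifo_bytes[i] << ((len - 1 - i) * 8);

	/* left-justify the partial word, as the driver does before get_rx() */
	return rx_data << ((4 - len) * 8);
}

int main(void)
{
	unsigned char fifo[] = { 0xAA, 0xBB, 0xCC };	/* 3 residual bytes */

	printf("0x%08x\n", pack_rx(fifo, 3));		/* prints 0xaabbcc00 */
	return 0;
}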
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/fsl_devices.h> +#include <linux/dma-mapping.h> +#include <linux/mm.h> +#include <linux/of_platform.h> +#include <linux/of_spi.h> +#include <sysdev/fsl_soc.h> + +#include "spi_fsl_lib.h" + +#define MPC8XXX_SPI_RX_BUF(type) \ +void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + type *rx = mpc8xxx_spi->rx; \ + *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ + mpc8xxx_spi->rx = rx; \ +} + +#define MPC8XXX_SPI_TX_BUF(type) \ +u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + u32 data; \ + const type *tx = mpc8xxx_spi->tx; \ + if (!tx) \ + return 0; \ + data = *tx++ << mpc8xxx_spi->tx_shift; \ + mpc8xxx_spi->tx = tx; \ + return data; \ +} + +MPC8XXX_SPI_RX_BUF(u8) +MPC8XXX_SPI_RX_BUF(u16) +MPC8XXX_SPI_RX_BUF(u32) +MPC8XXX_SPI_TX_BUF(u8) +MPC8XXX_SPI_TX_BUF(u16) +MPC8XXX_SPI_TX_BUF(u32) + +struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) +{ + return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); +} + +void mpc8xxx_spi_work(struct work_struct *work) +{ + struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, + work); + + spin_lock_irq(&mpc8xxx_spi->lock); + while (!list_empty(&mpc8xxx_spi->queue)) { + struct spi_message *m = container_of(mpc8xxx_spi->queue.next, + struct spi_message, queue); + + list_del_init(&m->queue); + spin_unlock_irq(&mpc8xxx_spi->lock); + + if (mpc8xxx_spi->spi_do_one_msg) + mpc8xxx_spi->spi_do_one_msg(m); + + spin_lock_irq(&mpc8xxx_spi->lock); + } + spin_unlock_irq(&mpc8xxx_spi->lock); +} + +int mpc8xxx_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mpc8xxx_spi->lock, flags); + list_add_tail(&m->queue, &mpc8xxx_spi->queue); + queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); + spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); + + return 0; +} + +void mpc8xxx_spi_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +const char *mpc8xxx_spi_strmode(unsigned int flags) +{ + if (flags & SPI_QE_CPU_MODE) { + return "QE CPU"; + } else if (flags & SPI_CPM_MODE) { + if (flags & SPI_QE) + return "QE"; + else if (flags & SPI_CPM2) + return "CPM2"; + else + return "CPM1"; + } + return "CPU"; +} + +int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + int ret = 0; + + master = dev_get_drvdata(dev); + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH + | SPI_LSB_FIRST | SPI_LOOP; + + master->transfer = mpc8xxx_spi_transfer; + master->cleanup = mpc8xxx_spi_cleanup; + master->dev.of_node = dev->of_node; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->dev = dev; + mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; + mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; + mpc8xxx_spi->flags = pdata->flags; + mpc8xxx_spi->spibrg = pdata->sysclk; + mpc8xxx_spi->irq = irq; + + mpc8xxx_spi->rx_shift = 0; + 
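For readers skimming the macros at the top of spi_fsl_lib.c: MPC8XXX_SPI_RX_BUF() and MPC8XXX_SPI_TX_BUF() stamp out one copy-in/copy-out helper per word width. As a reading aid (not additional patch content), the u16 instantiations expand to roughly the following, with comments added:

/* MPC8XXX_SPI_RX_BUF(u16) expands to: */
void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi)
{
	u16 *rx = mpc8xxx_spi->rx;

	*rx++ = (u16)(data >> mpc8xxx_spi->rx_shift);	/* store one word, undo the shift */
	mpc8xxx_spi->rx = rx;				/* advance the buffer cursor */
}

/* MPC8XXX_SPI_TX_BUF(u16) expands to: */
u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi)
{
	u32 data;
	const u16 *tx = mpc8xxx_spi->tx;

	if (!tx)
		return 0;				/* rx-only transfer: clock out zeroes */
	data = *tx++ << mpc8xxx_spi->tx_shift;		/* fetch one word, apply the shift */
	mpc8xxx_spi->tx = tx;
	return data;
}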
mpc8xxx_spi->tx_shift = 0; + + init_completion(&mpc8xxx_spi->done); + + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + + spin_lock_init(&mpc8xxx_spi->lock); + init_completion(&mpc8xxx_spi->done); + INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); + INIT_LIST_HEAD(&mpc8xxx_spi->queue); + + mpc8xxx_spi->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (mpc8xxx_spi->workqueue == NULL) { + ret = -EBUSY; + goto err; + } + + return 0; + +err: + return ret; +} + +int __devexit mpc8xxx_spi_remove(struct device *dev) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct spi_master *master; + + master = dev_get_drvdata(dev); + mpc8xxx_spi = spi_master_get_devdata(master); + + flush_workqueue(mpc8xxx_spi->workqueue); + destroy_workqueue(mpc8xxx_spi->workqueue); + spi_unregister_master(master); + + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); + + if (mpc8xxx_spi->spi_remove) + mpc8xxx_spi->spi_remove(mpc8xxx_spi); + + return 0; +} + +int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct mpc8xxx_spi_probe_info *pinfo; + struct fsl_spi_platform_data *pdata; + const void *prop; + int ret = -ENOMEM; + + pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + pdata = &pinfo->pdata; + dev->platform_data = pdata; + + /* Allocate bus num dynamically. */ + pdata->bus_num = -1; + + /* SPI controller is either clocked from QE or SoC clock. */ + pdata->sysclk = get_brgfreq(); + if (pdata->sysclk == -1) { + pdata->sysclk = fsl_get_sys_freq(); + if (pdata->sysclk == -1) { + ret = -ENODEV; + goto err; + } + } + + prop = of_get_property(np, "mode", NULL); + if (prop && !strcmp(prop, "cpu-qe")) + pdata->flags = SPI_QE_CPU_MODE; + else if (prop && !strcmp(prop, "qe")) + pdata->flags = SPI_CPM_MODE | SPI_QE; + else if (of_device_is_compatible(np, "fsl,cpm2-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM2; + else if (of_device_is_compatible(np, "fsl,cpm1-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM1; + + return 0; + +err: + kfree(pinfo); + return ret; +} diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi_fsl_lib.h new file mode 100644 index 00000000000..281e060977c --- /dev/null +++ b/drivers/spi/spi_fsl_lib.h @@ -0,0 +1,124 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright 2010 Freescale Semiconductor, Inc. + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __SPI_FSL_LIB_H__ +#define __SPI_FSL_LIB_H__ + +#include <asm/io.h> + +/* SPI/eSPI Controller driver's private data. 
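The "mode" property handling in of_mpc8xxx_spi_probe() above, together with mpc8xxx_spi_strmode() earlier in this file, amounts to a small lookup from device-tree data to controller personality. A standalone sketch of that mapping, not part of the patch (the SPI_* flag values here are placeholders; the real ones come from the fsl_devices platform header):

#include <stdio.h>
#include <string.h>

/* Placeholder flag bits, for illustration only. */
#define SPI_QE_CPU_MODE	(1 << 0)
#define SPI_CPM_MODE	(1 << 1)
#define SPI_CPM1	(1 << 2)
#define SPI_CPM2	(1 << 3)
#define SPI_QE		(1 << 4)

/* Same decision chain as of_mpc8xxx_spi_probe(): the "mode" property,
 * or a cpm1/cpm2 compatible string, selects the controller personality. */
static unsigned int mode_to_flags(const char *mode, const char *compatible)
{
	if (mode && !strcmp(mode, "cpu-qe"))
		return SPI_QE_CPU_MODE;
	if (mode && !strcmp(mode, "qe"))
		return SPI_CPM_MODE | SPI_QE;
	if (compatible && !strcmp(compatible, "fsl,cpm2-spi"))
		return SPI_CPM_MODE | SPI_CPM2;
	if (compatible && !strcmp(compatible, "fsl,cpm1-spi"))
		return SPI_CPM_MODE | SPI_CPM1;
	return 0;	/* plain CPU mode */
}

/* Same labels as mpc8xxx_spi_strmode() prints at probe time. */
static const char *strmode(unsigned int flags)
{
	if (flags & SPI_QE_CPU_MODE)
		return "QE CPU";
	if (flags & SPI_CPM_MODE)
		return (flags & SPI_QE) ? "QE" :
		       (flags & SPI_CPM2) ? "CPM2" : "CPM1";
	return "CPU";
}

int main(void)
{
	printf("%s\n", strmode(mode_to_flags("cpu-qe", NULL)));		/* QE CPU */
	printf("%s\n", strmode(mode_to_flags("qe", NULL)));		/* QE */
	printf("%s\n", strmode(mode_to_flags(NULL, "fsl,cpm1-spi")));	/* CPM1 */
	printf("%s\n", strmode(mode_to_flags(NULL, NULL)));		/* CPU */
	return 0;
}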
*/ +struct mpc8xxx_spi { + struct device *dev; + void *reg_base; + + /* rx & tx bufs from the spi_transfer */ + const void *tx; + void *rx; +#ifdef CONFIG_SPI_FSL_ESPI + int len; +#endif + + int subblock; + struct spi_pram __iomem *pram; + struct cpm_buf_desc __iomem *tx_bd; + struct cpm_buf_desc __iomem *rx_bd; + + struct spi_transfer *xfer_in_progress; + + /* dma addresses for CPM transfers */ + dma_addr_t tx_dma; + dma_addr_t rx_dma; + bool map_tx_dma; + bool map_rx_dma; + + dma_addr_t dma_dummy_tx; + dma_addr_t dma_dummy_rx; + + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32(*get_tx) (struct mpc8xxx_spi *); + + /* hooks for different controller driver */ + void (*spi_do_one_msg) (struct spi_message *m); + void (*spi_remove) (struct mpc8xxx_spi *mspi); + + unsigned int count; + unsigned int irq; + + unsigned nsecs; /* (clock cycle time)/2 */ + + u32 spibrg; /* SPIBRG input clock */ + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + + unsigned int flags; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct list_head queue; + spinlock_t lock; + + struct completion done; +}; + +struct spi_mpc8xxx_cs { + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32 (*get_tx) (struct mpc8xxx_spi *); + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + u32 hw_mode; /* Holds HW mode register settings */ +}; + +static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) +{ + out_be32(reg, val); +} + +static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) +{ + return in_be32(reg); +} + +struct mpc8xxx_spi_probe_info { + struct fsl_spi_platform_data pdata; + int *gpios; + bool *alow_flags; +}; + +extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); + +extern struct mpc8xxx_spi_probe_info *to_of_pinfo( + struct fsl_spi_platform_data *pdata); +extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, unsigned int len); +extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); +extern void mpc8xxx_spi_cleanup(struct spi_device *spi); +extern const char *mpc8xxx_spi_strmode(unsigned int flags); +extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq); +extern int mpc8xxx_spi_remove(struct device *dev); +extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid); + +#endif /* __SPI_FSL_LIB_H__ */ diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi_fsl_spi.c new file mode 100644 index 00000000000..7ca52d3ae8f --- /dev/null +++ b/drivers/spi/spi_fsl_spi.c @@ -0,0 +1,1193 @@ +/* + * Freescale SPI controller driver. + * + * Maintainer: Kumar Gala + * + * Copyright (C) 2006 Polycom, Inc. + * Copyright 2010 Freescale Semiconductor, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. 
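One note on the accessors in spi_fsl_lib.h: mpc8xxx_spi_write_reg()/mpc8xxx_spi_read_reg() wrap out_be32()/in_be32() because the register block is big-endian MMIO. A rough userspace analogue of what such helpers do on a little-endian host, purely for illustration (the kernel versions also provide the required I/O ordering, which this sketch does not):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: convert between host order and the device's big-endian
 * register layout.  Assumes a little-endian host; on big-endian PowerPC these
 * would be plain loads and stores. */
static inline uint32_t host_to_be32(uint32_t v) { return __builtin_bswap32(v); }
static inline uint32_t be32_to_host(uint32_t v) { return __builtin_bswap32(v); }

static void write_reg(volatile uint32_t *reg, uint32_t val)
{
	*reg = host_to_be32(val);
}

static uint32_t read_reg(volatile uint32_t *reg)
{
	return be32_to_host(*reg);
}

int main(void)
{
	volatile uint32_t fake_reg;	/* stand-in for an ioremap()ed register */

	write_reg(&fake_reg, 0x80000001u);
	printf("0x%08x\n", read_reg(&fake_reg));	/* round-trips to 0x80000001 */
	return 0;
}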
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/platform_device.h> +#include <linux/fsl_devices.h> +#include <linux/dma-mapping.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/gpio.h> +#include <linux/of_gpio.h> + +#include <sysdev/fsl_soc.h> +#include <asm/cpm.h> +#include <asm/qe.h> + +#include "spi_fsl_lib.h" + +/* CPM1 and CPM2 are mutually exclusive. */ +#ifdef CONFIG_CPM1 +#include <asm/cpm1.h> +#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0) +#else +#include <asm/cpm2.h> +#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0) +#endif + +/* SPI Controller registers */ +struct fsl_spi_reg { + u8 res1[0x20]; + __be32 mode; + __be32 event; + __be32 mask; + __be32 command; + __be32 transmit; + __be32 receive; +}; + +/* SPI Controller mode register definitions */ +#define SPMODE_LOOP (1 << 30) +#define SPMODE_CI_INACTIVEHIGH (1 << 29) +#define SPMODE_CP_BEGIN_EDGECLK (1 << 28) +#define SPMODE_DIV16 (1 << 27) +#define SPMODE_REV (1 << 26) +#define SPMODE_MS (1 << 25) +#define SPMODE_ENABLE (1 << 24) +#define SPMODE_LEN(x) ((x) << 20) +#define SPMODE_PM(x) ((x) << 16) +#define SPMODE_OP (1 << 14) +#define SPMODE_CG(x) ((x) << 7) + +/* + * Default for SPI Mode: + * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk + */ +#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ + SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) + +/* SPIE register values */ +#define SPIE_NE 0x00000200 /* Not empty */ +#define SPIE_NF 0x00000100 /* Not full */ + +/* SPIM register values */ +#define SPIM_NE 0x00000200 /* Not empty */ +#define SPIM_NF 0x00000100 /* Not full */ + +#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */ +#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */ + +/* SPCOM register values */ +#define SPCOM_STR (1 << 23) /* Start transmit */ + +#define SPI_PRAM_SIZE 0x100 +#define SPI_MRBLR ((unsigned int)PAGE_SIZE) + +static void *fsl_dummy_rx; +static DEFINE_MUTEX(fsl_dummy_rx_lock); +static int fsl_dummy_rx_refcnt; + +static void fsl_spi_change_mode(struct spi_device *spi) +{ + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct fsl_spi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->mode; + unsigned long flags; + + if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) + return; + + /* Turn off IRQs locally to minimize time that SPI is disabled. */ + local_irq_save(flags); + + /* Turn off SPI unit prior changing mode */ + mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE); + + /* When in CPM mode, we need to reinit tx and rx. 
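To make the clock fields above concrete: both setup_transfer() paths in this series pick SPMODE_PM, plus SPMODE_DIV16 when needed, so that the resulting SPI clock does not exceed the requested speed. The standalone sketch below replays that selection; the assumption that the hardware divides the input clock by 4*(pm+1), times 16 with DIV16 set, follows the driver arithmetic rather than a datasheet quote, and the 266 MHz input clock is made up.

#include <stdio.h>

/* Mirror of the divider selection in fsl_spi_setup_transfer() /
 * fsl_espi_setup_transfer(): returns the PM field value and whether
 * DIV16 must be set. */
static unsigned int pick_divider(unsigned int spibrg, unsigned int hz, int *div16)
{
	unsigned int pm;

	if (spibrg / hz > 64) {
		*div16 = 1;
		pm = (spibrg - 1) / (hz * 64) + 1;
		if (pm > 16)
			pm = 16;	/* clamp; the driver also warns the speed is too low */
	} else {
		*div16 = 0;
		pm = (spibrg - 1) / (hz * 4) + 1;
	}
	if (pm)
		pm--;			/* the register field holds "divider - 1" */
	return pm;
}

int main(void)
{
	unsigned int spibrg = 266666666;	/* hypothetical 266 MHz input clock */
	unsigned int hz = 1000000;		/* 1 MHz requested */
	int div16;
	unsigned int pm = pick_divider(spibrg, hz, &div16);
	unsigned int div = (div16 ? 64 : 4) * (pm + 1);

	printf("pm=%u div16=%d -> SCLK ~ %u Hz\n", pm, div16, spibrg / div);
	return 0;
}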
*/ + if (mspi->flags & SPI_CPM_MODE) { + if (mspi->flags & SPI_QE) { + qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + } else { + cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX); + if (mspi->flags & SPI_CPM1) { + out_be16(&mspi->pram->rbptr, + in_be16(&mspi->pram->rbase)); + out_be16(&mspi->pram->tbptr, + in_be16(&mspi->pram->tbase)); + } + } + } + mpc8xxx_spi_write_reg(mode, cs->hw_mode); + local_irq_restore(flags); +} + +static void fsl_spi_chipselect(struct spi_device *spi, int value) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; + bool pol = spi->mode & SPI_CS_HIGH; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (value == BITBANG_CS_INACTIVE) { + if (pdata->cs_control) + pdata->cs_control(spi, !pol); + } + + if (value == BITBANG_CS_ACTIVE) { + mpc8xxx_spi->rx_shift = cs->rx_shift; + mpc8xxx_spi->tx_shift = cs->tx_shift; + mpc8xxx_spi->get_rx = cs->get_rx; + mpc8xxx_spi->get_tx = cs->get_tx; + + fsl_spi_change_mode(spi); + + if (pdata->cs_control) + pdata->cs_control(spi, pol); + } +} + +static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + struct mpc8xxx_spi *mpc8xxx_spi, + int bits_per_word) +{ + cs->rx_shift = 0; + cs->tx_shift = 0; + if (bits_per_word <= 8) { + cs->get_rx = mpc8xxx_spi_rx_buf_u8; + cs->get_tx = mpc8xxx_spi_tx_buf_u8; + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + cs->rx_shift = 16; + cs->tx_shift = 24; + } + } else if (bits_per_word <= 16) { + cs->get_rx = mpc8xxx_spi_rx_buf_u16; + cs->get_tx = mpc8xxx_spi_tx_buf_u16; + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + cs->rx_shift = 16; + cs->tx_shift = 16; + } + } else if (bits_per_word <= 32) { + cs->get_rx = mpc8xxx_spi_rx_buf_u32; + cs->get_tx = mpc8xxx_spi_tx_buf_u32; + } else + return -EINVAL; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && + spi->mode & SPI_LSB_FIRST) { + cs->tx_shift = 0; + if (bits_per_word <= 8) + cs->rx_shift = 8; + else + cs->rx_shift = 0; + } + mpc8xxx_spi->rx_shift = cs->rx_shift; + mpc8xxx_spi->tx_shift = cs->tx_shift; + mpc8xxx_spi->get_rx = cs->get_rx; + mpc8xxx_spi->get_tx = cs->get_tx; + + return bits_per_word; +} + +static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + int bits_per_word) +{ + /* QE uses Little Endian for words > 8 + * so transform all words > 8 into 8 bits + * Unfortnatly that doesn't work for LSB so + * reject these for now */ + /* Note: 32 bits word, LSB works iff + * tfcr/rfcr is set to CPMFCR_GBL */ + if (spi->mode & SPI_LSB_FIRST && + bits_per_word > 8) + return -EINVAL; + if (bits_per_word > 8) + return 8; /* pretend its 8 bits */ + return bits_per_word; +} + +static int fsl_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + int bits_per_word = 0; + u8 pm; + u32 hz = 0; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* Make sure its a bit width we support [4..16, 32] */ + if ((bits_per_word < 4) + || ((bits_per_word > 16) && (bits_per_word != 32))) + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) + bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi, + mpc8xxx_spi, + bits_per_word); + else 
if (mpc8xxx_spi->flags & SPI_QE) + bits_per_word = mspi_apply_qe_mode_quirks(cs, spi, + bits_per_word); + + if (bits_per_word < 0) + return bits_per_word; + + if (bits_per_word == 32) + bits_per_word = 0; + else + bits_per_word = bits_per_word - 1; + + /* mask out bits we are going to set */ + cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16 + | SPMODE_PM(0xF)); + + cs->hw_mode |= SPMODE_LEN(bits_per_word); + + if ((mpc8xxx_spi->spibrg / hz) > 64) { + cs->hw_mode |= SPMODE_DIV16; + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; + + WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " + "Will use %d Hz instead.\n", dev_name(&spi->dev), + hz, mpc8xxx_spi->spibrg / 1024); + if (pm > 16) + pm = 16; + } else { + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } + if (pm) + pm--; + + cs->hw_mode |= SPMODE_PM(pm); + + fsl_spi_change_mode(spi); + return 0; +} + +static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) +{ + struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; + struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; + unsigned int xfer_len = min(mspi->count, SPI_MRBLR); + unsigned int xfer_ofs; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + xfer_ofs = mspi->xfer_in_progress->len - mspi->count; + + if (mspi->rx_dma == mspi->dma_dummy_rx) + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); + else + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); + out_be16(&rx_bd->cbd_datlen, 0); + out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); + + if (mspi->tx_dma == mspi->dma_dummy_tx) + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); + else + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); + out_be16(&tx_bd->cbd_datlen, xfer_len); + out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | + BD_SC_LAST); + + /* start transfer */ + mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); +} + +static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, bool is_dma_mapped) +{ + struct device *dev = mspi->dev; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + if (is_dma_mapped) { + mspi->map_tx_dma = 0; + mspi->map_rx_dma = 0; + } else { + mspi->map_tx_dma = 1; + mspi->map_rx_dma = 1; + } + + if (!t->tx_buf) { + mspi->tx_dma = mspi->dma_dummy_tx; + mspi->map_tx_dma = 0; + } + + if (!t->rx_buf) { + mspi->rx_dma = mspi->dma_dummy_rx; + mspi->map_rx_dma = 0; + } + + if (mspi->map_tx_dma) { + void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */ + + mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, mspi->tx_dma)) { + dev_err(dev, "unable to map tx dma\n"); + return -ENOMEM; + } + } else if (t->tx_buf) { + mspi->tx_dma = t->tx_dma; + } + + if (mspi->map_rx_dma) { + mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mspi->rx_dma)) { + dev_err(dev, "unable to map rx dma\n"); + goto err_rx_dma; + } + } else if (t->rx_buf) { + mspi->rx_dma = t->rx_dma; + } + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); + + mspi->xfer_in_progress = t; + mspi->count = t->len; + + /* start CPM transfers */ + fsl_spi_cpm_bufs_start(mspi); + + return 0; + +err_rx_dma: + if (mspi->map_tx_dma) + dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); + return -ENOMEM; +} + +static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + struct spi_transfer *t = mspi->xfer_in_progress; + + if (mspi->map_tx_dma) + dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); + if (mspi->map_rx_dma) + 
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); + mspi->xfer_in_progress = NULL; +} + +static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, unsigned int len) +{ + u32 word; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + mspi->count = len; + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); + + /* transmit word */ + word = mspi->get_tx(mspi); + mpc8xxx_spi_write_reg(®_base->transmit, word); + + return 0; +} + +static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, + bool is_dma_mapped) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_spi_reg *reg_base; + unsigned int len = t->len; + u8 bits_per_word; + int ret; + + reg_base = mpc8xxx_spi->reg_base; + bits_per_word = spi->bits_per_word; + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + if (bits_per_word > 8) { + /* invalid length? */ + if (len & 1) + return -EINVAL; + len /= 2; + } + if (bits_per_word > 16) { + /* invalid length? */ + if (len & 1) + return -EINVAL; + len /= 2; + } + + mpc8xxx_spi->tx = t->tx_buf; + mpc8xxx_spi->rx = t->rx_buf; + + INIT_COMPLETION(mpc8xxx_spi->done); + + if (mpc8xxx_spi->flags & SPI_CPM_MODE) + ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); + else + ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); + if (ret) + return ret; + + wait_for_completion(&mpc8xxx_spi->done); + + /* disable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, 0); + + if (mpc8xxx_spi->flags & SPI_CPM_MODE) + fsl_spi_cpm_bufs_complete(mpc8xxx_spi); + + return mpc8xxx_spi->count; +} + +static void fsl_spi_do_one_msg(struct spi_message *m) +{ + struct spi_device *spi = m->spi; + struct spi_transfer *t; + unsigned int cs_change; + const int nsecs = 50; + int status; + + cs_change = 1; + status = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + /* Don't allow changes if CS is active */ + status = -EINVAL; + + if (cs_change) + status = fsl_spi_setup_transfer(spi, t); + if (status < 0) + break; + } + + if (cs_change) { + fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); + ndelay(nsecs); + } + cs_change = t->cs_change; + if (t->len) + status = fsl_spi_bufs(spi, t, m->is_dma_mapped); + if (status) { + status = -EMSGSIZE; + break; + } + m->actual_length += t->len; + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (cs_change) { + ndelay(nsecs); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); + ndelay(nsecs); + } + } + + m->status = status; + m->complete(m->context); + + if (status || !cs_change) { + ndelay(nsecs); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); + } + + fsl_spi_setup_transfer(spi, NULL); +} + +static int fsl_spi_setup(struct spi_device *spi) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; + int retval; + u32 hw_mode; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (!spi->max_speed_hz) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + mpc8xxx_spi = spi_master_get_devdata(spi->master); + + reg_base = mpc8xxx_spi->reg_base; + + hw_mode = cs->hw_mode; /* Save original settings */ + cs->hw_mode = mpc8xxx_spi_read_reg(®_base->mode); + /* mask out bits we are going to set */ + cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH + | SPMODE_REV | SPMODE_LOOP); + + if (spi->mode & SPI_CPHA) + cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK; + if (spi->mode & SPI_CPOL) + cs->hw_mode |= SPMODE_CI_INACTIVEHIGH; + if (!(spi->mode 
& SPI_LSB_FIRST)) + cs->hw_mode |= SPMODE_REV; + if (spi->mode & SPI_LOOP) + cs->hw_mode |= SPMODE_LOOP; + + retval = fsl_spi_setup_transfer(spi, NULL); + if (retval < 0) { + cs->hw_mode = hw_mode; /* Restore settings */ + return retval; + } + return 0; +} + +static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + u16 len; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, + in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); + + len = in_be16(&mspi->rx_bd->cbd_datlen); + if (len > mspi->count) { + WARN_ON(1); + len = mspi->count; + } + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= len; + if (mspi->count) + fsl_spi_cpm_bufs_start(mspi); + else + complete(&mspi->done); +} + +static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + struct fsl_spi_reg *reg_base = mspi->reg_base; + + /* We need handle RX first */ + if (events & SPIE_NE) { + u32 rx_data = mpc8xxx_spi_read_reg(®_base->receive); + + if (mspi->rx) + mspi->get_rx(rx_data, mspi); + } + + if ((events & SPIE_NF) == 0) + /* spin until TX is done */ + while (((events = + mpc8xxx_spi_read_reg(®_base->event)) & + SPIE_NF) == 0) + cpu_relax(); + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= 1; + if (mspi->count) { + u32 word = mspi->get_tx(mspi); + + mpc8xxx_spi_write_reg(®_base->transmit, word); + } else { + complete(&mspi->done); + } +} + +static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) +{ + struct mpc8xxx_spi *mspi = context_data; + irqreturn_t ret = IRQ_NONE; + u32 events; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + /* Get interrupt events(tx/rx) */ + events = mpc8xxx_spi_read_reg(®_base->event); + if (events) + ret = IRQ_HANDLED; + + dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); + + if (mspi->flags & SPI_CPM_MODE) + fsl_spi_cpm_irq(mspi, events); + else + fsl_spi_cpu_irq(mspi, events); + + return ret; +} + +static void *fsl_spi_alloc_dummy_rx(void) +{ + mutex_lock(&fsl_dummy_rx_lock); + + if (!fsl_dummy_rx) + fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); + if (fsl_dummy_rx) + fsl_dummy_rx_refcnt++; + + mutex_unlock(&fsl_dummy_rx_lock); + + return fsl_dummy_rx; +} + +static void fsl_spi_free_dummy_rx(void) +{ + mutex_lock(&fsl_dummy_rx_lock); + + switch (fsl_dummy_rx_refcnt) { + case 0: + WARN_ON(1); + break; + case 1: + kfree(fsl_dummy_rx); + fsl_dummy_rx = NULL; + /* fall through */ + default: + fsl_dummy_rx_refcnt--; + break; + } + + mutex_unlock(&fsl_dummy_rx_lock); +} + +static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + struct device_node *np = dev->of_node; + const u32 *iprop; + int size; + unsigned long spi_base_ofs; + unsigned long pram_ofs = -ENOMEM; + + /* Can't use of_address_to_resource(), QE muram isn't at 0. */ + iprop = of_get_property(np, "reg", &size); + + /* QE with a fixed pram location? */ + if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4) + return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE); + + /* QE but with a dynamic pram location? */ + if (mspi->flags & SPI_QE) { + pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); + qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock, + QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs); + return pram_ofs; + } + + /* CPM1 and CPM2 pram must be at a fixed addr. 
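A side note on fsl_spi_alloc_dummy_rx()/fsl_spi_free_dummy_rx() above: the dummy receive buffer is shared across controller instances, so it is allocated once and reference counted under a mutex. Reduced to a standalone sketch (a pthread mutex stands in for the kernel mutex; BUF_SIZE stands in for SPI_MRBLR):

#include <pthread.h>
#include <stdlib.h>

#define BUF_SIZE 4096	/* stand-in for SPI_MRBLR */

static void *dummy_rx;
static int dummy_rx_refcnt;
static pthread_mutex_t dummy_rx_lock = PTHREAD_MUTEX_INITIALIZER;

/* First caller allocates, later callers just bump the refcount. */
static void *alloc_dummy_rx(void)
{
	pthread_mutex_lock(&dummy_rx_lock);
	if (!dummy_rx)
		dummy_rx = malloc(BUF_SIZE);
	if (dummy_rx)
		dummy_rx_refcnt++;
	pthread_mutex_unlock(&dummy_rx_lock);
	return dummy_rx;
}

/* Last caller frees; freeing more often than allocating would be a bug. */
static void free_dummy_rx(void)
{
	pthread_mutex_lock(&dummy_rx_lock);
	if (dummy_rx_refcnt > 0 && --dummy_rx_refcnt == 0) {
		free(dummy_rx);
		dummy_rx = NULL;
	}
	pthread_mutex_unlock(&dummy_rx_lock);
}

int main(void)
{
	alloc_dummy_rx();	/* e.g. first controller probes */
	alloc_dummy_rx();	/* second controller probes */
	free_dummy_rx();
	free_dummy_rx();	/* buffer released here */
	return 0;
}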
*/ + if (!iprop || size != sizeof(*iprop) * 4) + return -ENOMEM; + + spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2); + if (IS_ERR_VALUE(spi_base_ofs)) + return -ENOMEM; + + if (mspi->flags & SPI_CPM2) { + pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); + if (!IS_ERR_VALUE(pram_ofs)) { + u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs); + + out_be16(spi_base, pram_ofs); + } + } else { + struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs); + u16 rpbase = in_be16(&pram->rpbase); + + /* Microcode relocation patch applied? */ + if (rpbase) + pram_ofs = rpbase; + else + return spi_base_ofs; + } + + cpm_muram_free(spi_base_ofs); + return pram_ofs; +} + +static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + struct device_node *np = dev->of_node; + const u32 *iprop; + int size; + unsigned long pram_ofs; + unsigned long bds_ofs; + + if (!(mspi->flags & SPI_CPM_MODE)) + return 0; + + if (!fsl_spi_alloc_dummy_rx()) + return -ENOMEM; + + if (mspi->flags & SPI_QE) { + iprop = of_get_property(np, "cell-index", &size); + if (iprop && size == sizeof(*iprop)) + mspi->subblock = *iprop; + + switch (mspi->subblock) { + default: + dev_warn(dev, "cell-index unspecified, assuming SPI1"); + /* fall through */ + case 0: + mspi->subblock = QE_CR_SUBBLOCK_SPI1; + break; + case 1: + mspi->subblock = QE_CR_SUBBLOCK_SPI2; + break; + } + } + + pram_ofs = fsl_spi_cpm_get_pram(mspi); + if (IS_ERR_VALUE(pram_ofs)) { + dev_err(dev, "can't allocate spi parameter ram\n"); + goto err_pram; + } + + bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) + + sizeof(*mspi->rx_bd), 8); + if (IS_ERR_VALUE(bds_ofs)) { + dev_err(dev, "can't allocate bds\n"); + goto err_bds; + } + + mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, mspi->dma_dummy_tx)) { + dev_err(dev, "unable to map dummy tx buffer\n"); + goto err_dummy_tx; + } + + mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { + dev_err(dev, "unable to map dummy rx buffer\n"); + goto err_dummy_rx; + } + + mspi->pram = cpm_muram_addr(pram_ofs); + + mspi->tx_bd = cpm_muram_addr(bds_ofs); + mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); + + /* Initialize parameter ram. 
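Worth a second look in fsl_spi_cpm_init() above: the switch over cell-index places default: ahead of case 0: on purpose, so an unknown or unspecified index warns and then falls through to the SPI1 subblock. The control flow, as a standalone sketch (the QE_CR_SUBBLOCK_* values are stand-ins):

#include <stdio.h>

#define QE_CR_SUBBLOCK_SPI1 1	/* illustrative values only */
#define QE_CR_SUBBLOCK_SPI2 2

static int pick_subblock(int cell_index)
{
	int subblock = cell_index;

	switch (subblock) {
	default:
		fprintf(stderr, "cell-index unspecified, assuming SPI1\n");
		/* fall through */
	case 0:
		subblock = QE_CR_SUBBLOCK_SPI1;
		break;
	case 1:
		subblock = QE_CR_SUBBLOCK_SPI2;
		break;
	}
	return subblock;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_subblock(0),	/* SPI1 */
	       pick_subblock(1),	/* SPI2 */
	       pick_subblock(7));	/* warns, then SPI1 */
	return 0;
}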
*/ + out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd)); + out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd)); + out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL); + out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL); + out_be16(&mspi->pram->mrblr, SPI_MRBLR); + out_be32(&mspi->pram->rstate, 0); + out_be32(&mspi->pram->rdp, 0); + out_be16(&mspi->pram->rbptr, 0); + out_be16(&mspi->pram->rbc, 0); + out_be32(&mspi->pram->rxtmp, 0); + out_be32(&mspi->pram->tstate, 0); + out_be32(&mspi->pram->tdp, 0); + out_be16(&mspi->pram->tbptr, 0); + out_be16(&mspi->pram->tbc, 0); + out_be32(&mspi->pram->txtmp, 0); + + return 0; + +err_dummy_rx: + dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); +err_dummy_tx: + cpm_muram_free(bds_ofs); +err_bds: + cpm_muram_free(pram_ofs); +err_pram: + fsl_spi_free_dummy_rx(); + return -ENOMEM; +} + +static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + + dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); + dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); + cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); + cpm_muram_free(cpm_muram_offset(mspi->pram)); + fsl_spi_free_dummy_rx(); +} + +static void fsl_spi_remove(struct mpc8xxx_spi *mspi) +{ + iounmap(mspi->reg_base); + fsl_spi_cpm_free(mspi); +} + +static struct spi_master * __devinit fsl_spi_probe(struct device *dev, + struct resource *mem, unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; + u32 regval; + int ret = 0; + + master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); + if (master == NULL) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(dev, master); + + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; + + master->setup = fsl_spi_setup; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_spi_remove; + + + ret = fsl_spi_cpm_init(mpc8xxx_spi); + if (ret) + goto err_cpm_init; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + mpc8xxx_spi->rx_shift = 16; + mpc8xxx_spi->tx_shift = 24; + } + + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (mpc8xxx_spi->reg_base == NULL) { + ret = -ENOMEM; + goto err_ioremap; + } + + /* Register for SPI Interrupt */ + ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, + 0, "fsl_spi", mpc8xxx_spi); + + if (ret != 0) + goto free_irq; + + reg_base = mpc8xxx_spi->reg_base; + + /* SPI controller initializations */ + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); + + /* Enable SPI interface */ + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) + regval |= SPMODE_OP; + + mpc8xxx_spi_write_reg(®_base->mode, regval); + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, + mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); + + return master; + +unreg_master: + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); +free_irq: + iounmap(mpc8xxx_spi->reg_base); +err_ioremap: + fsl_spi_cpm_free(mpc8xxx_spi); +err_cpm_init: +err_probe: + spi_master_put(master); +err: + return ERR_PTR(ret); +} + +static void fsl_spi_cs_control(struct spi_device *spi, bool on) +{ + struct 
device *dev = spi->dev.parent; + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); + u16 cs = spi->chip_select; + int gpio = pinfo->gpios[cs]; + bool alow = pinfo->alow_flags[cs]; + + gpio_set_value(gpio, on ^ alow); +} + +static int of_fsl_spi_get_chipselects(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); + unsigned int ngpios; + int i = 0; + int ret; + + ngpios = of_gpio_count(np); + if (!ngpios) { + /* + * SPI w/o chip-select line. One SPI device is still permitted + * though. + */ + pdata->max_chipselect = 1; + return 0; + } + + pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL); + if (!pinfo->gpios) + return -ENOMEM; + memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios)); + + pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags), + GFP_KERNEL); + if (!pinfo->alow_flags) { + ret = -ENOMEM; + goto err_alloc_flags; + } + + for (; i < ngpios; i++) { + int gpio; + enum of_gpio_flags flags; + + gpio = of_get_gpio_flags(np, i, &flags); + if (!gpio_is_valid(gpio)) { + dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); + ret = gpio; + goto err_loop; + } + + ret = gpio_request(gpio, dev_name(dev)); + if (ret) { + dev_err(dev, "can't request gpio #%d: %d\n", i, ret); + goto err_loop; + } + + pinfo->gpios[i] = gpio; + pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW; + + ret = gpio_direction_output(pinfo->gpios[i], + pinfo->alow_flags[i]); + if (ret) { + dev_err(dev, "can't set output direction for gpio " + "#%d: %d\n", i, ret); + goto err_loop; + } + } + + pdata->max_chipselect = ngpios; + pdata->cs_control = fsl_spi_cs_control; + + return 0; + +err_loop: + while (i >= 0) { + if (gpio_is_valid(pinfo->gpios[i])) + gpio_free(pinfo->gpios[i]); + i--; + } + + kfree(pinfo->alow_flags); + pinfo->alow_flags = NULL; +err_alloc_flags: + kfree(pinfo->gpios); + pinfo->gpios = NULL; + return ret; +} + +static int of_fsl_spi_free_chipselects(struct device *dev) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); + int i; + + if (!pinfo->gpios) + return 0; + + for (i = 0; i < pdata->max_chipselect; i++) { + if (gpio_is_valid(pinfo->gpios[i])) + gpio_free(pinfo->gpios[i]); + } + + kfree(pinfo->gpios); + kfree(pinfo->alow_flags); + return 0; +} + +static int __devinit of_fsl_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct spi_master *master; + struct resource mem; + struct resource irq; + int ret = -ENOMEM; + + ret = of_mpc8xxx_spi_probe(ofdev, ofid); + if (ret) + return ret; + + ret = of_fsl_spi_get_chipselects(dev); + if (ret) + goto err; + + ret = of_address_to_resource(np, 0, &mem); + if (ret) + goto err; + + ret = of_irq_to_resource(np, 0, &irq); + if (!ret) { + ret = -EINVAL; + goto err; + } + + master = fsl_spi_probe(dev, &mem, irq.start); + if (IS_ERR(master)) { + ret = PTR_ERR(master); + goto err; + } + + return 0; + +err: + of_fsl_spi_free_chipselects(dev); + return ret; +} + +static int __devexit of_fsl_spi_remove(struct platform_device *ofdev) +{ + int ret; + + ret = mpc8xxx_spi_remove(&ofdev->dev); + if (ret) + return ret; + of_fsl_spi_free_chipselects(&ofdev->dev); + return 0; +} + +static const struct of_device_id of_fsl_spi_match[] = { + { .compatible = "fsl,spi" }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_spi_match); 
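The one-liner in fsl_spi_cs_control() above packs the polarity handling into an XOR: the requested logical state combined with the active-low flag gives the physical level to drive, and gpio_direction_output() in of_fsl_spi_get_chipselects() uses the same flag to park the line deasserted. A tiny standalone check of that truth table:

#include <assert.h>
#include <stdbool.h>

/* Physical level to drive: 'on' is the logical "assert chip select",
 * 'alow' is true when the line is active-low (OF_GPIO_ACTIVE_LOW). */
static bool cs_level(bool on, bool alow)
{
	return on ^ alow;
}

int main(void)
{
	assert(cs_level(true,  false) == true);		/* active-high: assert -> drive 1 */
	assert(cs_level(false, false) == false);
	assert(cs_level(true,  true)  == false);	/* active-low: assert -> drive 0 */
	assert(cs_level(false, true)  == true);
	return 0;
}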
+ +static struct of_platform_driver of_fsl_spi_driver = { + .driver = { + .name = "fsl_spi", + .owner = THIS_MODULE, + .of_match_table = of_fsl_spi_match, + }, + .probe = of_fsl_spi_probe, + .remove = __devexit_p(of_fsl_spi_remove), +}; + +#ifdef CONFIG_MPC832x_RDB +/* + * XXX XXX XXX + * This is "legacy" platform driver, was used by the MPC8323E-RDB boards + * only. The driver should go away soon, since newer MPC8323E-RDB's device + * tree can work with OpenFirmware driver. But for now we support old trees + * as well. + */ +static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) +{ + struct resource *mem; + int irq; + struct spi_master *master; + + if (!pdev->dev.platform_data) + return -EINVAL; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -EINVAL; + + master = fsl_spi_probe(&pdev->dev, mem, irq); + if (IS_ERR(master)) + return PTR_ERR(master); + return 0; +} + +static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev) +{ + return mpc8xxx_spi_remove(&pdev->dev); +} + +MODULE_ALIAS("platform:mpc8xxx_spi"); +static struct platform_driver mpc8xxx_spi_driver = { + .probe = plat_mpc8xxx_spi_probe, + .remove = __devexit_p(plat_mpc8xxx_spi_remove), + .driver = { + .name = "mpc8xxx_spi", + .owner = THIS_MODULE, + }, +}; + +static bool legacy_driver_failed; + +static void __init legacy_driver_register(void) +{ + legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver); +} + +static void __exit legacy_driver_unregister(void) +{ + if (legacy_driver_failed) + return; + platform_driver_unregister(&mpc8xxx_spi_driver); +} +#else +static void __init legacy_driver_register(void) {} +static void __exit legacy_driver_unregister(void) {} +#endif /* CONFIG_MPC832x_RDB */ + +static int __init fsl_spi_init(void) +{ + legacy_driver_register(); + return of_register_platform_driver(&of_fsl_spi_driver); +} +module_init(fsl_spi_init); + +static void __exit fsl_spi_exit(void) +{ + of_unregister_platform_driver(&of_fsl_spi_driver); + legacy_driver_unregister(); +} +module_exit(fsl_spi_exit); + +MODULE_AUTHOR("Kumar Gala"); +MODULE_DESCRIPTION("Simple Freescale SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index 26bd03e6185..63e51b011d5 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c @@ -127,8 +127,7 @@ static inline int getmiso(const struct spi_device *spi) */ #define spidelay(nsecs) do {} while (0) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" /* * These functions can leverage inline expansion of GPIO calls to shrink @@ -147,25 +146,63 @@ static inline int getmiso(const struct spi_device *spi) static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return 
bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); +} + +/* + * These functions do not call setmosi or getmiso if respective flag + * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to + * call when such pin is not present or defined in the controller. + * A separate set of callbacks is defined to get highest possible + * speed in the generic case (when both MISO and MOSI lines are + * available), as optimiser will remove the checks when argument is + * constant. + */ + +static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits); } /*----------------------------------------------------------------------*/ @@ -233,19 +270,30 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) } static int __init -spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label) +spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, + u16 *res_flags) { int value; /* NOTE: SPI_*_GPIO symbols may reference "pdata" */ - value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); - if (value) - goto done; + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) { + value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); + if (value) + goto done; + } else { + /* HW configuration without MOSI pin */ + *res_flags |= SPI_MASTER_NO_TX; + } - value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); - if (value) - goto free_mosi; + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) { + value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); + if (value) + goto free_mosi; + } else { + /* HW configuration without MISO pin */ + *res_flags |= SPI_MASTER_NO_RX; + } value = spi_gpio_alloc(SPI_SCK_GPIO, label, false); if (value) @@ -254,9 +302,11 @@ spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label) goto done; free_miso: - gpio_free(SPI_MISO_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); free_mosi: - gpio_free(SPI_MOSI_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); done: return value; } @@ -267,6 +317,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) struct spi_master *master; struct spi_gpio *spi_gpio; struct spi_gpio_platform_data *pdata; + u16 master_flags = 0; pdata = pdev->dev.platform_data; #ifdef GENERIC_BITBANG @@ -274,7 +325,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) return -ENODEV; #endif - status = spi_gpio_request(pdata, dev_name(&pdev->dev)); + status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags); if (status < 0) return status; @@ -290,6 +341,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) if (pdata) spi_gpio->pdata = *pdata; + master->flags = master_flags; master->bus_num = pdev->id; 
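For board code that wants the new unidirectional spi_gpio support: passing the sentinel for the missing pin makes spi_gpio_request() skip that GPIO and set SPI_MASTER_NO_TX or SPI_MASTER_NO_RX, which in turn selects the flag-aware txrx callbacks below. A hedged sketch of such platform data, not taken from any real board (the GPIO numbers and the device are made up; field names follow struct spi_gpio_platform_data):

#include <linux/spi/spi_gpio.h>
#include <linux/platform_device.h>

/* Hypothetical write-only device (e.g. a display) on bit-banged SPI:
 * no MISO line is wired, so reads are disabled via SPI_GPIO_NO_MISO. */
static struct spi_gpio_platform_data board_spi_gpio_pdata = {
	.sck		= 10,			/* made-up GPIO numbers */
	.mosi		= 11,
	.miso		= SPI_GPIO_NO_MISO,	/* -> master gets SPI_MASTER_NO_RX */
	.num_chipselect	= 1,
};

static struct platform_device board_spi_gpio_device = {
	.name	= "spi_gpio",
	.id	= 1,				/* becomes the SPI bus number */
	.dev	= {
		.platform_data = &board_spi_gpio_pdata,
	},
};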
master->num_chipselect = SPI_N_CHIPSEL; master->setup = spi_gpio_setup; @@ -297,10 +349,18 @@ static int __init spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.master = spi_master_get(master); spi_gpio->bitbang.chipselect = spi_gpio_chipselect; - spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; - spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; - spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; - spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; + + if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; + spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; + } else { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1; + spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; + } spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; spi_gpio->bitbang.flags = SPI_CS_HIGH; @@ -308,8 +368,10 @@ static int __init spi_gpio_probe(struct platform_device *pdev) if (status < 0) { spi_master_put(spi_gpio->bitbang.master); gpio_free: - gpio_free(SPI_MISO_GPIO); - gpio_free(SPI_MOSI_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); gpio_free(SPI_SCK_GPIO); spi_master_put(master); } @@ -332,8 +394,10 @@ static int __exit spi_gpio_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); - gpio_free(SPI_MISO_GPIO); - gpio_free(SPI_MOSI_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); gpio_free(SPI_SCK_GPIO); return status; diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index c195e45f7f3..9469564e688 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c @@ -1,1770 +1,932 @@ /* - * drivers/spi/spi_imx.c - * - * Copyright (C) 2006 SWAPP - * Andrea Paterniani <a.paterniani@swapp-eng.it> - * - * Initial version inspired by: - * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright (C) 2008 Juergen Beisert * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02110-1301, USA. 
*/ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/ioport.h> -#include <linux/errno.h> #include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/platform_device.h> -#include <linux/dma-mapping.h> +#include <linux/slab.h> #include <linux/spi/spi.h> -#include <linux/workqueue.h> -#include <linux/delay.h> -#include <linux/clk.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/types.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/delay.h> - -#include <mach/hardware.h> -#include <mach/imx-dma.h> -#include <mach/spi_imx.h> - -/*-------------------------------------------------------------------------*/ -/* SPI Registers offsets from peripheral base address */ -#define SPI_RXDATA (0x00) -#define SPI_TXDATA (0x04) -#define SPI_CONTROL (0x08) -#define SPI_INT_STATUS (0x0C) -#define SPI_TEST (0x10) -#define SPI_PERIOD (0x14) -#define SPI_DMA (0x18) -#define SPI_RESET (0x1C) - -/* SPI Control Register Bit Fields & Masks */ -#define SPI_CONTROL_BITCOUNT_MASK (0xF) /* Bit Count Mask */ -#define SPI_CONTROL_BITCOUNT(n) (((n) - 1) & SPI_CONTROL_BITCOUNT_MASK) -#define SPI_CONTROL_POL (0x1 << 4) /* Clock Polarity Mask */ -#define SPI_CONTROL_POL_ACT_HIGH (0x0 << 4) /* Active high pol. (0=idle) */ -#define SPI_CONTROL_POL_ACT_LOW (0x1 << 4) /* Active low pol. (1=idle) */ -#define SPI_CONTROL_PHA (0x1 << 5) /* Clock Phase Mask */ -#define SPI_CONTROL_PHA_0 (0x0 << 5) /* Clock Phase 0 */ -#define SPI_CONTROL_PHA_1 (0x1 << 5) /* Clock Phase 1 */ -#define SPI_CONTROL_SSCTL (0x1 << 6) /* /SS Waveform Select Mask */ -#define SPI_CONTROL_SSCTL_0 (0x0 << 6) /* Master: /SS stays low between SPI burst - Slave: RXFIFO advanced by BIT_COUNT */ -#define SPI_CONTROL_SSCTL_1 (0x1 << 6) /* Master: /SS insert pulse between SPI burst - Slave: RXFIFO advanced by /SS rising edge */ -#define SPI_CONTROL_SSPOL (0x1 << 7) /* /SS Polarity Select Mask */ -#define SPI_CONTROL_SSPOL_ACT_LOW (0x0 << 7) /* /SS Active low */ -#define SPI_CONTROL_SSPOL_ACT_HIGH (0x1 << 7) /* /SS Active high */ -#define SPI_CONTROL_XCH (0x1 << 8) /* Exchange */ -#define SPI_CONTROL_SPIEN (0x1 << 9) /* SPI Module Enable */ -#define SPI_CONTROL_MODE (0x1 << 10) /* SPI Mode Select Mask */ -#define SPI_CONTROL_MODE_SLAVE (0x0 << 10) /* SPI Mode Slave */ -#define SPI_CONTROL_MODE_MASTER (0x1 << 10) /* SPI Mode Master */ -#define SPI_CONTROL_DRCTL (0x3 << 11) /* /SPI_RDY Control Mask */ -#define SPI_CONTROL_DRCTL_0 (0x0 << 11) /* Ignore /SPI_RDY */ -#define SPI_CONTROL_DRCTL_1 (0x1 << 11) /* /SPI_RDY falling edge triggers input */ -#define SPI_CONTROL_DRCTL_2 (0x2 << 11) /* /SPI_RDY active low level triggers input */ -#define SPI_CONTROL_DATARATE (0x7 << 13) /* Data Rate Mask */ -#define SPI_PERCLK2_DIV_MIN (0) /* PERCLK2:4 */ -#define SPI_PERCLK2_DIV_MAX (7) /* PERCLK2:512 */ -#define SPI_CONTROL_DATARATE_MIN (SPI_PERCLK2_DIV_MAX << 13) -#define SPI_CONTROL_DATARATE_MAX (SPI_PERCLK2_DIV_MIN << 13) -#define SPI_CONTROL_DATARATE_BAD (SPI_CONTROL_DATARATE_MIN + 1) - -/* SPI Interrupt/Status Register Bit Fields & Masks */ -#define SPI_STATUS_TE (0x1 << 0) /* TXFIFO Empty Status */ -#define SPI_STATUS_TH (0x1 << 1) /* TXFIFO Half Status */ -#define SPI_STATUS_TF (0x1 << 2) /* TXFIFO Full Status */ -#define SPI_STATUS_RR (0x1 << 3) /* RXFIFO Data Ready Status */ -#define SPI_STATUS_RH 
(0x1 << 4) /* RXFIFO Half Status */ -#define SPI_STATUS_RF (0x1 << 5) /* RXFIFO Full Status */ -#define SPI_STATUS_RO (0x1 << 6) /* RXFIFO Overflow */ -#define SPI_STATUS_BO (0x1 << 7) /* Bit Count Overflow */ -#define SPI_STATUS (0xFF) /* SPI Status Mask */ -#define SPI_INTEN_TE (0x1 << 8) /* TXFIFO Empty Interrupt Enable */ -#define SPI_INTEN_TH (0x1 << 9) /* TXFIFO Half Interrupt Enable */ -#define SPI_INTEN_TF (0x1 << 10) /* TXFIFO Full Interrupt Enable */ -#define SPI_INTEN_RE (0x1 << 11) /* RXFIFO Data Ready Interrupt Enable */ -#define SPI_INTEN_RH (0x1 << 12) /* RXFIFO Half Interrupt Enable */ -#define SPI_INTEN_RF (0x1 << 13) /* RXFIFO Full Interrupt Enable */ -#define SPI_INTEN_RO (0x1 << 14) /* RXFIFO Overflow Interrupt Enable */ -#define SPI_INTEN_BO (0x1 << 15) /* Bit Count Overflow Interrupt Enable */ -#define SPI_INTEN (0xFF << 8) /* SPI Interrupt Enable Mask */ - -/* SPI Test Register Bit Fields & Masks */ -#define SPI_TEST_TXCNT (0xF << 0) /* TXFIFO Counter */ -#define SPI_TEST_RXCNT_LSB (4) /* RXFIFO Counter LSB */ -#define SPI_TEST_RXCNT (0xF << 4) /* RXFIFO Counter */ -#define SPI_TEST_SSTATUS (0xF << 8) /* State Machine Status */ -#define SPI_TEST_LBC (0x1 << 14) /* Loop Back Control */ - -/* SPI Period Register Bit Fields & Masks */ -#define SPI_PERIOD_WAIT (0x7FFF << 0) /* Wait Between Transactions */ -#define SPI_PERIOD_MAX_WAIT (0x7FFF) /* Max Wait Between - Transactions */ -#define SPI_PERIOD_CSRC (0x1 << 15) /* Period Clock Source Mask */ -#define SPI_PERIOD_CSRC_BCLK (0x0 << 15) /* Period Clock Source is - Bit Clock */ -#define SPI_PERIOD_CSRC_32768 (0x1 << 15) /* Period Clock Source is - 32.768 KHz Clock */ - -/* SPI DMA Register Bit Fields & Masks */ -#define SPI_DMA_RHDMA (0x1 << 4) /* RXFIFO Half Status */ -#define SPI_DMA_RFDMA (0x1 << 5) /* RXFIFO Full Status */ -#define SPI_DMA_TEDMA (0x1 << 6) /* TXFIFO Empty Status */ -#define SPI_DMA_THDMA (0x1 << 7) /* TXFIFO Half Status */ -#define SPI_DMA_RHDEN (0x1 << 12) /* RXFIFO Half DMA Request Enable */ -#define SPI_DMA_RFDEN (0x1 << 13) /* RXFIFO Full DMA Request Enable */ -#define SPI_DMA_TEDEN (0x1 << 14) /* TXFIFO Empty DMA Request Enable */ -#define SPI_DMA_THDEN (0x1 << 15) /* TXFIFO Half DMA Request Enable */ - -/* SPI Soft Reset Register Bit Fields & Masks */ -#define SPI_RESET_START (0x1) /* Start */ - -/* Default SPI configuration values */ -#define SPI_DEFAULT_CONTROL \ -( \ - SPI_CONTROL_BITCOUNT(16) | \ - SPI_CONTROL_POL_ACT_HIGH | \ - SPI_CONTROL_PHA_0 | \ - SPI_CONTROL_SPIEN | \ - SPI_CONTROL_SSCTL_1 | \ - SPI_CONTROL_MODE_MASTER | \ - SPI_CONTROL_DRCTL_0 | \ - SPI_CONTROL_DATARATE_MIN \ -) -#define SPI_DEFAULT_ENABLE_LOOPBACK (0) -#define SPI_DEFAULT_ENABLE_DMA (0) -#define SPI_DEFAULT_PERIOD_WAIT (8) -/*-------------------------------------------------------------------------*/ - - -/*-------------------------------------------------------------------------*/ -/* TX/RX SPI FIFO size */ -#define SPI_FIFO_DEPTH (8) -#define SPI_FIFO_BYTE_WIDTH (2) -#define SPI_FIFO_OVERFLOW_MARGIN (2) - -/* DMA burst length for half full/empty request trigger */ -#define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2) - -/* Dummy char output to achieve reads. - Choosing something different from all zeroes may help pattern recogition - for oscilloscope analysis, but may break some drivers. 
*/ -#define SPI_DUMMY_u8 0 -#define SPI_DUMMY_u16 ((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8) -#define SPI_DUMMY_u32 ((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16) - -/** - * Macro to change a u32 field: - * @r : register to edit - * @m : bit mask - * @v : new value for the field correctly bit-alligned -*/ -#define u32_EDIT(r, m, v) r = (r & ~(m)) | (v) - -/* Message state */ -#define START_STATE ((void*)0) -#define RUNNING_STATE ((void*)1) -#define DONE_STATE ((void*)2) -#define ERROR_STATE ((void*)-1) - -/* Queue state */ -#define QUEUE_RUNNING (0) -#define QUEUE_STOPPED (1) - -#define IS_DMA_ALIGNED(x) (((u32)(x) & 0x03) == 0) -#define DMA_ALIGNMENT 4 -/*-------------------------------------------------------------------------*/ - - -/*-------------------------------------------------------------------------*/ -/* Driver data structs */ - -/* Context */ -struct driver_data { - /* Driver model hookup */ - struct platform_device *pdev; - - /* SPI framework hookup */ - struct spi_master *master; +#include <mach/spi.h> - /* IMX hookup */ - struct spi_imx_master *master_info; - - /* Memory resources and SPI regs virtual address */ - struct resource *ioarea; - void __iomem *regs; - - /* SPI RX_DATA physical address */ - dma_addr_t rd_data_phys; - - /* Driver message queue */ - struct workqueue_struct *workqueue; - struct work_struct work; - spinlock_t lock; - struct list_head queue; - int busy; - int run; - - /* Message Transfer pump */ - struct tasklet_struct pump_transfers; - - /* Current message, transfer and state */ - struct spi_message *cur_msg; - struct spi_transfer *cur_transfer; - struct chip_data *cur_chip; - - /* Rd / Wr buffers pointers */ - size_t len; - void *tx; - void *tx_end; - void *rx; - void *rx_end; - - u8 rd_only; - u8 n_bytes; - int cs_change; - - /* Function pointers */ - irqreturn_t (*transfer_handler)(struct driver_data *drv_data); - void (*cs_control)(u32 command); - - /* DMA setup */ - int rx_channel; - int tx_channel; - dma_addr_t rx_dma; - dma_addr_t tx_dma; - int rx_dma_needs_unmap; - int tx_dma_needs_unmap; - size_t tx_map_len; - u32 dummy_dma_buf ____cacheline_aligned; +#define DRIVER_NAME "spi_imx" - struct clk *clk; -}; +#define MXC_CSPIRXDATA 0x00 +#define MXC_CSPITXDATA 0x04 +#define MXC_CSPICTRL 0x08 +#define MXC_CSPIINT 0x0c +#define MXC_RESET 0x1c -/* Runtime state */ -struct chip_data { - u32 control; - u32 period; - u32 test; +#define MX3_CSPISTAT 0x14 +#define MX3_CSPISTAT_RR (1 << 3) - u8 enable_dma:1; - u8 bits_per_word; - u8 n_bytes; - u32 max_speed_hz; +/* generic defines to abstract from the different register layouts */ +#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ +#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ - void (*cs_control)(u32 command); +struct spi_imx_config { + unsigned int speed_hz; + unsigned int bpw; + unsigned int mode; + u8 cs; }; -/*-------------------------------------------------------------------------*/ +enum spi_imx_devtype { + SPI_IMX_VER_IMX1, + SPI_IMX_VER_0_0, + SPI_IMX_VER_0_4, + SPI_IMX_VER_0_5, + SPI_IMX_VER_0_7, + SPI_IMX_VER_2_3, +}; -static void pump_messages(struct work_struct *work); +struct spi_imx_data; -static void flush(struct driver_data *drv_data) -{ - void __iomem *regs = drv_data->regs; - u32 control; +struct spi_imx_devtype_data { + void (*intctrl)(struct spi_imx_data *, int); + int (*config)(struct spi_imx_data *, struct spi_imx_config *); + void (*trigger)(struct spi_imx_data *); + int (*rx_available)(struct spi_imx_data *); + void (*reset)(struct spi_imx_data *); + unsigned int 
fifosize; +}; - dev_dbg(&drv_data->pdev->dev, "flush\n"); +struct spi_imx_data { + struct spi_bitbang bitbang; - /* Wait for end of transaction */ - do { - control = readl(regs + SPI_CONTROL); - } while (control & SPI_CONTROL_XCH); + struct completion xfer_done; + void *base; + int irq; + struct clk *clk; + unsigned long spi_clk; + int *chipselect; - /* Release chip select if requested, transfer delays are - handled in pump_transfers */ - if (drv_data->cs_change) - drv_data->cs_control(SPI_CS_DEASSERT); + unsigned int count; + void (*tx)(struct spi_imx_data *); + void (*rx)(struct spi_imx_data *); + void *rx_buf; + const void *tx_buf; + unsigned int txfifo; /* number of words pushed in tx FIFO */ - /* Disable SPI to flush FIFOs */ - writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL); - writel(control, regs + SPI_CONTROL); -} + struct spi_imx_devtype_data devtype_data; +}; -static void restore_state(struct driver_data *drv_data) -{ - void __iomem *regs = drv_data->regs; - struct chip_data *chip = drv_data->cur_chip; - - /* Load chip registers */ - dev_dbg(&drv_data->pdev->dev, - "restore_state\n" - " test = 0x%08X\n" - " control = 0x%08X\n", - chip->test, - chip->control); - writel(chip->test, regs + SPI_TEST); - writel(chip->period, regs + SPI_PERIOD); - writel(0, regs + SPI_INT_STATUS); - writel(chip->control, regs + SPI_CONTROL); -} +#define MXC_SPI_BUF_RX(type) \ +static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \ +{ \ + unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \ + \ + if (spi_imx->rx_buf) { \ + *(type *)spi_imx->rx_buf = val; \ + spi_imx->rx_buf += sizeof(type); \ + } \ +} + +#define MXC_SPI_BUF_TX(type) \ +static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \ +{ \ + type val = 0; \ + \ + if (spi_imx->tx_buf) { \ + val = *(type *)spi_imx->tx_buf; \ + spi_imx->tx_buf += sizeof(type); \ + } \ + \ + spi_imx->count -= sizeof(type); \ + \ + writel(val, spi_imx->base + MXC_CSPITXDATA); \ +} + +MXC_SPI_BUF_RX(u8) +MXC_SPI_BUF_TX(u8) +MXC_SPI_BUF_RX(u16) +MXC_SPI_BUF_TX(u16) +MXC_SPI_BUF_RX(u32) +MXC_SPI_BUF_TX(u32) + +/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set + * (which is currently not the case in this driver) + */ +static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, + 256, 384, 512, 768, 1024}; -static void null_cs_control(u32 command) +/* MX21, MX27 */ +static unsigned int spi_imx_clkdiv_1(unsigned int fin, + unsigned int fspi) { -} + int i, max; -static inline u32 data_to_write(struct driver_data *drv_data) -{ - return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes; -} + if (cpu_is_mx21()) + max = 18; + else + max = 16; -static inline u32 data_to_read(struct driver_data *drv_data) -{ - return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes; + for (i = 2; i < max; i++) + if (fspi * mxc_clkdivs[i] >= fin) + return i; + + return max; } -static int write(struct driver_data *drv_data) +/* MX1, MX31, MX35, MX51 CSPI */ +static unsigned int spi_imx_clkdiv_2(unsigned int fin, + unsigned int fspi) { - void __iomem *regs = drv_data->regs; - void *tx = drv_data->tx; - void *tx_end = drv_data->tx_end; - u8 n_bytes = drv_data->n_bytes; - u32 remaining_writes; - u32 fifo_avail_space; - u32 n; - u16 d; - - /* Compute how many fifo writes to do */ - remaining_writes = (u32)(tx_end - tx) / n_bytes; - fifo_avail_space = SPI_FIFO_DEPTH - - (readl(regs + SPI_TEST) & SPI_TEST_TXCNT); - if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN)) - /* Fix 
misunderstood receive overflow */ - fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN; - n = min(remaining_writes, fifo_avail_space); - - dev_dbg(&drv_data->pdev->dev, - "write type %s\n" - " remaining writes = %d\n" - " fifo avail space = %d\n" - " fifo writes = %d\n", - (n_bytes == 1) ? "u8" : "u16", - remaining_writes, - fifo_avail_space, - n); - - if (n > 0) { - /* Fill SPI TXFIFO */ - if (drv_data->rd_only) { - tx += n * n_bytes; - while (n--) - writel(SPI_DUMMY_u16, regs + SPI_TXDATA); - } else { - if (n_bytes == 1) { - while (n--) { - d = *(u8*)tx; - writel(d, regs + SPI_TXDATA); - tx += 1; - } - } else { - while (n--) { - d = *(u16*)tx; - writel(d, regs + SPI_TXDATA); - tx += 2; - } - } - } - - /* Trigger transfer */ - writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH, - regs + SPI_CONTROL); + int i, div = 4; - /* Update tx pointer */ - drv_data->tx = tx; + for (i = 0; i < 7; i++) { + if (fspi * div >= fin) + return i; + div <<= 1; } - return (tx >= tx_end); + return 7; } -static int read(struct driver_data *drv_data) -{ - void __iomem *regs = drv_data->regs; - void *rx = drv_data->rx; - void *rx_end = drv_data->rx_end; - u8 n_bytes = drv_data->n_bytes; - u32 remaining_reads; - u32 fifo_rxcnt; - u32 n; - u16 d; - - /* Compute how many fifo reads to do */ - remaining_reads = (u32)(rx_end - rx) / n_bytes; - fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >> - SPI_TEST_RXCNT_LSB; - n = min(remaining_reads, fifo_rxcnt); - - dev_dbg(&drv_data->pdev->dev, - "read type %s\n" - " remaining reads = %d\n" - " fifo rx count = %d\n" - " fifo reads = %d\n", - (n_bytes == 1) ? "u8" : "u16", - remaining_reads, - fifo_rxcnt, - n); - - if (n > 0) { - /* Read SPI RXFIFO */ - if (n_bytes == 1) { - while (n--) { - d = readl(regs + SPI_RXDATA); - *((u8*)rx) = d; - rx += 1; - } - } else { - while (n--) { - d = readl(regs + SPI_RXDATA); - *((u16*)rx) = d; - rx += 2; - } - } +#define SPI_IMX2_3_CTRL 0x08 +#define SPI_IMX2_3_CTRL_ENABLE (1 << 0) +#define SPI_IMX2_3_CTRL_XCH (1 << 2) +#define SPI_IMX2_3_CTRL_MODE(cs) (1 << ((cs) + 4)) +#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8 +#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12 +#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18) +#define SPI_IMX2_3_CTRL_BL_OFFSET 20 - /* Update rx pointer */ - drv_data->rx = rx; - } +#define SPI_IMX2_3_CONFIG 0x0c +#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) +#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) +#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) +#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) - return (rx >= rx_end); -} +#define SPI_IMX2_3_INT 0x10 +#define SPI_IMX2_3_INT_TEEN (1 << 0) +#define SPI_IMX2_3_INT_RREN (1 << 3) + +#define SPI_IMX2_3_STAT 0x18 +#define SPI_IMX2_3_STAT_RR (1 << 3) -static void *next_transfer(struct driver_data *drv_data) +/* MX51 eCSPI */ +static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi) { - struct spi_message *msg = drv_data->cur_msg; - struct spi_transfer *trans = drv_data->cur_transfer; - - /* Move to next transfer */ - if (trans->transfer_list.next != &msg->transfers) { - drv_data->cur_transfer = - list_entry(trans->transfer_list.next, - struct spi_transfer, - transfer_list); - return RUNNING_STATE; - } + /* + * there are two 4-bit dividers, the pre-divider divides by + * $pre, the post-divider by 2^$post + */ + unsigned int pre, post; - return DONE_STATE; -} + if (unlikely(fspi > fin)) + return 0; -static int map_dma_buffers(struct driver_data *drv_data) -{ - struct spi_message *msg; - struct device *dev; - void *buf; - - 
drv_data->rx_dma_needs_unmap = 0; - drv_data->tx_dma_needs_unmap = 0; - - if (!drv_data->master_info->enable_dma || - !drv_data->cur_chip->enable_dma) - return -1; - - msg = drv_data->cur_msg; - dev = &msg->spi->dev; - if (msg->is_dma_mapped) { - if (drv_data->tx_dma) - /* The caller provided at least dma and cpu virtual - address for write; pump_transfers() will consider the - transfer as write only if cpu rx virtual address is - NULL */ - return 0; - - if (drv_data->rx_dma) { - /* The caller provided dma and cpu virtual address to - performe read only transfer --> - use drv_data->dummy_dma_buf for dummy writes to - achive reads */ - buf = &drv_data->dummy_dma_buf; - drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf); - drv_data->tx_dma = dma_map_single(dev, - buf, - drv_data->tx_map_len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, drv_data->tx_dma)) - return -1; - - drv_data->tx_dma_needs_unmap = 1; - - /* Flags transfer as rd_only for pump_transfers() DMA - regs programming (should be redundant) */ - drv_data->tx = NULL; - - return 0; - } - } + post = fls(fin) - fls(fspi); + if (fin > fspi << post) + post++; - if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) - return -1; - - if (drv_data->tx == NULL) { - /* Read only message --> use drv_data->dummy_dma_buf for dummy - writes to achive reads */ - buf = &drv_data->dummy_dma_buf; - drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf); - } else { - buf = drv_data->tx; - drv_data->tx_map_len = drv_data->len; - } - drv_data->tx_dma = dma_map_single(dev, - buf, - drv_data->tx_map_len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, drv_data->tx_dma)) - return -1; - drv_data->tx_dma_needs_unmap = 1; - - /* NULL rx means write-only transfer and no map needed - * since rx DMA will not be used */ - if (drv_data->rx) { - buf = drv_data->rx; - drv_data->rx_dma = dma_map_single(dev, - buf, - drv_data->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, drv_data->rx_dma)) { - if (drv_data->tx_dma) { - dma_unmap_single(dev, - drv_data->tx_dma, - drv_data->tx_map_len, - DMA_TO_DEVICE); - drv_data->tx_dma_needs_unmap = 0; - } - return -1; - } - drv_data->rx_dma_needs_unmap = 1; + /* now we have: (fin <= fspi << post) with post being minimal */ + + post = max(4U, post) - 4; + if (unlikely(post > 0xf)) { + pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", + __func__, fspi, fin); + return 0xff; } - return 0; -} + pre = DIV_ROUND_UP(fin, fspi << post) - 1; -static void unmap_dma_buffers(struct driver_data *drv_data) -{ - struct spi_message *msg = drv_data->cur_msg; - struct device *dev = &msg->spi->dev; - - if (drv_data->rx_dma_needs_unmap) { - dma_unmap_single(dev, - drv_data->rx_dma, - drv_data->len, - DMA_FROM_DEVICE); - drv_data->rx_dma_needs_unmap = 0; - } - if (drv_data->tx_dma_needs_unmap) { - dma_unmap_single(dev, - drv_data->tx_dma, - drv_data->tx_map_len, - DMA_TO_DEVICE); - drv_data->tx_dma_needs_unmap = 0; - } + pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", + __func__, fin, fspi, post, pre); + return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) | + (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET); } -/* Caller already set message->status (dma is already blocked) */ -static void giveback(struct spi_message *message, struct driver_data *drv_data) +static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable) { - void __iomem *regs = drv_data->regs; - - /* Bring SPI to sleep; restore_state() and pump_transfer() - will do new setup */ - writel(0, regs + SPI_INT_STATUS); - writel(0, regs + 
SPI_DMA); + unsigned val = 0; - /* Unconditioned deselct */ - drv_data->cs_control(SPI_CS_DEASSERT); + if (enable & MXC_INT_TE) + val |= SPI_IMX2_3_INT_TEEN; - message->state = NULL; - if (message->complete) - message->complete(message->context); + if (enable & MXC_INT_RR) + val |= SPI_IMX2_3_INT_RREN; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - drv_data->cur_chip = NULL; - queue_work(drv_data->workqueue, &drv_data->work); + writel(val, spi_imx->base + SPI_IMX2_3_INT); } -static void dma_err_handler(int channel, void *data, int errcode) +static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx) { - struct driver_data *drv_data = data; - struct spi_message *msg = drv_data->cur_msg; + u32 reg; - dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n"); + reg = readl(spi_imx->base + SPI_IMX2_3_CTRL); + reg |= SPI_IMX2_3_CTRL_XCH; + writel(reg, spi_imx->base + SPI_IMX2_3_CTRL); +} - /* Disable both rx and tx dma channels */ - imx_dma_disable(drv_data->rx_channel); - imx_dma_disable(drv_data->tx_channel); - unmap_dma_buffers(drv_data); +static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0; - flush(drv_data); + /* set master mode */ + ctrl |= SPI_IMX2_3_CTRL_MODE(config->cs); - msg->state = ERROR_STATE; - tasklet_schedule(&drv_data->pump_transfers); -} + /* set clock speed */ + ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz); -static void dma_tx_handler(int channel, void *data) -{ - struct driver_data *drv_data = data; + /* set chip select to use */ + ctrl |= SPI_IMX2_3_CTRL_CS(config->cs); - dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n"); + ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET; - imx_dma_disable(channel); + cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs); - /* Now waits for TX FIFO empty */ - writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS); -} + if (config->mode & SPI_CPHA) + cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs); -static irqreturn_t dma_transfer(struct driver_data *drv_data) -{ - u32 status; - struct spi_message *msg = drv_data->cur_msg; - void __iomem *regs = drv_data->regs; + if (config->mode & SPI_CPOL) + cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs); - status = readl(regs + SPI_INT_STATUS); + if (config->mode & SPI_CS_HIGH) + cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs); - if ((status & (SPI_INTEN_RO | SPI_STATUS_RO)) - == (SPI_INTEN_RO | SPI_STATUS_RO)) { - writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS); + writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL); + writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG); - imx_dma_disable(drv_data->tx_channel); - imx_dma_disable(drv_data->rx_channel); - unmap_dma_buffers(drv_data); + return 0; +} - flush(drv_data); +static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR; +} - dev_warn(&drv_data->pdev->dev, - "dma_transfer - fifo overun\n"); +static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (spi_imx2_3_rx_available(spi_imx)) + readl(spi_imx->base + MXC_CSPIRXDATA); +} - msg->state = ERROR_STATE; - tasklet_schedule(&drv_data->pump_transfers); +#define MX31_INTREG_TEEN (1 << 0) +#define MX31_INTREG_RREN (1 << 3) - return IRQ_HANDLED; - } +#define MX31_CSPICTRL_ENABLE (1 << 0) +#define MX31_CSPICTRL_MASTER (1 << 1) +#define MX31_CSPICTRL_XCH (1 << 2) +#define MX31_CSPICTRL_POL (1 << 4) +#define MX31_CSPICTRL_PHA (1 << 5) 
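[Editor's note] spi_imx2_3_clkdiv() above factors the requested divider into a 4-bit linear pre-divider and a 4-bit power-of-two post-divider. The standalone sketch below re-implements the same arithmetic so the rounding can be checked outside the kernel; fls() is emulated with a GCC builtin, the effective divider is assumed to be (pre + 1) * 2^post, and the 96 MHz / 10 MHz figures are only an example.

/*
 * Standalone illustration of the eCSPI divider maths in spi_imx2_3_clkdiv().
 * Not kernel code; fls() is emulated here.
 */
#include <stdio.h>

static unsigned int fls_u32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static void ecspi_clkdiv(unsigned int fin, unsigned int fspi)
{
        unsigned int pre, post;

        if (fspi > fin)
                return;

        post = fls_u32(fin) - fls_u32(fspi);
        if (fin > fspi << post)
                post++;         /* now fin <= fspi << post, with post minimal */

        post = (post > 4 ? post : 4) - 4;                       /* max(4, post) - 4 */
        pre = (fin + (fspi << post) - 1) / (fspi << post) - 1;  /* DIV_ROUND_UP - 1 */

        printf("fin=%u fspi=%u -> pre=%u post=%u actual=%u Hz\n",
               fin, fspi, pre, post, fin / ((pre + 1) << post));
}

int main(void)
{
        /* 96 MHz reference, 10 MHz requested -> pre=9, post=0, 9.6 MHz */
        ecspi_clkdiv(96000000, 10000000);
        return 0;
}

The driver then packs pre and post into SPI_IMX2_3_CTRL via the PREDIV/POSTDIV offsets shown above.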
+#define MX31_CSPICTRL_SSCTL (1 << 6) +#define MX31_CSPICTRL_SSPOL (1 << 7) +#define MX31_CSPICTRL_BC_SHIFT 8 +#define MX35_CSPICTRL_BL_SHIFT 20 +#define MX31_CSPICTRL_CS_SHIFT 24 +#define MX35_CSPICTRL_CS_SHIFT 12 +#define MX31_CSPICTRL_DR_SHIFT 16 - if (status & SPI_STATUS_TE) { - writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS); - - if (drv_data->rx) { - /* Wait end of transfer before read trailing data */ - while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) - cpu_relax(); - - imx_dma_disable(drv_data->rx_channel); - unmap_dma_buffers(drv_data); - - /* Release chip select if requested, transfer delays are - handled in pump_transfers() */ - if (drv_data->cs_change) - drv_data->cs_control(SPI_CS_DEASSERT); - - /* Calculate number of trailing data and read them */ - dev_dbg(&drv_data->pdev->dev, - "dma_transfer - test = 0x%08X\n", - readl(regs + SPI_TEST)); - drv_data->rx = drv_data->rx_end - - ((readl(regs + SPI_TEST) & - SPI_TEST_RXCNT) >> - SPI_TEST_RXCNT_LSB)*drv_data->n_bytes; - read(drv_data); - } else { - /* Write only transfer */ - unmap_dma_buffers(drv_data); - - flush(drv_data); - } +#define MX31_CSPISTATUS 0x14 +#define MX31_STATUS_RR (1 << 3) - /* End of transfer, update total byte transfered */ - msg->actual_length += drv_data->len; +/* These functions also work for the i.MX35, but be aware that + * the i.MX35 has a slightly different register layout for bits + * we do not use here. + */ +static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned int val = 0; - /* Move to next transfer */ - msg->state = next_transfer(drv_data); + if (enable & MXC_INT_TE) + val |= MX31_INTREG_TEEN; + if (enable & MXC_INT_RR) + val |= MX31_INTREG_RREN; - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); + writel(val, spi_imx->base + MXC_CSPIINT); +} - return IRQ_HANDLED; - } +static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx) +{ + unsigned int reg; - /* Opps problem detected */ - return IRQ_NONE; + reg = readl(spi_imx->base + MXC_CSPICTRL); + reg |= MX31_CSPICTRL_XCH; + writel(reg, spi_imx->base + MXC_CSPICTRL); } -static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data) +static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) { - struct spi_message *msg = drv_data->cur_msg; - void __iomem *regs = drv_data->regs; - u32 status; - irqreturn_t handled = IRQ_NONE; + unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; + + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX31_CSPICTRL_DR_SHIFT; - status = readl(regs + SPI_INT_STATUS); + reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; - if (status & SPI_INTEN_TE) { - /* TXFIFO Empty Interrupt on the last transfered word */ - writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS); - dev_dbg(&drv_data->pdev->dev, - "interrupt_wronly_transfer - end of tx\n"); + if (config->mode & SPI_CPHA) + reg |= MX31_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX31_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX31_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT; - flush(drv_data); + writel(reg, spi_imx->base + MXC_CSPICTRL); - /* Update total byte transfered */ - msg->actual_length += drv_data->len; + return 0; +} - /* Move to next transfer */ - msg->state = next_transfer(drv_data); +static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + 
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX31_CSPICTRL_DR_SHIFT; - return IRQ_HANDLED; - } else { - while (status & SPI_STATUS_TH) { - dev_dbg(&drv_data->pdev->dev, - "interrupt_wronly_transfer - status = 0x%08X\n", - status); - - /* Pump data */ - if (write(drv_data)) { - /* End of TXFIFO writes, - now wait until TXFIFO is empty */ - writel(SPI_INTEN_TE, regs + SPI_INT_STATUS); - return IRQ_HANDLED; - } + reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; + reg |= MX31_CSPICTRL_SSCTL; - status = readl(regs + SPI_INT_STATUS); + if (config->mode & SPI_CPHA) + reg |= MX31_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX31_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX31_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 32) << MX35_CSPICTRL_CS_SHIFT; - /* We did something */ - handled = IRQ_HANDLED; - } - } + writel(reg, spi_imx->base + MXC_CSPICTRL); - return handled; + return 0; } -static irqreturn_t interrupt_transfer(struct driver_data *drv_data) +static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx) { - struct spi_message *msg = drv_data->cur_msg; - void __iomem *regs = drv_data->regs; - u32 status, control; - irqreturn_t handled = IRQ_NONE; - unsigned long limit; - - status = readl(regs + SPI_INT_STATUS); - - if (status & SPI_INTEN_TE) { - /* TXFIFO Empty Interrupt on the last transfered word */ - writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS); - dev_dbg(&drv_data->pdev->dev, - "interrupt_transfer - end of tx\n"); - - if (msg->state == ERROR_STATE) { - /* RXFIFO overrun was detected and message aborted */ - flush(drv_data); - } else { - /* Wait for end of transaction */ - do { - control = readl(regs + SPI_CONTROL); - } while (control & SPI_CONTROL_XCH); - - /* Release chip select if requested, transfer delays are - handled in pump_transfers */ - if (drv_data->cs_change) - drv_data->cs_control(SPI_CS_DEASSERT); - - /* Read trailing bytes */ - limit = loops_per_jiffy << 1; - while ((read(drv_data) == 0) && --limit) - cpu_relax(); - - if (limit == 0) - dev_err(&drv_data->pdev->dev, - "interrupt_transfer - " - "trailing byte read failed\n"); - else - dev_dbg(&drv_data->pdev->dev, - "interrupt_transfer - end of rx\n"); - - /* Update total byte transfered */ - msg->actual_length += drv_data->len; - - /* Move to next transfer */ - msg->state = next_transfer(drv_data); - } + return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; +} - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); +static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) + readl(spi_imx->base + MXC_CSPIRXDATA); +} - return IRQ_HANDLED; - } else { - while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) { - dev_dbg(&drv_data->pdev->dev, - "interrupt_transfer - status = 0x%08X\n", - status); - - if (status & SPI_STATUS_RO) { - /* RXFIFO overrun, abort message end wait - until TXFIFO is empty */ - writel(SPI_INTEN_TE, regs + SPI_INT_STATUS); - - dev_warn(&drv_data->pdev->dev, - "interrupt_transfer - fifo overun\n" - " data not yet written = %d\n" - " data not yet read = %d\n", - data_to_write(drv_data), - data_to_read(drv_data)); - - msg->state = ERROR_STATE; - - return IRQ_HANDLED; - } +#define MX27_INTREG_RR (1 << 4) +#define 
MX27_INTREG_TEEN (1 << 9) +#define MX27_INTREG_RREN (1 << 13) - /* Pump data */ - read(drv_data); - if (write(drv_data)) { - /* End of TXFIFO writes, - now wait until TXFIFO is empty */ - writel(SPI_INTEN_TE, regs + SPI_INT_STATUS); - return IRQ_HANDLED; - } +#define MX27_CSPICTRL_POL (1 << 5) +#define MX27_CSPICTRL_PHA (1 << 6) +#define MX27_CSPICTRL_SSPOL (1 << 8) +#define MX27_CSPICTRL_XCH (1 << 9) +#define MX27_CSPICTRL_ENABLE (1 << 10) +#define MX27_CSPICTRL_MASTER (1 << 11) +#define MX27_CSPICTRL_DR_SHIFT 14 +#define MX27_CSPICTRL_CS_SHIFT 19 - status = readl(regs + SPI_INT_STATUS); +static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned int val = 0; - /* We did something */ - handled = IRQ_HANDLED; - } - } + if (enable & MXC_INT_TE) + val |= MX27_INTREG_TEEN; + if (enable & MXC_INT_RR) + val |= MX27_INTREG_RREN; - return handled; + writel(val, spi_imx->base + MXC_CSPIINT); } -static irqreturn_t spi_int(int irq, void *dev_id) +static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx) { - struct driver_data *drv_data = (struct driver_data *)dev_id; - - if (!drv_data->cur_msg) { - dev_err(&drv_data->pdev->dev, - "spi_int - bad message state\n"); - /* Never fail */ - return IRQ_HANDLED; - } + unsigned int reg; - return drv_data->transfer_handler(drv_data); + reg = readl(spi_imx->base + MXC_CSPICTRL); + reg |= MX27_CSPICTRL_XCH; + writel(reg, spi_imx->base + MXC_CSPICTRL); } -static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate) +static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) { - return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13)); + unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; + + reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << + MX27_CSPICTRL_DR_SHIFT; + reg |= config->bpw - 1; + + if (config->mode & SPI_CPHA) + reg |= MX27_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX27_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX27_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT; + + writel(reg, spi_imx->base + MXC_CSPICTRL); + + return 0; } -static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz) +static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx) { - u32 div; - u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2; - - for (div = SPI_PERCLK2_DIV_MIN; - div <= SPI_PERCLK2_DIV_MAX; - div++, quantized_hz >>= 1) { - if (quantized_hz <= speed_hz) - /* Max available speed LEQ required speed */ - return div << 13; - } - return SPI_CONTROL_DATARATE_BAD; + return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; } -static void pump_transfers(unsigned long data) +static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx) { - struct driver_data *drv_data = (struct driver_data *)data; - struct spi_message *message; - struct spi_transfer *transfer, *previous; - struct chip_data *chip; - void __iomem *regs; - u32 tmp, control; - - dev_dbg(&drv_data->pdev->dev, "pump_transfer\n"); + writel(1, spi_imx->base + MXC_RESET); +} - message = drv_data->cur_msg; +#define MX1_INTREG_RR (1 << 3) +#define MX1_INTREG_TEEN (1 << 8) +#define MX1_INTREG_RREN (1 << 11) - /* Handle for abort */ - if (message->state == ERROR_STATE) { - message->status = -EIO; - giveback(message, drv_data); - return; - } +#define MX1_CSPICTRL_POL (1 << 4) +#define MX1_CSPICTRL_PHA (1 << 5) +#define MX1_CSPICTRL_XCH (1 << 8) 
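[Editor's note] mx27_config() above derives its data-rate field from spi_imx_clkdiv_1(), which walks the mxc_clkdivs[] table for the smallest divider that does not exceed the requested rate. A standalone sketch of that lookup follows; the 48 MHz / 1 MHz figures are only an example, and the "start at index 2, stop at 16 (18 on i.MX21)" rule is copied from the driver code above.

/*
 * Standalone illustration of spi_imx_clkdiv_1(): pick the smallest table
 * index i (2 <= i < max) with fspi * mxc_clkdivs[i] >= fin; the index is
 * what mx27_config() shifts into the DR field.  Not kernel code.
 */
#include <stdio.h>

static const unsigned int mxc_clkdivs[] = {
        0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
        256, 384, 512, 768, 1024
};

static int clkdiv_1(unsigned int fin, unsigned int fspi, int max)
{
        int i;

        for (i = 2; i < max; i++)
                if (fspi * mxc_clkdivs[i] >= fin)
                        return i;
        return max;
}

int main(void)
{
        /* 48 MHz perclk, 1 MHz requested: 48 MHz / 48 = 1 MHz -> index 9 */
        int i = clkdiv_1(48000000, 1000000, 16);        /* max = 16 on i.MX27 */

        printf("index %d -> divide by %u\n", i, mxc_clkdivs[i]);
        return 0;
}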
+#define MX1_CSPICTRL_ENABLE (1 << 9) +#define MX1_CSPICTRL_MASTER (1 << 10) +#define MX1_CSPICTRL_DR_SHIFT 13 - /* Handle end of message */ - if (message->state == DONE_STATE) { - message->status = 0; - giveback(message, drv_data); - return; - } +static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned int val = 0; - chip = drv_data->cur_chip; - - /* Delay if requested at end of transfer*/ - transfer = drv_data->cur_transfer; - if (message->state == RUNNING_STATE) { - previous = list_entry(transfer->transfer_list.prev, - struct spi_transfer, - transfer_list); - if (previous->delay_usecs) - udelay(previous->delay_usecs); - } else { - /* START_STATE */ - message->state = RUNNING_STATE; - drv_data->cs_control = chip->cs_control; - } + if (enable & MXC_INT_TE) + val |= MX1_INTREG_TEEN; + if (enable & MXC_INT_RR) + val |= MX1_INTREG_RREN; - transfer = drv_data->cur_transfer; - drv_data->tx = (void *)transfer->tx_buf; - drv_data->tx_end = drv_data->tx + transfer->len; - drv_data->rx = transfer->rx_buf; - drv_data->rx_end = drv_data->rx + transfer->len; - drv_data->rx_dma = transfer->rx_dma; - drv_data->tx_dma = transfer->tx_dma; - drv_data->len = transfer->len; - drv_data->cs_change = transfer->cs_change; - drv_data->rd_only = (drv_data->tx == NULL); - - regs = drv_data->regs; - control = readl(regs + SPI_CONTROL); - - /* Bits per word setup */ - tmp = transfer->bits_per_word; - if (tmp == 0) { - /* Use device setup */ - tmp = chip->bits_per_word; - drv_data->n_bytes = chip->n_bytes; - } else - /* Use per-transfer setup */ - drv_data->n_bytes = (tmp <= 8) ? 1 : 2; - u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1); - - /* Speed setup (surely valid because already checked) */ - tmp = transfer->speed_hz; - if (tmp == 0) - tmp = chip->max_speed_hz; - tmp = spi_data_rate(drv_data, tmp); - u32_EDIT(control, SPI_CONTROL_DATARATE, tmp); - - writel(control, regs + SPI_CONTROL); - - /* Assert device chip-select */ - drv_data->cs_control(SPI_CS_ASSERT); - - /* DMA cannot read/write SPI FIFOs other than 16 bits at a time; hence - if bits_per_word is less or equal 8 PIO transfers are performed. - Moreover DMA is convinient for transfer length bigger than FIFOs - byte size. 
*/ - if ((drv_data->n_bytes == 2) && - (drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) && - (map_dma_buffers(drv_data) == 0)) { - dev_dbg(&drv_data->pdev->dev, - "pump dma transfer\n" - " tx = %p\n" - " tx_dma = %08X\n" - " rx = %p\n" - " rx_dma = %08X\n" - " len = %d\n", - drv_data->tx, - (unsigned int)drv_data->tx_dma, - drv_data->rx, - (unsigned int)drv_data->rx_dma, - drv_data->len); - - /* Ensure we have the correct interrupt handler */ - drv_data->transfer_handler = dma_transfer; - - /* Trigger transfer */ - writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH, - regs + SPI_CONTROL); - - /* Setup tx DMA */ - if (drv_data->tx) - /* Linear source address */ - CCR(drv_data->tx_channel) = - CCR_DMOD_FIFO | - CCR_SMOD_LINEAR | - CCR_SSIZ_32 | CCR_DSIZ_16 | - CCR_REN; - else - /* Read only transfer -> fixed source address for - dummy write to achive read */ - CCR(drv_data->tx_channel) = - CCR_DMOD_FIFO | - CCR_SMOD_FIFO | - CCR_SSIZ_32 | CCR_DSIZ_16 | - CCR_REN; - - imx_dma_setup_single( - drv_data->tx_channel, - drv_data->tx_dma, - drv_data->len, - drv_data->rd_data_phys + 4, - DMA_MODE_WRITE); - - if (drv_data->rx) { - /* Setup rx DMA for linear destination address */ - CCR(drv_data->rx_channel) = - CCR_DMOD_LINEAR | - CCR_SMOD_FIFO | - CCR_DSIZ_32 | CCR_SSIZ_16 | - CCR_REN; - imx_dma_setup_single( - drv_data->rx_channel, - drv_data->rx_dma, - drv_data->len, - drv_data->rd_data_phys, - DMA_MODE_READ); - imx_dma_enable(drv_data->rx_channel); - - /* Enable SPI interrupt */ - writel(SPI_INTEN_RO, regs + SPI_INT_STATUS); - - /* Set SPI to request DMA service on both - Rx and Tx half fifo watermark */ - writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA); - } else - /* Write only access -> set SPI to request DMA - service on Tx half fifo watermark */ - writel(SPI_DMA_THDEN, regs + SPI_DMA); - - imx_dma_enable(drv_data->tx_channel); - } else { - dev_dbg(&drv_data->pdev->dev, - "pump pio transfer\n" - " tx = %p\n" - " rx = %p\n" - " len = %d\n", - drv_data->tx, - drv_data->rx, - drv_data->len); - - /* Ensure we have the correct interrupt handler */ - if (drv_data->rx) - drv_data->transfer_handler = interrupt_transfer; - else - drv_data->transfer_handler = interrupt_wronly_transfer; - - /* Enable SPI interrupt */ - if (drv_data->rx) - writel(SPI_INTEN_TH | SPI_INTEN_RO, - regs + SPI_INT_STATUS); - else - writel(SPI_INTEN_TH, regs + SPI_INT_STATUS); - } + writel(val, spi_imx->base + MXC_CSPIINT); } -static void pump_messages(struct work_struct *work) +static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx) { - struct driver_data *drv_data = - container_of(work, struct driver_data, work); - unsigned long flags; - - /* Lock queue and check for queue work */ - spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { - drv_data->busy = 0; - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } - - /* Make sure we are not already running a message */ - if (drv_data->cur_msg) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } + unsigned int reg; - /* Extract head of queue */ - drv_data->cur_msg = list_entry(drv_data->queue.next, - struct spi_message, queue); - list_del_init(&drv_data->cur_msg->queue); - drv_data->busy = 1; - spin_unlock_irqrestore(&drv_data->lock, flags); - - /* Initial message state */ - drv_data->cur_msg->state = START_STATE; - drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, - struct spi_transfer, - transfer_list); - - /* Setup the SPI using the per chip 
configuration */ - drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); - restore_state(drv_data); - - /* Mark as busy and launch transfers */ - tasklet_schedule(&drv_data->pump_transfers); + reg = readl(spi_imx->base + MXC_CSPICTRL); + reg |= MX1_CSPICTRL_XCH; + writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int transfer(struct spi_device *spi, struct spi_message *msg) +static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) { - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - u32 min_speed_hz, max_speed_hz, tmp; - struct spi_transfer *trans; - unsigned long flags; - - msg->actual_length = 0; - - /* Per transfer setup check */ - min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN); - max_speed_hz = spi->max_speed_hz; - list_for_each_entry(trans, &msg->transfers, transfer_list) { - tmp = trans->bits_per_word; - if (tmp > 16) { - dev_err(&drv_data->pdev->dev, - "message rejected : " - "invalid transfer bits_per_word (%d bits)\n", - tmp); - goto msg_rejected; - } - tmp = trans->speed_hz; - if (tmp) { - if (tmp < min_speed_hz) { - dev_err(&drv_data->pdev->dev, - "message rejected : " - "device min speed (%d Hz) exceeds " - "required transfer speed (%d Hz)\n", - min_speed_hz, - tmp); - goto msg_rejected; - } else if (tmp > max_speed_hz) { - dev_err(&drv_data->pdev->dev, - "message rejected : " - "transfer speed (%d Hz) exceeds " - "device max speed (%d Hz)\n", - tmp, - max_speed_hz); - goto msg_rejected; - } - } - } + unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; - /* Message accepted */ - msg->status = -EINPROGRESS; - msg->state = START_STATE; + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX1_CSPICTRL_DR_SHIFT; + reg |= config->bpw - 1; - spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->run == QUEUE_STOPPED) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -ESHUTDOWN; - } + if (config->mode & SPI_CPHA) + reg |= MX1_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX1_CSPICTRL_POL; - list_add_tail(&msg->queue, &drv_data->queue); - if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) - queue_work(drv_data->workqueue, &drv_data->work); + writel(reg, spi_imx->base + MXC_CSPICTRL); - spin_unlock_irqrestore(&drv_data->lock, flags); return 0; +} -msg_rejected: - /* Message rejected and not queued */ - msg->status = -EINVAL; - msg->state = ERROR_STATE; - if (msg->complete) - msg->complete(msg->context); - return -EINVAL; +static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; } -/* On first setup bad values must free chip_data memory since will cause - spi_new_device to fail. Bad value setup from protocol driver are simply not - applied and notified to the calling driver. 
*/ -static int setup(struct spi_device *spi) +static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx) { - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - struct spi_imx_chip *chip_info; - struct chip_data *chip; - int first_setup = 0; - u32 tmp; - int status = 0; - - /* Get controller data */ - chip_info = spi->controller_data; - - /* Get controller_state */ - chip = spi_get_ctldata(spi); - if (chip == NULL) { - first_setup = 1; - - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) { - dev_err(&spi->dev, - "setup - cannot allocate controller state\n"); - return -ENOMEM; - } - chip->control = SPI_DEFAULT_CONTROL; - - if (chip_info == NULL) { - /* spi_board_info.controller_data not is supplied */ - chip_info = kzalloc(sizeof(struct spi_imx_chip), - GFP_KERNEL); - if (!chip_info) { - dev_err(&spi->dev, - "setup - " - "cannot allocate controller data\n"); - status = -ENOMEM; - goto err_first_setup; - } - /* Set controller data default value */ - chip_info->enable_loopback = - SPI_DEFAULT_ENABLE_LOOPBACK; - chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA; - chip_info->ins_ss_pulse = 1; - chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT; - chip_info->cs_control = null_cs_control; - } - } + writel(1, spi_imx->base + MXC_RESET); +} - /* Now set controller state based on controller data */ - - if (first_setup) { - /* SPI loopback */ - if (chip_info->enable_loopback) - chip->test = SPI_TEST_LBC; - else - chip->test = 0; - - /* SPI dma driven */ - chip->enable_dma = chip_info->enable_dma; - - /* SPI /SS pulse between spi burst */ - if (chip_info->ins_ss_pulse) - u32_EDIT(chip->control, - SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1); - else - u32_EDIT(chip->control, - SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0); - - /* SPI bclk waits between each bits_per_word spi burst */ - if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) { - dev_err(&spi->dev, - "setup - " - "bclk_wait exceeds max allowed (%d)\n", - SPI_PERIOD_MAX_WAIT); - goto err_first_setup; - } - chip->period = SPI_PERIOD_CSRC_BCLK | - (chip_info->bclk_wait & SPI_PERIOD_WAIT); - } +/* + * These version numbers are taken from the Freescale driver. Unfortunately it + * doesn't support i.MX1, so this entry doesn't match the scheme. 
:-( + */ +static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = { +#ifdef CONFIG_SPI_IMX_VER_IMX1 + [SPI_IMX_VER_IMX1] = { + .intctrl = mx1_intctrl, + .config = mx1_config, + .trigger = mx1_trigger, + .rx_available = mx1_rx_available, + .reset = mx1_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_0 + [SPI_IMX_VER_0_0] = { + .intctrl = mx27_intctrl, + .config = mx27_config, + .trigger = mx27_trigger, + .rx_available = mx27_rx_available, + .reset = spi_imx0_0_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_4 + [SPI_IMX_VER_0_4] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_4_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_7 + [SPI_IMX_VER_0_7] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_7_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_2_3 + [SPI_IMX_VER_2_3] = { + .intctrl = spi_imx2_3_intctrl, + .config = spi_imx2_3_config, + .trigger = spi_imx2_3_trigger, + .rx_available = spi_imx2_3_rx_available, + .reset = spi_imx2_3_reset, + .fifosize = 64, + }, +#endif +}; - /* SPI mode */ - tmp = spi->mode; - if (tmp & SPI_CS_HIGH) { - u32_EDIT(chip->control, - SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH); - } - switch (tmp & SPI_MODE_3) { - case SPI_MODE_0: - tmp = 0; - break; - case SPI_MODE_1: - tmp = SPI_CONTROL_PHA_1; - break; - case SPI_MODE_2: - tmp = SPI_CONTROL_POL_ACT_LOW; - break; - default: - /* SPI_MODE_3 */ - tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW; - break; - } - u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp); - - /* SPI word width */ - tmp = spi->bits_per_word; - if (tmp > 16) { - status = -EINVAL; - dev_err(&spi->dev, - "setup - " - "invalid bits_per_word (%d)\n", - tmp); - if (first_setup) - goto err_first_setup; - else { - /* Undo setup using chip as backup copy */ - tmp = chip->bits_per_word; - spi->bits_per_word = tmp; - } - } - chip->bits_per_word = tmp; - u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1); - chip->n_bytes = (tmp <= 8) ? 
1 : 2; - - /* SPI datarate */ - tmp = spi_data_rate(drv_data, spi->max_speed_hz); - if (tmp == SPI_CONTROL_DATARATE_BAD) { - status = -EINVAL; - dev_err(&spi->dev, - "setup - " - "HW min speed (%d Hz) exceeds required " - "max speed (%d Hz)\n", - spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN), - spi->max_speed_hz); - if (first_setup) - goto err_first_setup; - else - /* Undo setup using chip as backup copy */ - spi->max_speed_hz = chip->max_speed_hz; - } else { - u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp); - /* Actual rounded max_speed_hz */ - tmp = spi_speed_hz(drv_data, tmp); - spi->max_speed_hz = tmp; - chip->max_speed_hz = tmp; - } +static void spi_imx_chipselect(struct spi_device *spi, int is_active) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int gpio = spi_imx->chipselect[spi->chip_select]; + int active = is_active != BITBANG_CS_INACTIVE; + int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH); - /* SPI chip-select management */ - if (chip_info->cs_control) - chip->cs_control = chip_info->cs_control; - else - chip->cs_control = null_cs_control; - - /* Save controller_state */ - spi_set_ctldata(spi, chip); - - /* Summary */ - dev_dbg(&spi->dev, - "setup succeded\n" - " loopback enable = %s\n" - " dma enable = %s\n" - " insert /ss pulse = %s\n" - " period wait = %d\n" - " mode = %d\n" - " bits per word = %d\n" - " min speed = %d Hz\n" - " rounded max speed = %d Hz\n", - chip->test & SPI_TEST_LBC ? "Yes" : "No", - chip->enable_dma ? "Yes" : "No", - chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No", - chip->period & SPI_PERIOD_WAIT, - spi->mode, - spi->bits_per_word, - spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN), - spi->max_speed_hz); - return status; - -err_first_setup: - kfree(chip); - return status; + if (gpio < 0) + return; + + gpio_set_value(gpio, dev_is_lowactive ^ active); } -static void cleanup(struct spi_device *spi) +static void spi_imx_push(struct spi_imx_data *spi_imx) { - kfree(spi_get_ctldata(spi)); + while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) { + if (!spi_imx->count) + break; + spi_imx->tx(spi_imx); + spi_imx->txfifo++; + } + + spi_imx->devtype_data.trigger(spi_imx); } -static int __init init_queue(struct driver_data *drv_data) +static irqreturn_t spi_imx_isr(int irq, void *dev_id) { - INIT_LIST_HEAD(&drv_data->queue); - spin_lock_init(&drv_data->lock); + struct spi_imx_data *spi_imx = dev_id; + + while (spi_imx->devtype_data.rx_available(spi_imx)) { + spi_imx->rx(spi_imx); + spi_imx->txfifo--; + } - drv_data->run = QUEUE_STOPPED; - drv_data->busy = 0; + if (spi_imx->count) { + spi_imx_push(spi_imx); + return IRQ_HANDLED; + } - tasklet_init(&drv_data->pump_transfers, - pump_transfers, (unsigned long)drv_data); + if (spi_imx->txfifo) { + /* No data left to push, but still waiting for rx data, + * enable receive data available interrupt. 
+ */ + spi_imx->devtype_data.intctrl( + spi_imx, MXC_INT_RR); + return IRQ_HANDLED; + } - INIT_WORK(&drv_data->work, pump_messages); - drv_data->workqueue = create_singlethread_workqueue( - dev_name(drv_data->master->dev.parent)); - if (drv_data->workqueue == NULL) - return -EBUSY; + spi_imx->devtype_data.intctrl(spi_imx, 0); + complete(&spi_imx->xfer_done); - return 0; + return IRQ_HANDLED; } -static int start_queue(struct driver_data *drv_data) +static int spi_imx_setupxfer(struct spi_device *spi, + struct spi_transfer *t) { - unsigned long flags; + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + struct spi_imx_config config; - spin_lock_irqsave(&drv_data->lock, flags); + config.bpw = t ? t->bits_per_word : spi->bits_per_word; + config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; + config.mode = spi->mode; + config.cs = spi->chip_select; - if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -EBUSY; - } + if (!config.speed_hz) + config.speed_hz = spi->max_speed_hz; + if (!config.bpw) + config.bpw = spi->bits_per_word; + if (!config.speed_hz) + config.speed_hz = spi->max_speed_hz; - drv_data->run = QUEUE_RUNNING; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - drv_data->cur_chip = NULL; - spin_unlock_irqrestore(&drv_data->lock, flags); + /* Initialize the functions for transfer */ + if (config.bpw <= 8) { + spi_imx->rx = spi_imx_buf_rx_u8; + spi_imx->tx = spi_imx_buf_tx_u8; + } else if (config.bpw <= 16) { + spi_imx->rx = spi_imx_buf_rx_u16; + spi_imx->tx = spi_imx_buf_tx_u16; + } else if (config.bpw <= 32) { + spi_imx->rx = spi_imx_buf_rx_u32; + spi_imx->tx = spi_imx_buf_tx_u32; + } else + BUG(); - queue_work(drv_data->workqueue, &drv_data->work); + spi_imx->devtype_data.config(spi_imx, &config); return 0; } -static int stop_queue(struct driver_data *drv_data) +static int spi_imx_transfer(struct spi_device *spi, + struct spi_transfer *transfer) { - unsigned long flags; - unsigned limit = 500; - int status = 0; - - spin_lock_irqsave(&drv_data->lock, flags); - - /* This is a bit lame, but is optimized for the common execution path. - * A wait_queue on the drv_data->busy could be used, but then the common - * execution path (pump_messages) would be required to call wake_up or - * friends on every SPI message. 
Do this instead */ - drv_data->run = QUEUE_STOPPED; - while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { - spin_unlock_irqrestore(&drv_data->lock, flags); - msleep(10); - spin_lock_irqsave(&drv_data->lock, flags); - } + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + + spi_imx->tx_buf = transfer->tx_buf; + spi_imx->rx_buf = transfer->rx_buf; + spi_imx->count = transfer->len; + spi_imx->txfifo = 0; + + init_completion(&spi_imx->xfer_done); - if (!list_empty(&drv_data->queue) || drv_data->busy) - status = -EBUSY; + spi_imx_push(spi_imx); - spin_unlock_irqrestore(&drv_data->lock, flags); + spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE); - return status; + wait_for_completion(&spi_imx->xfer_done); + + return transfer->len; } -static int destroy_queue(struct driver_data *drv_data) +static int spi_imx_setup(struct spi_device *spi) { - int status; + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int gpio = spi_imx->chipselect[spi->chip_select]; + + dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, + spi->mode, spi->bits_per_word, spi->max_speed_hz); - status = stop_queue(drv_data); - if (status != 0) - return status; + if (gpio >= 0) + gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); - if (drv_data->workqueue) - destroy_workqueue(drv_data->workqueue); + spi_imx_chipselect(spi, BITBANG_CS_INACTIVE); return 0; } -static int __init spi_imx_probe(struct platform_device *pdev) +static void spi_imx_cleanup(struct spi_device *spi) +{ +} + +static struct platform_device_id spi_imx_devtype[] = { + { + .name = "imx1-cspi", + .driver_data = SPI_IMX_VER_IMX1, + }, { + .name = "imx21-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx25-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx27-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx31-cspi", + .driver_data = SPI_IMX_VER_0_4, + }, { + .name = "imx35-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-ecspi", + .driver_data = SPI_IMX_VER_2_3, + }, { + /* sentinel */ + } +}; + +static int __devinit spi_imx_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct spi_imx_master *platform_info; + struct spi_imx_master *mxc_platform_info; struct spi_master *master; - struct driver_data *drv_data; + struct spi_imx_data *spi_imx; struct resource *res; - int irq, status = 0; + int i, ret; - platform_info = dev->platform_data; - if (platform_info == NULL) { - dev_err(&pdev->dev, "probe - no platform data supplied\n"); - status = -ENODEV; - goto err_no_pdata; + mxc_platform_info = dev_get_platdata(&pdev->dev); + if (!mxc_platform_info) { + dev_err(&pdev->dev, "can't get the platform data\n"); + return -EINVAL; } - /* Allocate master with space for drv_data */ - master = spi_alloc_master(dev, sizeof(struct driver_data)); - if (!master) { - dev_err(&pdev->dev, "probe - cannot alloc spi_master\n"); - status = -ENOMEM; - goto err_no_mem; - } - drv_data = spi_master_get_devdata(master); - drv_data->master = master; - drv_data->master_info = platform_info; - drv_data->pdev = pdev; + master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); + if (!master) + return -ENOMEM; - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + platform_set_drvdata(pdev, master); master->bus_num = pdev->id; - master->num_chipselect = platform_info->num_chipselect; - master->dma_alignment = 
DMA_ALIGNMENT; - master->cleanup = cleanup; - master->setup = setup; - master->transfer = transfer; - - drv_data->dummy_dma_buf = SPI_DUMMY_u32; - - drv_data->clk = clk_get(&pdev->dev, "perclk2"); - if (IS_ERR(drv_data->clk)) { - dev_err(&pdev->dev, "probe - cannot get clock\n"); - status = PTR_ERR(drv_data->clk); - goto err_no_clk; + master->num_chipselect = mxc_platform_info->num_chipselect; + + spi_imx = spi_master_get_devdata(master); + spi_imx->bitbang.master = spi_master_get(master); + spi_imx->chipselect = mxc_platform_info->chipselect; + + for (i = 0; i < master->num_chipselect; i++) { + if (spi_imx->chipselect[i] < 0) + continue; + ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); + if (ret) { + while (i > 0) { + i--; + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); + } + dev_err(&pdev->dev, "can't get cs gpios\n"); + goto out_master_put; + } } - clk_enable(drv_data->clk); - /* Find and map resources */ + spi_imx->bitbang.chipselect = spi_imx_chipselect; + spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; + spi_imx->bitbang.txrx_bufs = spi_imx_transfer; + spi_imx->bitbang.master->setup = spi_imx_setup; + spi_imx->bitbang.master->cleanup = spi_imx_cleanup; + spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + init_completion(&spi_imx->xfer_done); + + spi_imx->devtype_data = + spi_imx_devtype_data[pdev->id_entry->driver_data]; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - dev_err(&pdev->dev, "probe - MEM resources not defined\n"); - status = -ENODEV; - goto err_no_iores; - } - drv_data->ioarea = request_mem_region(res->start, - res->end - res->start + 1, - pdev->name); - if (drv_data->ioarea == NULL) { - dev_err(&pdev->dev, "probe - cannot reserve region\n"); - status = -ENXIO; - goto err_no_iores; - } - drv_data->regs = ioremap(res->start, res->end - res->start + 1); - if (drv_data->regs == NULL) { - dev_err(&pdev->dev, "probe - cannot map IO\n"); - status = -ENXIO; - goto err_no_iomap; - } - drv_data->rd_data_phys = (dma_addr_t)res->start; - - /* Attach to IRQ */ - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "probe - IRQ resource not defined\n"); - status = -ENODEV; - goto err_no_irqres; - } - status = request_irq(irq, spi_int, IRQF_DISABLED, - dev_name(dev), drv_data); - if (status < 0) { - dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status); - goto err_no_irqres; + dev_err(&pdev->dev, "can't get platform resource\n"); + ret = -ENOMEM; + goto out_gpio_free; } - /* Setup DMA if requested */ - drv_data->tx_channel = -1; - drv_data->rx_channel = -1; - if (platform_info->enable_dma) { - /* Get rx DMA channel */ - drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx", - DMA_PRIO_HIGH); - if (drv_data->rx_channel < 0) { - dev_err(dev, - "probe - problem (%d) requesting rx channel\n", - drv_data->rx_channel); - goto err_no_rxdma; - } else - imx_dma_setup_handlers(drv_data->rx_channel, NULL, - dma_err_handler, drv_data); - - /* Get tx DMA channel */ - drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx", - DMA_PRIO_MEDIUM); - if (drv_data->tx_channel < 0) { - dev_err(dev, - "probe - problem (%d) requesting tx channel\n", - drv_data->tx_channel); - imx_dma_free(drv_data->rx_channel); - goto err_no_txdma; - } else - imx_dma_setup_handlers(drv_data->tx_channel, - dma_tx_handler, dma_err_handler, - drv_data); - - /* Set request source and burst length for allocated channels */ - switch (drv_data->pdev->id) { - case 1: - /* Using SPI1 */ - 
RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R; - RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T; - break; - case 2: - /* Using SPI2 */ - RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R; - RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T; - break; - default: - dev_err(dev, "probe - bad SPI Id\n"); - imx_dma_free(drv_data->rx_channel); - imx_dma_free(drv_data->tx_channel); - status = -ENODEV; - goto err_no_devid; - } - BLR(drv_data->rx_channel) = SPI_DMA_BLR; - BLR(drv_data->tx_channel) = SPI_DMA_BLR; + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "request_mem_region failed\n"); + ret = -EBUSY; + goto out_gpio_free; } - /* Load default SPI configuration */ - writel(SPI_RESET_START, drv_data->regs + SPI_RESET); - writel(0, drv_data->regs + SPI_RESET); - writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL); - - /* Initial and start queue */ - status = init_queue(drv_data); - if (status != 0) { - dev_err(&pdev->dev, "probe - problem initializing queue\n"); - goto err_init_queue; - } - status = start_queue(drv_data); - if (status != 0) { - dev_err(&pdev->dev, "probe - problem starting queue\n"); - goto err_start_queue; + spi_imx->base = ioremap(res->start, resource_size(res)); + if (!spi_imx->base) { + ret = -EINVAL; + goto out_release_mem; } - /* Register with the SPI framework */ - platform_set_drvdata(pdev, drv_data); - status = spi_register_master(master); - if (status != 0) { - dev_err(&pdev->dev, "probe - problem registering spi master\n"); - goto err_spi_register; + spi_imx->irq = platform_get_irq(pdev, 0); + if (spi_imx->irq < 0) { + ret = -EINVAL; + goto out_iounmap; } - dev_dbg(dev, "probe succeded\n"); - return 0; - -err_init_queue: -err_start_queue: -err_spi_register: - destroy_queue(drv_data); - -err_no_rxdma: -err_no_txdma: -err_no_devid: - free_irq(irq, drv_data); - -err_no_irqres: - iounmap(drv_data->regs); - -err_no_iomap: - release_resource(drv_data->ioarea); - kfree(drv_data->ioarea); - -err_no_iores: - clk_disable(drv_data->clk); - clk_put(drv_data->clk); - -err_no_clk: - spi_master_put(master); - -err_no_pdata: -err_no_mem: - return status; -} - -static int __exit spi_imx_remove(struct platform_device *pdev) -{ - struct driver_data *drv_data = platform_get_drvdata(pdev); - int irq; - int status = 0; - - if (!drv_data) - return 0; - - tasklet_kill(&drv_data->pump_transfers); - - /* Remove the queue */ - status = destroy_queue(drv_data); - if (status != 0) { - dev_err(&pdev->dev, "queue remove failed (%d)\n", status); - return status; + ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); + if (ret) { + dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); + goto out_iounmap; } - /* Reset SPI */ - writel(SPI_RESET_START, drv_data->regs + SPI_RESET); - writel(0, drv_data->regs + SPI_RESET); - - /* Release DMA */ - if (drv_data->master_info->enable_dma) { - RSSR(drv_data->rx_channel) = 0; - RSSR(drv_data->tx_channel) = 0; - imx_dma_free(drv_data->tx_channel); - imx_dma_free(drv_data->rx_channel); + spi_imx->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(spi_imx->clk)) { + dev_err(&pdev->dev, "unable to get clock\n"); + ret = PTR_ERR(spi_imx->clk); + goto out_free_irq; } - /* Release IRQ */ - irq = platform_get_irq(pdev, 0); - if (irq >= 0) - free_irq(irq, drv_data); + clk_enable(spi_imx->clk); + spi_imx->spi_clk = clk_get_rate(spi_imx->clk); - clk_disable(drv_data->clk); - clk_put(drv_data->clk); + spi_imx->devtype_data.reset(spi_imx); - /* Release map resources */ - iounmap(drv_data->regs); - 
release_resource(drv_data->ioarea); - kfree(drv_data->ioarea); + spi_imx->devtype_data.intctrl(spi_imx, 0); - /* Disconnect from the SPI framework */ - spi_unregister_master(drv_data->master); - spi_master_put(drv_data->master); + ret = spi_bitbang_start(&spi_imx->bitbang); + if (ret) { + dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); + goto out_clk_put; + } - /* Prevent double remove */ - platform_set_drvdata(pdev, NULL); + dev_info(&pdev->dev, "probed\n"); - dev_dbg(&pdev->dev, "remove succeded\n"); + return ret; - return 0; +out_clk_put: + clk_disable(spi_imx->clk); + clk_put(spi_imx->clk); +out_free_irq: + free_irq(spi_imx->irq, spi_imx); +out_iounmap: + iounmap(spi_imx->base); +out_release_mem: + release_mem_region(res->start, resource_size(res)); +out_gpio_free: + for (i = 0; i < master->num_chipselect; i++) + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); +out_master_put: + spi_master_put(master); + kfree(master); + platform_set_drvdata(pdev, NULL); + return ret; } -static void spi_imx_shutdown(struct platform_device *pdev) +static int __devexit spi_imx_remove(struct platform_device *pdev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct spi_imx_data *spi_imx = spi_master_get_devdata(master); + int i; - /* Reset SPI */ - writel(SPI_RESET_START, drv_data->regs + SPI_RESET); - writel(0, drv_data->regs + SPI_RESET); + spi_bitbang_stop(&spi_imx->bitbang); - dev_dbg(&pdev->dev, "shutdown succeded\n"); -} + writel(0, spi_imx->base + MXC_CSPICTRL); + clk_disable(spi_imx->clk); + clk_put(spi_imx->clk); + free_irq(spi_imx->irq, spi_imx); + iounmap(spi_imx->base); -#ifdef CONFIG_PM + for (i = 0; i < master->num_chipselect; i++) + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); -static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct driver_data *drv_data = platform_get_drvdata(pdev); - int status = 0; + spi_master_put(master); - status = stop_queue(drv_data); - if (status != 0) { - dev_warn(&pdev->dev, "suspend cannot stop queue\n"); - return status; - } + release_mem_region(res->start, resource_size(res)); - dev_dbg(&pdev->dev, "suspended\n"); + platform_set_drvdata(pdev, NULL); return 0; } -static int spi_imx_resume(struct platform_device *pdev) -{ - struct driver_data *drv_data = platform_get_drvdata(pdev); - int status = 0; - - /* Start the queue running */ - status = start_queue(drv_data); - if (status != 0) - dev_err(&pdev->dev, "problem starting queue (%d)\n", status); - else - dev_dbg(&pdev->dev, "resumed\n"); - - return status; -} -#else -#define spi_imx_suspend NULL -#define spi_imx_resume NULL -#endif /* CONFIG_PM */ - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:spi_imx"); - -static struct platform_driver driver = { +static struct platform_driver spi_imx_driver = { .driver = { - .name = "spi_imx", - .owner = THIS_MODULE, - }, - .remove = __exit_p(spi_imx_remove), - .shutdown = spi_imx_shutdown, - .suspend = spi_imx_suspend, - .resume = spi_imx_resume, + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, + .id_table = spi_imx_devtype, + .probe = spi_imx_probe, + .remove = __devexit_p(spi_imx_remove), }; static int __init spi_imx_init(void) { - return platform_driver_probe(&driver, spi_imx_probe); + return platform_driver_register(&spi_imx_driver); } -module_init(spi_imx_init); static void __exit spi_imx_exit(void) { - 
platform_driver_unregister(&driver); + platform_driver_unregister(&spi_imx_driver); } + +module_init(spi_imx_init); module_exit(spi_imx_exit); -MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>"); -MODULE_DESCRIPTION("iMX SPI Controller Driver"); +MODULE_DESCRIPTION("SPI Master Controller driver"); +MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c index 568c781ad91..7746a41ab6d 100644 --- a/drivers/spi/spi_lm70llp.c +++ b/drivers/spi/spi_lm70llp.c @@ -174,8 +174,7 @@ static inline int getmiso(struct spi_device *s) } /*--------------------------------------------------------------------*/ -#define EXPAND_BITBANG_TXRX 1 -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static void lm70_chipselect(struct spi_device *spi, int value) { @@ -192,7 +191,7 @@ static void lm70_chipselect(struct spi_device *spi, int value) */ static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static void spi_lm70llp_attach(struct parport *p) diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c deleted file mode 100644 index 0fd0ec4d3a7..00000000000 --- a/drivers/spi/spi_mpc8xxx.c +++ /dev/null @@ -1,945 +0,0 @@ -/* - * MPC8xxx SPI controller driver. - * - * Maintainer: Kumar Gala - * - * Copyright (C) 2006 Polycom, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/bug.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/completion.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/device.h> -#include <linux/spi/spi.h> -#include <linux/spi/spi_bitbang.h> -#include <linux/platform_device.h> -#include <linux/fsl_devices.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <linux/gpio.h> -#include <linux/of_gpio.h> -#include <linux/of_spi.h> - -#include <sysdev/fsl_soc.h> -#include <asm/irq.h> - -/* SPI Controller registers */ -struct mpc8xxx_spi_reg { - u8 res1[0x20]; - __be32 mode; - __be32 event; - __be32 mask; - __be32 command; - __be32 transmit; - __be32 receive; -}; - -/* SPI Controller mode register definitions */ -#define SPMODE_LOOP (1 << 30) -#define SPMODE_CI_INACTIVEHIGH (1 << 29) -#define SPMODE_CP_BEGIN_EDGECLK (1 << 28) -#define SPMODE_DIV16 (1 << 27) -#define SPMODE_REV (1 << 26) -#define SPMODE_MS (1 << 25) -#define SPMODE_ENABLE (1 << 24) -#define SPMODE_LEN(x) ((x) << 20) -#define SPMODE_PM(x) ((x) << 16) -#define SPMODE_OP (1 << 14) -#define SPMODE_CG(x) ((x) << 7) - -/* - * Default for SPI Mode: - * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk - */ -#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ - SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) - -/* SPIE register values */ -#define SPIE_NE 0x00000200 /* Not empty */ -#define SPIE_NF 0x00000100 /* Not full */ - -/* SPIM register values */ -#define SPIM_NE 0x00000200 /* Not empty */ -#define SPIM_NF 0x00000100 /* Not full */ - -/* SPI Controller driver's private data. 
*/ -struct mpc8xxx_spi { - struct mpc8xxx_spi_reg __iomem *base; - - /* rx & tx bufs from the spi_transfer */ - const void *tx; - void *rx; - - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32(*get_tx) (struct mpc8xxx_spi *); - - unsigned int count; - unsigned int irq; - - unsigned nsecs; /* (clock cycle time)/2 */ - - u32 spibrg; /* SPIBRG input clock */ - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - - bool qe_mode; - - struct workqueue_struct *workqueue; - struct work_struct work; - - struct list_head queue; - spinlock_t lock; - - struct completion done; -}; - -struct spi_mpc8xxx_cs { - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32 (*get_tx) (struct mpc8xxx_spi *); - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - u32 hw_mode; /* Holds HW mode register settings */ -}; - -static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) -{ - out_be32(reg, val); -} - -static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) -{ - return in_be32(reg); -} - -#define MPC83XX_SPI_RX_BUF(type) \ -static \ -void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - type *rx = mpc8xxx_spi->rx; \ - *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ - mpc8xxx_spi->rx = rx; \ -} - -#define MPC83XX_SPI_TX_BUF(type) \ -static \ -u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - u32 data; \ - const type *tx = mpc8xxx_spi->tx; \ - if (!tx) \ - return 0; \ - data = *tx++ << mpc8xxx_spi->tx_shift; \ - mpc8xxx_spi->tx = tx; \ - return data; \ -} - -MPC83XX_SPI_RX_BUF(u8) -MPC83XX_SPI_RX_BUF(u16) -MPC83XX_SPI_RX_BUF(u32) -MPC83XX_SPI_TX_BUF(u8) -MPC83XX_SPI_TX_BUF(u16) -MPC83XX_SPI_TX_BUF(u32) - -static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; - bool pol = spi->mode & SPI_CS_HIGH; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - if (value == BITBANG_CS_INACTIVE) { - if (pdata->cs_control) - pdata->cs_control(spi, !pol); - } - - if (value == BITBANG_CS_ACTIVE) { - u32 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); - - mpc8xxx_spi->rx_shift = cs->rx_shift; - mpc8xxx_spi->tx_shift = cs->tx_shift; - mpc8xxx_spi->get_rx = cs->get_rx; - mpc8xxx_spi->get_tx = cs->get_tx; - - if (cs->hw_mode != regval) { - unsigned long flags; - __be32 __iomem *mode = &mpc8xxx_spi->base->mode; - - regval = cs->hw_mode; - /* Turn off IRQs locally to minimize time that - * SPI is disabled - */ - local_irq_save(flags); - /* Turn off SPI unit prior changing mode */ - mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE); - mpc8xxx_spi_write_reg(mode, regval); - local_irq_restore(flags); - } - if (pdata->cs_control) - pdata->cs_control(spi, pol); - } -} - -static -int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - u32 regval; - u8 bits_per_word, pm; - u32 hz; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - mpc8xxx_spi = spi_master_get_devdata(spi->master); - - if (t) { - bits_per_word = t->bits_per_word; - hz = t->speed_hz; - } else { - bits_per_word = 0; - hz = 0; - } - - /* spi_transfer level calls that work per-word */ - if (!bits_per_word) - bits_per_word = 
spi->bits_per_word; - - /* Make sure its a bit width we support [4..16, 32] */ - if ((bits_per_word < 4) - || ((bits_per_word > 16) && (bits_per_word != 32))) - return -EINVAL; - - if (!hz) - hz = spi->max_speed_hz; - - cs->rx_shift = 0; - cs->tx_shift = 0; - if (bits_per_word <= 8) { - cs->get_rx = mpc8xxx_spi_rx_buf_u8; - cs->get_tx = mpc8xxx_spi_tx_buf_u8; - if (mpc8xxx_spi->qe_mode) { - cs->rx_shift = 16; - cs->tx_shift = 24; - } - } else if (bits_per_word <= 16) { - cs->get_rx = mpc8xxx_spi_rx_buf_u16; - cs->get_tx = mpc8xxx_spi_tx_buf_u16; - if (mpc8xxx_spi->qe_mode) { - cs->rx_shift = 16; - cs->tx_shift = 16; - } - } else if (bits_per_word <= 32) { - cs->get_rx = mpc8xxx_spi_rx_buf_u32; - cs->get_tx = mpc8xxx_spi_tx_buf_u32; - } else - return -EINVAL; - - if (mpc8xxx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) { - cs->tx_shift = 0; - if (bits_per_word <= 8) - cs->rx_shift = 8; - else - cs->rx_shift = 0; - } - - mpc8xxx_spi->rx_shift = cs->rx_shift; - mpc8xxx_spi->tx_shift = cs->tx_shift; - mpc8xxx_spi->get_rx = cs->get_rx; - mpc8xxx_spi->get_tx = cs->get_tx; - - if (bits_per_word == 32) - bits_per_word = 0; - else - bits_per_word = bits_per_word - 1; - - /* mask out bits we are going to set */ - cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16 - | SPMODE_PM(0xF)); - - cs->hw_mode |= SPMODE_LEN(bits_per_word); - - if ((mpc8xxx_spi->spibrg / hz) > 64) { - cs->hw_mode |= SPMODE_DIV16; - pm = mpc8xxx_spi->spibrg / (hz * 64); - - WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " - "Will use %d Hz instead.\n", dev_name(&spi->dev), - hz, mpc8xxx_spi->spibrg / 1024); - if (pm > 16) - pm = 16; - } else - pm = mpc8xxx_spi->spibrg / (hz * 4); - if (pm) - pm--; - - cs->hw_mode |= SPMODE_PM(pm); - regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); - if (cs->hw_mode != regval) { - unsigned long flags; - __be32 __iomem *mode = &mpc8xxx_spi->base->mode; - - regval = cs->hw_mode; - /* Turn off IRQs locally to minimize time - * that SPI is disabled - */ - local_irq_save(flags); - /* Turn off SPI unit prior changing mode */ - mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE); - mpc8xxx_spi_write_reg(mode, regval); - local_irq_restore(flags); - } - return 0; -} - -static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - u32 word, len, bits_per_word; - - mpc8xxx_spi = spi_master_get_devdata(spi->master); - - mpc8xxx_spi->tx = t->tx_buf; - mpc8xxx_spi->rx = t->rx_buf; - bits_per_word = spi->bits_per_word; - if (t->bits_per_word) - bits_per_word = t->bits_per_word; - len = t->len; - if (bits_per_word > 8) { - /* invalid length? */ - if (len & 1) - return -EINVAL; - len /= 2; - } - if (bits_per_word > 16) { - /* invalid length? 
*/ - if (len & 1) - return -EINVAL; - len /= 2; - } - mpc8xxx_spi->count = len; - - INIT_COMPLETION(mpc8xxx_spi->done); - - /* enable rx ints */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, SPIM_NE); - - /* transmit word */ - word = mpc8xxx_spi->get_tx(mpc8xxx_spi); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); - - wait_for_completion(&mpc8xxx_spi->done); - - /* disable rx ints */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); - - return mpc8xxx_spi->count; -} - -static void mpc8xxx_spi_do_one_msg(struct spi_message *m) -{ - struct spi_device *spi = m->spi; - struct spi_transfer *t; - unsigned int cs_change; - const int nsecs = 50; - int status; - - cs_change = 1; - status = 0; - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - /* Don't allow changes if CS is active */ - status = -EINVAL; - - if (cs_change) - status = mpc8xxx_spi_setup_transfer(spi, t); - if (status < 0) - break; - } - - if (cs_change) { - mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE); - ndelay(nsecs); - } - cs_change = t->cs_change; - if (t->len) - status = mpc8xxx_spi_bufs(spi, t); - if (status) { - status = -EMSGSIZE; - break; - } - m->actual_length += t->len; - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (cs_change) { - ndelay(nsecs); - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(nsecs); - } - } - - m->status = status; - m->complete(m->context); - - if (status || !cs_change) { - ndelay(nsecs); - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); - } - - mpc8xxx_spi_setup_transfer(spi, NULL); -} - -static void mpc8xxx_spi_work(struct work_struct *work) -{ - struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, - work); - - spin_lock_irq(&mpc8xxx_spi->lock); - while (!list_empty(&mpc8xxx_spi->queue)) { - struct spi_message *m = container_of(mpc8xxx_spi->queue.next, - struct spi_message, queue); - - list_del_init(&m->queue); - spin_unlock_irq(&mpc8xxx_spi->lock); - - mpc8xxx_spi_do_one_msg(m); - - spin_lock_irq(&mpc8xxx_spi->lock); - } - spin_unlock_irq(&mpc8xxx_spi->lock); -} - -static int mpc8xxx_spi_setup(struct spi_device *spi) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - int retval; - u32 hw_mode; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - if (!spi->max_speed_hz) - return -EINVAL; - - if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - spi->controller_state = cs; - } - mpc8xxx_spi = spi_master_get_devdata(spi->master); - - hw_mode = cs->hw_mode; /* Save orginal settings */ - cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); - /* mask out bits we are going to set */ - cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH - | SPMODE_REV | SPMODE_LOOP); - - if (spi->mode & SPI_CPHA) - cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK; - if (spi->mode & SPI_CPOL) - cs->hw_mode |= SPMODE_CI_INACTIVEHIGH; - if (!(spi->mode & SPI_LSB_FIRST)) - cs->hw_mode |= SPMODE_REV; - if (spi->mode & SPI_LOOP) - cs->hw_mode |= SPMODE_LOOP; - - retval = mpc8xxx_spi_setup_transfer(spi, NULL); - if (retval < 0) { - cs->hw_mode = hw_mode; /* Restore settings */ - return retval; - } - return 0; -} - -static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) -{ - struct mpc8xxx_spi *mpc8xxx_spi = context_data; - u32 event; - irqreturn_t ret = IRQ_NONE; - - /* Get interrupt events(tx/rx) */ - event = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event); - - /* We need handle RX first */ - if (event & SPIE_NE) { - u32 rx_data = 
mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->receive); - - if (mpc8xxx_spi->rx) - mpc8xxx_spi->get_rx(rx_data, mpc8xxx_spi); - - ret = IRQ_HANDLED; - } - - if ((event & SPIE_NF) == 0) - /* spin until TX is done */ - while (((event = - mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event)) & - SPIE_NF) == 0) - cpu_relax(); - - mpc8xxx_spi->count -= 1; - if (mpc8xxx_spi->count) { - u32 word = mpc8xxx_spi->get_tx(mpc8xxx_spi); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); - } else { - complete(&mpc8xxx_spi->done); - } - - /* Clear the events */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, event); - - return ret; -} -static int mpc8xxx_spi_transfer(struct spi_device *spi, - struct spi_message *m) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spin_lock_irqsave(&mpc8xxx_spi->lock, flags); - list_add_tail(&m->queue, &mpc8xxx_spi->queue); - queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); - spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); - - return 0; -} - - -static void mpc8xxx_spi_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - -static struct spi_master * __devinit -mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct spi_master *master; - struct mpc8xxx_spi *mpc8xxx_spi; - u32 regval; - int ret = 0; - - master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); - if (master == NULL) { - ret = -ENOMEM; - goto err; - } - - dev_set_drvdata(dev, master); - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH - | SPI_LSB_FIRST | SPI_LOOP; - - master->setup = mpc8xxx_spi_setup; - master->transfer = mpc8xxx_spi_transfer; - master->cleanup = mpc8xxx_spi_cleanup; - - mpc8xxx_spi = spi_master_get_devdata(master); - mpc8xxx_spi->qe_mode = pdata->qe_mode; - mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; - mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; - mpc8xxx_spi->spibrg = pdata->sysclk; - - mpc8xxx_spi->rx_shift = 0; - mpc8xxx_spi->tx_shift = 0; - if (mpc8xxx_spi->qe_mode) { - mpc8xxx_spi->rx_shift = 16; - mpc8xxx_spi->tx_shift = 24; - } - - init_completion(&mpc8xxx_spi->done); - - mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1); - if (mpc8xxx_spi->base == NULL) { - ret = -ENOMEM; - goto put_master; - } - - mpc8xxx_spi->irq = irq; - - /* Register for SPI Interrupt */ - ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq, - 0, "mpc8xxx_spi", mpc8xxx_spi); - - if (ret != 0) - goto unmap_io; - - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->max_chipselect; - - /* SPI controller initializations */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff); - - /* Enable SPI interface */ - regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; - if (pdata->qe_mode) - regval |= SPMODE_OP; - - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); - spin_lock_init(&mpc8xxx_spi->lock); - init_completion(&mpc8xxx_spi->done); - INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); - INIT_LIST_HEAD(&mpc8xxx_spi->queue); - - mpc8xxx_spi->workqueue = create_singlethread_workqueue( - dev_name(master->dev.parent)); - if (mpc8xxx_spi->workqueue == NULL) { - ret = -EBUSY; - goto free_irq; - } - - ret = 
spi_register_master(master); - if (ret < 0) - goto unreg_master; - - printk(KERN_INFO - "%s: MPC8xxx SPI Controller driver at 0x%p (irq = %d)\n", - dev_name(dev), mpc8xxx_spi->base, mpc8xxx_spi->irq); - - return master; - -unreg_master: - destroy_workqueue(mpc8xxx_spi->workqueue); -free_irq: - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); -unmap_io: - iounmap(mpc8xxx_spi->base); -put_master: - spi_master_put(master); -err: - return ERR_PTR(ret); -} - -static int __devexit mpc8xxx_spi_remove(struct device *dev) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - struct spi_master *master; - - master = dev_get_drvdata(dev); - mpc8xxx_spi = spi_master_get_devdata(master); - - flush_workqueue(mpc8xxx_spi->workqueue); - destroy_workqueue(mpc8xxx_spi->workqueue); - spi_unregister_master(master); - - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); - iounmap(mpc8xxx_spi->base); - - return 0; -} - -struct mpc8xxx_spi_probe_info { - struct fsl_spi_platform_data pdata; - int *gpios; - bool *alow_flags; -}; - -static struct mpc8xxx_spi_probe_info * -to_of_pinfo(struct fsl_spi_platform_data *pdata) -{ - return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); -} - -static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) -{ - struct device *dev = spi->dev.parent; - struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); - u16 cs = spi->chip_select; - int gpio = pinfo->gpios[cs]; - bool alow = pinfo->alow_flags[cs]; - - gpio_set_value(gpio, on ^ alow); -} - -static int of_mpc8xxx_spi_get_chipselects(struct device *dev) -{ - struct device_node *np = dev_archdata_get_node(&dev->archdata); - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); - unsigned int ngpios; - int i = 0; - int ret; - - ngpios = of_gpio_count(np); - if (!ngpios) { - /* - * SPI w/o chip-select line. One SPI device is still permitted - * though. 
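Both the mpc8xxx chip-select helper being deleted here and the new i.MX probe earlier in this patch claim one GPIO per chip select and release the already-claimed ones if a later request fails. A condensed sketch of that claim-and-unwind pattern using the gpiolib calls seen in this patch (function name is illustrative, error reporting omitted):

#include <linux/gpio.h>

/* Claim an array of chip-select GPIOs; on failure release what was taken. */
static int claim_cs_gpios(const int *gpios, int ngpios, const char *label)
{
	int i, ret;

	for (i = 0; i < ngpios; i++) {
		if (gpios[i] < 0)		/* unused slot, skip it */
			continue;
		ret = gpio_request(gpios[i], label);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		if (gpios[i] >= 0)
			gpio_free(gpios[i]);
	return ret;
}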
- */ - pdata->max_chipselect = 1; - return 0; - } - - pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL); - if (!pinfo->gpios) - return -ENOMEM; - memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios)); - - pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags), - GFP_KERNEL); - if (!pinfo->alow_flags) { - ret = -ENOMEM; - goto err_alloc_flags; - } - - for (; i < ngpios; i++) { - int gpio; - enum of_gpio_flags flags; - - gpio = of_get_gpio_flags(np, i, &flags); - if (!gpio_is_valid(gpio)) { - dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); - goto err_loop; - } - - ret = gpio_request(gpio, dev_name(dev)); - if (ret) { - dev_err(dev, "can't request gpio #%d: %d\n", i, ret); - goto err_loop; - } - - pinfo->gpios[i] = gpio; - pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW; - - ret = gpio_direction_output(pinfo->gpios[i], - pinfo->alow_flags[i]); - if (ret) { - dev_err(dev, "can't set output direction for gpio " - "#%d: %d\n", i, ret); - goto err_loop; - } - } - - pdata->max_chipselect = ngpios; - pdata->cs_control = mpc8xxx_spi_cs_control; - - return 0; - -err_loop: - while (i >= 0) { - if (gpio_is_valid(pinfo->gpios[i])) - gpio_free(pinfo->gpios[i]); - i--; - } - - kfree(pinfo->alow_flags); - pinfo->alow_flags = NULL; -err_alloc_flags: - kfree(pinfo->gpios); - pinfo->gpios = NULL; - return ret; -} - -static int of_mpc8xxx_spi_free_chipselects(struct device *dev) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); - int i; - - if (!pinfo->gpios) - return 0; - - for (i = 0; i < pdata->max_chipselect; i++) { - if (gpio_is_valid(pinfo->gpios[i])) - gpio_free(pinfo->gpios[i]); - } - - kfree(pinfo->gpios); - kfree(pinfo->alow_flags); - return 0; -} - -static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev, - const struct of_device_id *ofid) -{ - struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->node; - struct mpc8xxx_spi_probe_info *pinfo; - struct fsl_spi_platform_data *pdata; - struct spi_master *master; - struct resource mem; - struct resource irq; - const void *prop; - int ret = -ENOMEM; - - pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); - if (!pinfo) - return -ENOMEM; - - pdata = &pinfo->pdata; - dev->platform_data = pdata; - - /* Allocate bus num dynamically. */ - pdata->bus_num = -1; - - /* SPI controller is either clocked from QE or SoC clock. 
*/ - pdata->sysclk = get_brgfreq(); - if (pdata->sysclk == -1) { - pdata->sysclk = fsl_get_sys_freq(); - if (pdata->sysclk == -1) { - ret = -ENODEV; - goto err_clk; - } - } - - prop = of_get_property(np, "mode", NULL); - if (prop && !strcmp(prop, "cpu-qe")) - pdata->qe_mode = 1; - - ret = of_mpc8xxx_spi_get_chipselects(dev); - if (ret) - goto err; - - ret = of_address_to_resource(np, 0, &mem); - if (ret) - goto err; - - ret = of_irq_to_resource(np, 0, &irq); - if (!ret) { - ret = -EINVAL; - goto err; - } - - master = mpc8xxx_spi_probe(dev, &mem, irq.start); - if (IS_ERR(master)) { - ret = PTR_ERR(master); - goto err; - } - - of_register_spi_devices(master, np); - - return 0; - -err: - of_mpc8xxx_spi_free_chipselects(dev); -err_clk: - kfree(pinfo); - return ret; -} - -static int __devexit of_mpc8xxx_spi_remove(struct of_device *ofdev) -{ - int ret; - - ret = mpc8xxx_spi_remove(&ofdev->dev); - if (ret) - return ret; - of_mpc8xxx_spi_free_chipselects(&ofdev->dev); - return 0; -} - -static const struct of_device_id of_mpc8xxx_spi_match[] = { - { .compatible = "fsl,spi" }, - {}, -}; -MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); - -static struct of_platform_driver of_mpc8xxx_spi_driver = { - .name = "mpc8xxx_spi", - .match_table = of_mpc8xxx_spi_match, - .probe = of_mpc8xxx_spi_probe, - .remove = __devexit_p(of_mpc8xxx_spi_remove), -}; - -#ifdef CONFIG_MPC832x_RDB -/* - * XXX XXX XXX - * This is "legacy" platform driver, was used by the MPC8323E-RDB boards - * only. The driver should go away soon, since newer MPC8323E-RDB's device - * tree can work with OpenFirmware driver. But for now we support old trees - * as well. - */ -static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) -{ - struct resource *mem; - unsigned int irq; - struct spi_master *master; - - if (!pdev->dev.platform_data) - return -EINVAL; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -EINVAL; - - irq = platform_get_irq(pdev, 0); - if (!irq) - return -EINVAL; - - master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); - if (IS_ERR(master)) - return PTR_ERR(master); - return 0; -} - -static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev) -{ - return mpc8xxx_spi_remove(&pdev->dev); -} - -MODULE_ALIAS("platform:mpc8xxx_spi"); -static struct platform_driver mpc8xxx_spi_driver = { - .probe = plat_mpc8xxx_spi_probe, - .remove = __exit_p(plat_mpc8xxx_spi_remove), - .driver = { - .name = "mpc8xxx_spi", - .owner = THIS_MODULE, - }, -}; - -static bool legacy_driver_failed; - -static void __init legacy_driver_register(void) -{ - legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver); -} - -static void __exit legacy_driver_unregister(void) -{ - if (legacy_driver_failed) - return; - platform_driver_unregister(&mpc8xxx_spi_driver); -} -#else -static void __init legacy_driver_register(void) {} -static void __exit legacy_driver_unregister(void) {} -#endif /* CONFIG_MPC832x_RDB */ - -static int __init mpc8xxx_spi_init(void) -{ - legacy_driver_register(); - return of_register_platform_driver(&of_mpc8xxx_spi_driver); -} - -static void __exit mpc8xxx_spi_exit(void) -{ - of_unregister_platform_driver(&of_mpc8xxx_spi_driver); - legacy_driver_unregister(); -} - -module_init(mpc8xxx_spi_init); -module_exit(mpc8xxx_spi_exit); - -MODULE_AUTHOR("Kumar Gala"); -MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c new file mode 100644 index 00000000000..d5be18b3078 --- /dev/null +++ 
b/drivers/spi/spi_nuc900.c @@ -0,0 +1,505 @@ +/* linux/drivers/spi/spi_nuc900.c + * + * Copyright (c) 2009 Nuvoton technology. + * Wan ZongShun <mcuos.com@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * +*/ + +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/io.h> +#include <linux/slab.h> + +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> + +#include <mach/nuc900_spi.h> + +/* usi registers offset */ +#define USI_CNT 0x00 +#define USI_DIV 0x04 +#define USI_SSR 0x08 +#define USI_RX0 0x10 +#define USI_TX0 0x10 + +/* usi register bit */ +#define ENINT (0x01 << 17) +#define ENFLG (0x01 << 16) +#define TXNUM (0x03 << 8) +#define TXNEG (0x01 << 2) +#define RXNEG (0x01 << 1) +#define LSB (0x01 << 10) +#define SELECTLEV (0x01 << 2) +#define SELECTPOL (0x01 << 31) +#define SELECTSLAVE 0x01 +#define GOBUSY 0x01 + +struct nuc900_spi { + struct spi_bitbang bitbang; + struct completion done; + void __iomem *regs; + int irq; + int len; + int count; + const unsigned char *tx; + unsigned char *rx; + struct clk *clk; + struct resource *ioarea; + struct spi_master *master; + struct spi_device *curdev; + struct device *dev; + struct nuc900_spi_info *pdata; + spinlock_t lock; + struct resource *res; +}; + +static inline struct nuc900_spi *to_hw(struct spi_device *sdev) +{ + return spi_master_get_devdata(sdev->master); +} + +static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr) +{ + struct nuc900_spi *hw = to_hw(spi); + unsigned int val; + unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0; + unsigned int cpol = spi->mode & SPI_CPOL ? 
1 : 0; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_SSR); + + if (!cs) + val &= ~SELECTLEV; + else + val |= SELECTLEV; + + if (!ssr) + val &= ~SELECTSLAVE; + else + val |= SELECTSLAVE; + + __raw_writel(val, hw->regs + USI_SSR); + + val = __raw_readl(hw->regs + USI_CNT); + + if (!cpol) + val &= ~SELECTPOL; + else + val |= SELECTPOL; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_spi_chipsel(struct spi_device *spi, int value) +{ + switch (value) { + case BITBANG_CS_INACTIVE: + nuc900_slave_select(spi, 0); + break; + + case BITBANG_CS_ACTIVE: + nuc900_slave_select(spi, 1); + break; + } +} + +static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, + unsigned int txnum) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (!txnum) + val &= ~TXNUM; + else + val |= txnum << 0x08; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); + +} + +static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw, + unsigned int txbitlen) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + val |= (txbitlen << 0x03); + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_spi_gobusy(struct nuc900_spi *hw) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + val |= GOBUSY; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static int nuc900_spi_setupxfer(struct spi_device *spi, + struct spi_transfer *t) +{ + return 0; +} + +static int nuc900_spi_setup(struct spi_device *spi) +{ + return 0; +} + +static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) +{ + return hw->tx ? 
hw->tx[count] : 0; +} + +static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct nuc900_spi *hw = to_hw(spi); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->len = t->len; + hw->count = 0; + + __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0); + + nuc900_spi_gobusy(hw); + + wait_for_completion(&hw->done); + + return hw->count; +} + +static irqreturn_t nuc900_spi_irq(int irq, void *dev) +{ + struct nuc900_spi *hw = dev; + unsigned int status; + unsigned int count = hw->count; + + status = __raw_readl(hw->regs + USI_CNT); + __raw_writel(status, hw->regs + USI_CNT); + + if (status & ENFLG) { + hw->count++; + + if (hw->rx) + hw->rx[count] = __raw_readl(hw->regs + USI_RX0); + count++; + + if (count < hw->len) { + __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0); + nuc900_spi_gobusy(hw); + } else { + complete(&hw->done); + } + + return IRQ_HANDLED; + } + + complete(&hw->done); + return IRQ_HANDLED; +} + +static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (edge) + val |= TXNEG; + else + val &= ~TXNEG; + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (edge) + val |= RXNEG; + else + val &= ~RXNEG; + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (lsb) + val |= LSB; + else + val &= ~LSB; + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (sleep) + val |= (sleep << 12); + else + val &= ~(0x0f << 12); + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_enable_int(struct nuc900_spi *hw) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + val |= ENINT; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_set_divider(struct nuc900_spi *hw) +{ + __raw_writel(hw->pdata->divider, hw->regs + USI_DIV); +} + +static void nuc900_init_spi(struct nuc900_spi *hw) +{ + clk_enable(hw->clk); + spin_lock_init(&hw->lock); + + nuc900_tx_edge(hw, hw->pdata->txneg); + nuc900_rx_edge(hw, hw->pdata->rxneg); + nuc900_send_first(hw, hw->pdata->lsb); + nuc900_set_sleep(hw, hw->pdata->sleep); + nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen); + nuc900_spi_setup_txnum(hw, hw->pdata->txnum); + nuc900_set_divider(hw); + nuc900_enable_int(hw); +} + +static int __devinit nuc900_spi_probe(struct platform_device *pdev) +{ + struct nuc900_spi *hw; + struct spi_master *master; + int err = 0; + + master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi)); + if (master == NULL) { + dev_err(&pdev->dev, "No memory for spi_master\n"); + err = -ENOMEM; + goto err_nomem; + } + + hw = 
spi_master_get_devdata(master); + memset(hw, 0, sizeof(struct nuc900_spi)); + + hw->master = spi_master_get(master); + hw->pdata = pdev->dev.platform_data; + hw->dev = &pdev->dev; + + if (hw->pdata == NULL) { + dev_err(&pdev->dev, "No platform data supplied\n"); + err = -ENOENT; + goto err_pdata; + } + + platform_set_drvdata(pdev, hw); + init_completion(&hw->done); + + master->mode_bits = SPI_MODE_0; + master->num_chipselect = hw->pdata->num_cs; + master->bus_num = hw->pdata->bus_num; + hw->bitbang.master = hw->master; + hw->bitbang.setup_transfer = nuc900_spi_setupxfer; + hw->bitbang.chipselect = nuc900_spi_chipsel; + hw->bitbang.txrx_bufs = nuc900_spi_txrx; + hw->bitbang.master->setup = nuc900_spi_setup; + + hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (hw->res == NULL) { + dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); + err = -ENOENT; + goto err_pdata; + } + + hw->ioarea = request_mem_region(hw->res->start, + resource_size(hw->res), pdev->name); + + if (hw->ioarea == NULL) { + dev_err(&pdev->dev, "Cannot reserve region\n"); + err = -ENXIO; + goto err_pdata; + } + + hw->regs = ioremap(hw->res->start, resource_size(hw->res)); + if (hw->regs == NULL) { + dev_err(&pdev->dev, "Cannot map IO\n"); + err = -ENXIO; + goto err_iomap; + } + + hw->irq = platform_get_irq(pdev, 0); + if (hw->irq < 0) { + dev_err(&pdev->dev, "No IRQ specified\n"); + err = -ENOENT; + goto err_irq; + } + + err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw); + if (err) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + goto err_irq; + } + + hw->clk = clk_get(&pdev->dev, "spi"); + if (IS_ERR(hw->clk)) { + dev_err(&pdev->dev, "No clock for device\n"); + err = PTR_ERR(hw->clk); + goto err_clk; + } + + mfp_set_groupg(&pdev->dev); + nuc900_init_spi(hw); + + err = spi_bitbang_start(&hw->bitbang); + if (err) { + dev_err(&pdev->dev, "Failed to register SPI master\n"); + goto err_register; + } + + return 0; + +err_register: + clk_disable(hw->clk); + clk_put(hw->clk); +err_clk: + free_irq(hw->irq, hw); +err_irq: + iounmap(hw->regs); +err_iomap: + release_mem_region(hw->res->start, resource_size(hw->res)); + kfree(hw->ioarea); +err_pdata: + spi_master_put(hw->master); + +err_nomem: + return err; +} + +static int __devexit nuc900_spi_remove(struct platform_device *dev) +{ + struct nuc900_spi *hw = platform_get_drvdata(dev); + + free_irq(hw->irq, hw); + + platform_set_drvdata(dev, NULL); + + spi_unregister_master(hw->master); + + clk_disable(hw->clk); + clk_put(hw->clk); + + iounmap(hw->regs); + + release_mem_region(hw->res->start, resource_size(hw->res)); + kfree(hw->ioarea); + + spi_master_put(hw->master); + return 0; +} + +static struct platform_driver nuc900_spi_driver = { + .probe = nuc900_spi_probe, + .remove = __devexit_p(nuc900_spi_remove), + .driver = { + .name = "nuc900-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init nuc900_spi_init(void) +{ + return platform_driver_register(&nuc900_spi_driver); +} + +static void __exit nuc900_spi_exit(void) +{ + platform_driver_unregister(&nuc900_spi_driver); +} + +module_init(nuc900_spi_init); +module_exit(nuc900_spi_exit); + +MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); +MODULE_DESCRIPTION("nuc900 spi driver!"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:nuc900-spi"); diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c new file mode 100644 index 00000000000..80e172d3e72 --- /dev/null +++ b/drivers/spi/spi_ppc4xx.c @@ -0,0 +1,613 @@ +/* + * SPI_PPC4XX SPI controller driver. 
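spi_imx, spi_nuc900 and the spi_ppc4xx driver that begins below all layer on the spi_bitbang helper: probe fills in the chipselect/setup_transfer/txrx_bufs callbacks, then hands everything to spi_bitbang_start(), which registers the spi_master and drives the message queue. A condensed sketch of that wiring, with stub callbacks standing in for the per-driver bodies:

#include <linux/errno.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

struct my_spi {
	struct spi_bitbang bitbang;	/* kept first, as these drivers do */
	/* controller-specific state would follow */
};

static void my_chipselect(struct spi_device *spi, int is_active)
{
	/* assert or release the CS line for this device */
}

static int my_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	return 0;	/* program word size / clock for this transfer */
}

static int my_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	return t->len;	/* shift t->len bytes, return how many were handled */
}

static int my_register(struct device *dev)
{
	struct spi_master *master = spi_alloc_master(dev, sizeof(struct my_spi));
	struct my_spi *hw;

	if (!master)
		return -ENOMEM;

	hw = spi_master_get_devdata(master);
	hw->bitbang.master = spi_master_get(master);
	hw->bitbang.chipselect = my_chipselect;
	hw->bitbang.setup_transfer = my_setup_transfer;
	hw->bitbang.txrx_bufs = my_txrx_bufs;

	return spi_bitbang_start(&hw->bitbang);	/* registers the underlying spi_master */
}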
+ * + * Copyright (C) 2007 Gary Jennejohn <garyj@denx.de> + * Copyright 2008 Stefan Roese <sr@denx.de>, DENX Software Engineering + * Copyright 2009 Harris Corporation, Steven A. Falco <sfalco@harris.com> + * + * Based in part on drivers/spi/spi_s3c24xx.c + * + * Copyright (c) 2006 Ben Dooks + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +/* + * The PPC4xx SPI controller has no FIFO so each sent/received byte will + * generate an interrupt to the CPU. This can cause high CPU utilization. + * This driver allows platforms to reduce the interrupt load on the CPU + * during SPI transfers by setting max_speed_hz via the device tree. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/of_platform.h> +#include <linux/of_spi.h> +#include <linux/of_gpio.h> +#include <linux/interrupt.h> +#include <linux/delay.h> + +#include <linux/gpio.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> + +#include <asm/io.h> +#include <asm/dcr.h> +#include <asm/dcr-regs.h> + +/* bits in mode register - bit 0 is MSb */ + +/* + * SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock" + * SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock" + * Note: This is the inverse of CPHA. + */ +#define SPI_PPC4XX_MODE_SCP (0x80 >> 3) + +/* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */ +#define SPI_PPC4XX_MODE_SPE (0x80 >> 4) + +/* + * SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode + * SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode + * Note: This is identical to SPI_LSB_FIRST. + */ +#define SPI_PPC4XX_MODE_RD (0x80 >> 5) + +/* + * SPI_PPC4XX_MODE_CI = 0 means "clock idles low" + * SPI_PPC4XX_MODE_CI = 1 means "clock idles high" + * Note: This is identical to CPOL. + */ +#define SPI_PPC4XX_MODE_CI (0x80 >> 6) + +/* + * SPI_PPC4XX_MODE_IL = 0 means "loopback disable" + * SPI_PPC4XX_MODE_IL = 1 means "loopback enable" + */ +#define SPI_PPC4XX_MODE_IL (0x80 >> 7) + +/* bits in control register */ +/* starts a transfer when set */ +#define SPI_PPC4XX_CR_STR (0x80 >> 7) + +/* bits in status register */ +/* port is busy with a transfer */ +#define SPI_PPC4XX_SR_BSY (0x80 >> 6) +/* RxD ready */ +#define SPI_PPC4XX_SR_RBR (0x80 >> 7) + +/* clock settings (SCP and CI) for various SPI modes */ +#define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0) +#define SPI_CLK_MODE1 (0 | 0) +#define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI) +#define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI) + +#define DRIVER_NAME "spi_ppc4xx_of" + +struct spi_ppc4xx_regs { + u8 mode; + u8 rxd; + u8 txd; + u8 cr; + u8 sr; + u8 dummy; + /* + * Clock divisor modulus register + * This uses the follwing formula: + * SCPClkOut = OPBCLK/(4(CDM + 1)) + * or + * CDM = (OPBCLK/4*SCPClkOut) - 1 + * bit 0 is the MSb! + */ + u8 cdm; +}; + +/* SPI Controller driver's private data. 
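The cdm register documented above is the only clock control the PPC4xx core offers; spi_ppc4xx_setupxfer() below derives it from an OPB clock that the probe has already divided by four and clamps it to the 8-bit register range. A standalone arithmetic check of the formula in the comment, with a 100 MHz OPB clock assumed purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned opbclk = 100000000;	/* assumed OPB clock, Hz */
	unsigned opb_div4 = opbclk / 4;	/* the driver keeps OPBCLK/4 around */
	unsigned wanted_hz = 1000000;	/* requested SPI clock */

	/* SCPClkOut = OPBCLK / (4 * (CDM + 1))  =>  CDM = OPBCLK / (4 * SCPClkOut) - 1 */
	int cdm = (int)(opb_div4 / wanted_hz) - 1;
	if (cdm < 0)
		cdm = 0;
	if (cdm > 0xff)
		cdm = 0xff;		/* CDM is an 8-bit register */

	printf("CDM = %d -> actual SCP clock = %u Hz\n",
	       cdm, opbclk / (4u * (cdm + 1)));
	return 0;
}

For the assumed numbers this yields CDM = 24 and an exact 1 MHz output; slower requested rates simply saturate at CDM = 0xff.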
*/ +struct ppc4xx_spi { + /* bitbang has to be first */ + struct spi_bitbang bitbang; + struct completion done; + + u64 mapbase; + u64 mapsize; + int irqnum; + /* need this to set the SPI clock */ + unsigned int opb_freq; + + /* for transfers */ + int len; + int count; + /* data buffers */ + const unsigned char *tx; + unsigned char *rx; + + int *gpios; + + struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */ + struct spi_master *master; + struct device *dev; +}; + +/* need this so we can set the clock in the chipselect routine */ +struct spi_ppc4xx_cs { + u8 mode; +}; + +static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct ppc4xx_spi *hw; + u8 data; + + dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", + t->tx_buf, t->rx_buf, t->len); + + hw = spi_master_get_devdata(spi->master); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->len = t->len; + hw->count = 0; + + /* send the first byte */ + data = hw->tx ? hw->tx[0] : 0; + out_8(&hw->regs->txd, data); + out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); + wait_for_completion(&hw->done); + + return hw->count; +} + +static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); + struct spi_ppc4xx_cs *cs = spi->controller_state; + int scr; + u8 cdm = 0; + u32 speed; + u8 bits_per_word; + + /* Start with the generic configuration for this device. */ + bits_per_word = spi->bits_per_word; + speed = spi->max_speed_hz; + + /* + * Modify the configuration if the transfer overrides it. Do not allow + * the transfer to overwrite the generic configuration with zeros. + */ + if (t) { + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + if (t->speed_hz) + speed = min(t->speed_hz, spi->max_speed_hz); + } + + if (bits_per_word != 8) { + dev_err(&spi->dev, "invalid bits-per-word (%d)\n", + bits_per_word); + return -EINVAL; + } + + if (!speed || (speed > spi->max_speed_hz)) { + dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed); + return -EINVAL; + } + + /* Write new configration */ + out_8(&hw->regs->mode, cs->mode); + + /* Set the clock */ + /* opb_freq was already divided by 4 */ + scr = (hw->opb_freq / speed) - 1; + if (scr > 0) + cdm = min(scr, 0xff); + + dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed); + + if (in_8(&hw->regs->cdm) != cdm) + out_8(&hw->regs->cdm, cdm); + + spin_lock(&hw->bitbang.lock); + if (!hw->bitbang.busy) { + hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); + /* Need to ndelay here? 
*/ + } + spin_unlock(&hw->bitbang.lock); + + return 0; +} + +static int spi_ppc4xx_setup(struct spi_device *spi) +{ + struct spi_ppc4xx_cs *cs = spi->controller_state; + + if (spi->bits_per_word != 8) { + dev_err(&spi->dev, "invalid bits-per-word (%d)\n", + spi->bits_per_word); + return -EINVAL; + } + + if (!spi->max_speed_hz) { + dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n"); + return -EINVAL; + } + + if (cs == NULL) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + /* + * We set all bits of the SPI0_MODE register, so, + * no need to read-modify-write + */ + cs->mode = SPI_PPC4XX_MODE_SPE; + + switch (spi->mode & (SPI_CPHA | SPI_CPOL)) { + case SPI_MODE_0: + cs->mode |= SPI_CLK_MODE0; + break; + case SPI_MODE_1: + cs->mode |= SPI_CLK_MODE1; + break; + case SPI_MODE_2: + cs->mode |= SPI_CLK_MODE2; + break; + case SPI_MODE_3: + cs->mode |= SPI_CLK_MODE3; + break; + } + + if (spi->mode & SPI_LSB_FIRST) + cs->mode |= SPI_PPC4XX_MODE_RD; + + return 0; +} + +static void spi_ppc4xx_chipsel(struct spi_device *spi, int value) +{ + struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); + unsigned int cs = spi->chip_select; + unsigned int cspol; + + /* + * If there are no chip selects at all, or if this is the special + * case of a non-existent (dummy) chip select, do nothing. + */ + + if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST) + return; + + cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; + if (value == BITBANG_CS_INACTIVE) + cspol = !cspol; + + gpio_set_value(hw->gpios[cs], cspol); +} + +static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id) +{ + struct ppc4xx_spi *hw; + u8 status; + u8 data; + unsigned int count; + + hw = (struct ppc4xx_spi *)dev_id; + + status = in_8(&hw->regs->sr); + if (!status) + return IRQ_NONE; + + /* + * BSY de-asserts one cycle after the transfer is complete. The + * interrupt is asserted after the transfer is complete. The exact + * relationship is not documented, hence this code. + */ + + if (unlikely(status & SPI_PPC4XX_SR_BSY)) { + u8 lstatus; + int cnt = 0; + + dev_dbg(hw->dev, "got interrupt but spi still busy?\n"); + do { + ndelay(10); + lstatus = in_8(&hw->regs->sr); + } while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY); + + if (cnt >= 100) { + dev_err(hw->dev, "busywait: too many loops!\n"); + complete(&hw->done); + return IRQ_HANDLED; + } else { + /* status is always 1 (RBR) here */ + status = in_8(&hw->regs->sr); + dev_dbg(hw->dev, "loops %d status %x\n", cnt, status); + } + } + + count = hw->count; + hw->count++; + + /* RBR triggered this interrupt. Therefore, data must be ready. */ + data = in_8(&hw->regs->rxd); + if (hw->rx) + hw->rx[count] = data; + + count++; + + if (count < hw->len) { + data = hw->tx ? hw->tx[count] : 0; + out_8(&hw->regs->txd, data); + out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); + } else { + complete(&hw->done); + } + + return IRQ_HANDLED; +} + +static void spi_ppc4xx_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +static void spi_ppc4xx_enable(struct ppc4xx_spi *hw) +{ + /* + * On all 4xx PPC's the SPI bus is shared/multiplexed with + * the 2nd I2C bus. We need to enable the the SPI bus before + * using it. 
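Because SPI_PPC4XX_MODE_SCP is the inverse of CPHA while SPI_PPC4XX_MODE_CI follows CPOL directly, SPI mode 0 ends up setting SCP only and mode 3 ends up setting CI only, exactly as the SPI_CLK_MODEx macros earlier in this file encode. A standalone restatement of that mapping, with the bit values copied from those defines:

#include <stdio.h>

#define MODE_SCP (0x80 >> 3)	/* inverse of CPHA */
#define MODE_CI  (0x80 >> 6)	/* same sense as CPOL */

static unsigned char clk_bits(int cpol, int cpha)
{
	unsigned char bits = 0;

	if (!cpha)		/* SCP = !CPHA */
		bits |= MODE_SCP;
	if (cpol)		/* CI = CPOL */
		bits |= MODE_CI;
	return bits;
}

int main(void)
{
	int mode;

	/* SPI modes 0..3: bit 1 is CPOL, bit 0 is CPHA */
	for (mode = 0; mode < 4; mode++)
		printf("SPI_MODE_%d -> mode bits 0x%02x\n",
		       mode, (unsigned)clk_bits(mode >> 1, mode & 1));
	return 0;
}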
+ */ + + /* need to clear bit 14 to enable SPC */ + dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0); +} + +static void free_gpios(struct ppc4xx_spi *hw) +{ + if (hw->master->num_chipselect) { + int i; + for (i = 0; i < hw->master->num_chipselect; i++) + if (gpio_is_valid(hw->gpios[i])) + gpio_free(hw->gpios[i]); + + kfree(hw->gpios); + hw->gpios = NULL; + } +} + +/* + * platform_device layer stuff... + */ +static int __init spi_ppc4xx_of_probe(struct platform_device *op, + const struct of_device_id *match) +{ + struct ppc4xx_spi *hw; + struct spi_master *master; + struct spi_bitbang *bbp; + struct resource resource; + struct device_node *np = op->dev.of_node; + struct device *dev = &op->dev; + struct device_node *opbnp; + int ret; + int num_gpios; + const unsigned int *clk; + + master = spi_alloc_master(dev, sizeof *hw); + if (master == NULL) + return -ENOMEM; + master->dev.of_node = np; + dev_set_drvdata(dev, master); + hw = spi_master_get_devdata(master); + hw->master = spi_master_get(master); + hw->dev = dev; + + init_completion(&hw->done); + + /* + * A count of zero implies a single SPI device without any chip-select. + * Note that of_gpio_count counts all gpios assigned to this spi master. + * This includes both "null" gpio's and real ones. + */ + num_gpios = of_gpio_count(np); + if (num_gpios) { + int i; + + hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL); + if (!hw->gpios) { + ret = -ENOMEM; + goto free_master; + } + + for (i = 0; i < num_gpios; i++) { + int gpio; + enum of_gpio_flags flags; + + gpio = of_get_gpio_flags(np, i, &flags); + hw->gpios[i] = gpio; + + if (gpio_is_valid(gpio)) { + /* Real CS - set the initial state. */ + ret = gpio_request(gpio, np->name); + if (ret < 0) { + dev_err(dev, "can't request gpio " + "#%d: %d\n", i, ret); + goto free_gpios; + } + + gpio_direction_output(gpio, + !!(flags & OF_GPIO_ACTIVE_LOW)); + } else if (gpio == -EEXIST) { + ; /* No CS, but that's OK. */ + } else { + dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); + ret = -EINVAL; + goto free_gpios; + } + } + } + + /* Setup the state for the bitbang driver */ + bbp = &hw->bitbang; + bbp->master = hw->master; + bbp->setup_transfer = spi_ppc4xx_setupxfer; + bbp->chipselect = spi_ppc4xx_chipsel; + bbp->txrx_bufs = spi_ppc4xx_txrx; + bbp->use_dma = 0; + bbp->master->setup = spi_ppc4xx_setup; + bbp->master->cleanup = spi_ppc4xx_cleanup; + + /* Allocate bus num dynamically. 
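In the probe loop above, of_get_gpio_flags() may return -EEXIST for a hole in the gpios property; the driver deliberately keeps that value in hw->gpios[] so spi_ppc4xx_chipsel() can recognise the slot as a device without a chip-select line, while real lines are parked at their inactive level immediately. A condensed sketch of that per-entry classification (kernel-style, error reporting trimmed):

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

/* Classify one chip-select entry from the device tree "gpios" list. */
static int init_one_cs(struct device_node *np, int idx, int *slot)
{
	enum of_gpio_flags flags;
	int gpio = of_get_gpio_flags(np, idx, &flags);

	*slot = gpio;
	if (gpio == -EEXIST)		/* explicit hole: device without a CS line */
		return 0;
	if (!gpio_is_valid(gpio))
		return -EINVAL;
	if (gpio_request(gpio, np->name))
		return -EBUSY;

	/* park the line inactive: high for an active-low CS, low otherwise */
	return gpio_direction_output(gpio, !!(flags & OF_GPIO_ACTIVE_LOW));
}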
*/ + bbp->master->bus_num = -1; + + /* the spi->mode bits understood by this driver: */ + bbp->master->mode_bits = + SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST; + + /* this many pins in all GPIO controllers */ + bbp->master->num_chipselect = num_gpios; + + /* Get the clock for the OPB */ + opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb"); + if (opbnp == NULL) { + dev_err(dev, "OPB: cannot find node\n"); + ret = -ENODEV; + goto free_gpios; + } + /* Get the clock (Hz) for the OPB */ + clk = of_get_property(opbnp, "clock-frequency", NULL); + if (clk == NULL) { + dev_err(dev, "OPB: no clock-frequency property set\n"); + of_node_put(opbnp); + ret = -ENODEV; + goto free_gpios; + } + hw->opb_freq = *clk; + hw->opb_freq >>= 2; + of_node_put(opbnp); + + ret = of_address_to_resource(np, 0, &resource); + if (ret) { + dev_err(dev, "error while parsing device node resource\n"); + goto free_gpios; + } + hw->mapbase = resource.start; + hw->mapsize = resource.end - resource.start + 1; + + /* Sanity check */ + if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) { + dev_err(dev, "too small to map registers\n"); + ret = -EINVAL; + goto free_gpios; + } + + /* Request IRQ */ + hw->irqnum = irq_of_parse_and_map(np, 0); + ret = request_irq(hw->irqnum, spi_ppc4xx_int, + IRQF_DISABLED, "spi_ppc4xx_of", (void *)hw); + if (ret) { + dev_err(dev, "unable to allocate interrupt\n"); + goto free_gpios; + } + + if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) { + dev_err(dev, "resource unavailable\n"); + ret = -EBUSY; + goto request_mem_error; + } + + hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs)); + + if (!hw->regs) { + dev_err(dev, "unable to memory map registers\n"); + ret = -ENXIO; + goto map_io_error; + } + + spi_ppc4xx_enable(hw); + + /* Finally register our spi controller */ + dev->dma_mask = 0; + ret = spi_bitbang_start(bbp); + if (ret) { + dev_err(dev, "failed to register SPI master\n"); + goto unmap_regs; + } + + dev_info(dev, "driver initialized\n"); + + return 0; + +unmap_regs: + iounmap(hw->regs); +map_io_error: + release_mem_region(hw->mapbase, hw->mapsize); +request_mem_error: + free_irq(hw->irqnum, hw); +free_gpios: + free_gpios(hw); +free_master: + dev_set_drvdata(dev, NULL); + spi_master_put(master); + + dev_err(dev, "initialization failed\n"); + return ret; +} + +static int __exit spi_ppc4xx_of_remove(struct platform_device *op) +{ + struct spi_master *master = dev_get_drvdata(&op->dev); + struct ppc4xx_spi *hw = spi_master_get_devdata(master); + + spi_bitbang_stop(&hw->bitbang); + dev_set_drvdata(&op->dev, NULL); + release_mem_region(hw->mapbase, hw->mapsize); + free_irq(hw->irqnum, hw); + iounmap(hw->regs); + free_gpios(hw); + return 0; +} + +static const struct of_device_id spi_ppc4xx_of_match[] = { + { .compatible = "ibm,ppc4xx-spi", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); + +static struct of_platform_driver spi_ppc4xx_of_driver = { + .probe = spi_ppc4xx_of_probe, + .remove = __exit_p(spi_ppc4xx_of_remove), + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = spi_ppc4xx_of_match, + }, +}; + +static int __init spi_ppc4xx_init(void) +{ + return of_register_platform_driver(&spi_ppc4xx_of_driver); +} +module_init(spi_ppc4xx_init); + +static void __exit spi_ppc4xx_exit(void) +{ + of_unregister_platform_driver(&spi_ppc4xx_of_driver); +} +module_exit(spi_ppc4xx_exit); + +MODULE_AUTHOR("Gary Jennejohn & Stefan Roese"); +MODULE_DESCRIPTION("Simple PPC4xx SPI Driver"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index e0d44af4745..151a95e4065 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -1,7 +1,7 @@ /* linux/drivers/spi/spi_s3c24xx.c * * Copyright (c) 2006 Ben Dooks - * Copyright (c) 2006 Simtec Electronics + * Copyright 2006-2009 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify @@ -20,17 +20,41 @@ #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/gpio.h> +#include <linux/io.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> -#include <asm/io.h> -#include <asm/dma.h> -#include <mach/hardware.h> - #include <plat/regs-spi.h> #include <mach/spi.h> +#include <plat/fiq.h> +#include <asm/fiq.h> + +#include "spi_s3c24xx_fiq.h" + +/** + * s3c24xx_spi_devstate - per device data + * @hz: Last frequency calculated for @sppre field. + * @mode: Last mode setting for the @spcon field. + * @spcon: Value to write to the SPCON register. + * @sppre: Value to write to the SPPRE register. + */ +struct s3c24xx_spi_devstate { + unsigned int hz; + unsigned int mode; + u8 spcon; + u8 sppre; +}; + +enum spi_fiq_mode { + FIQ_MODE_NONE = 0, + FIQ_MODE_TX = 1, + FIQ_MODE_RX = 2, + FIQ_MODE_TXRX = 3, +}; + struct s3c24xx_spi { /* bitbang has to be first */ struct spi_bitbang bitbang; @@ -41,6 +65,11 @@ struct s3c24xx_spi { int len; int count; + struct fiq_handler fiq_handler; + enum spi_fiq_mode fiq_mode; + unsigned char fiq_inuse; + unsigned char fiq_claimed; + void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); @@ -56,6 +85,7 @@ struct s3c24xx_spi { struct s3c2410_spi_info *pdata; }; + #define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) #define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) @@ -71,106 +101,328 @@ static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol) static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) { + struct s3c24xx_spi_devstate *cs = spi->controller_state; struct s3c24xx_spi *hw = to_hw(spi); unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; - unsigned int spcon; + + /* change the chipselect state and the state of the spi engine clock */ switch (value) { case BITBANG_CS_INACTIVE: hw->set_cs(hw->pdata, spi->chip_select, cspol^1); + writeb(cs->spcon, hw->regs + S3C2410_SPCON); break; case BITBANG_CS_ACTIVE: - spcon = readb(hw->regs + S3C2410_SPCON); - - if (spi->mode & SPI_CPHA) - spcon |= S3C2410_SPCON_CPHA_FMTB; - else - spcon &= ~S3C2410_SPCON_CPHA_FMTB; - - if (spi->mode & SPI_CPOL) - spcon |= S3C2410_SPCON_CPOL_HIGH; - else - spcon &= ~S3C2410_SPCON_CPOL_HIGH; - - spcon |= S3C2410_SPCON_ENSCK; - - /* write new configration */ - - writeb(spcon, hw->regs + S3C2410_SPCON); + writeb(cs->spcon | S3C2410_SPCON_ENSCK, + hw->regs + S3C2410_SPCON); hw->set_cs(hw->pdata, spi->chip_select, cspol); - break; } } -static int s3c24xx_spi_setupxfer(struct spi_device *spi, - struct spi_transfer *t) +static int s3c24xx_spi_update_state(struct spi_device *spi, + struct spi_transfer *t) { struct s3c24xx_spi *hw = to_hw(spi); + struct s3c24xx_spi_devstate *cs = spi->controller_state; unsigned int bpw; unsigned int hz; unsigned int div; + unsigned long clk; bpw = t ? t->bits_per_word : spi->bits_per_word; hz = t ? 
t->speed_hz : spi->max_speed_hz; + if (!bpw) + bpw = 8; + + if (!hz) + hz = spi->max_speed_hz; + if (bpw != 8) { dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); return -EINVAL; } - div = clk_get_rate(hw->clk) / hz; + if (spi->mode != cs->mode) { + u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK; - /* is clk = pclk / (2 * (pre+1)), or is it - * clk = (pclk * 2) / ( pre + 1) */ + if (spi->mode & SPI_CPHA) + spcon |= S3C2410_SPCON_CPHA_FMTB; - div /= 2; + if (spi->mode & SPI_CPOL) + spcon |= S3C2410_SPCON_CPOL_HIGH; - if (div > 0) - div -= 1; + cs->mode = spi->mode; + cs->spcon = spcon; + } - if (div > 255) - div = 255; + if (cs->hz != hz) { + clk = clk_get_rate(hw->clk); + div = DIV_ROUND_UP(clk, hz * 2) - 1; - dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz); - writeb(div, hw->regs + S3C2410_SPPRE); + if (div > 255) + div = 255; - spin_lock(&hw->bitbang.lock); - if (!hw->bitbang.busy) { - hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); - /* need to ndelay for 0.5 clocktick ? */ + dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n", + div, hz, clk / (2 * (div + 1))); + + cs->hz = hz; + cs->sppre = div; } - spin_unlock(&hw->bitbang.lock); return 0; } +static int s3c24xx_spi_setupxfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct s3c24xx_spi_devstate *cs = spi->controller_state; + struct s3c24xx_spi *hw = to_hw(spi); + int ret; + + ret = s3c24xx_spi_update_state(spi, t); + if (!ret) + writeb(cs->sppre, hw->regs + S3C2410_SPPRE); + + return ret; +} + static int s3c24xx_spi_setup(struct spi_device *spi) { + struct s3c24xx_spi_devstate *cs = spi->controller_state; + struct s3c24xx_spi *hw = to_hw(spi); int ret; - ret = s3c24xx_spi_setupxfer(spi, NULL); - if (ret < 0) { - dev_err(&spi->dev, "setupxfer returned %d\n", ret); + /* allocate settings on the first call */ + if (!cs) { + cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL); + if (!cs) { + dev_err(&spi->dev, "no memory for controller state\n"); + return -ENOMEM; + } + + cs->spcon = SPCON_DEFAULT; + cs->hz = -1; + spi->controller_state = cs; + } + + /* initialise the state from the device */ + ret = s3c24xx_spi_update_state(spi, NULL); + if (ret) return ret; + + spin_lock(&hw->bitbang.lock); + if (!hw->bitbang.busy) { + hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); + /* need to ndelay for 0.5 clocktick ? */ } + spin_unlock(&hw->bitbang.lock); return 0; } +static void s3c24xx_spi_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) { return hw->tx ? hw->tx[count] : 0; } +#ifdef CONFIG_SPI_S3C24XX_FIQ +/* Support for FIQ based pseudo-DMA to improve the transfer speed. + * + * This code uses the assembly helper in spi_s3c24xx_spi.S which is + * used by the FIQ core to move data between main memory and the peripheral + * block. Since this is code running on the processor, there is no problem + * with cache coherency of the buffers, so we can use any buffer we like. + */ + +/** + * struct spi_fiq_code - FIQ code and header + * @length: The length of the code fragment, excluding this header. + * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at. + * @data: The code itself to install as a FIQ handler. 
+ */ +struct spi_fiq_code { + u32 length; + u32 ack_offset; + u8 data[0]; +}; + +extern struct spi_fiq_code s3c24xx_spi_fiq_txrx; +extern struct spi_fiq_code s3c24xx_spi_fiq_tx; +extern struct spi_fiq_code s3c24xx_spi_fiq_rx; + +/** + * ack_bit - turn IRQ into IRQ acknowledgement bit + * @irq: The interrupt number + * + * Returns the bit to write to the interrupt acknowledge register. + */ +static inline u32 ack_bit(unsigned int irq) +{ + return 1 << (irq - IRQ_EINT0); +} + +/** + * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer + * @hw: The hardware state. + * + * Claim the FIQ handler (only one can be active at any one time) and + * then setup the correct transfer code for this transfer. + * + * This call updates all the necessary state information if successful, + * so the caller does not need to do anything more than start the transfer + * as normal, since the IRQ will have been re-routed to the FIQ handler. +*/ +void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) +{ + struct pt_regs regs; + enum spi_fiq_mode mode; + struct spi_fiq_code *code; + int ret; + + if (!hw->fiq_claimed) { + /* try and claim fiq if we haven't got it, and if not + * then return and simply use another transfer method */ + + ret = claim_fiq(&hw->fiq_handler); + if (ret) + return; + } + + if (hw->tx && !hw->rx) + mode = FIQ_MODE_TX; + else if (hw->rx && !hw->tx) + mode = FIQ_MODE_RX; + else + mode = FIQ_MODE_TXRX; + + regs.uregs[fiq_rspi] = (long)hw->regs; + regs.uregs[fiq_rrx] = (long)hw->rx; + regs.uregs[fiq_rtx] = (long)hw->tx + 1; + regs.uregs[fiq_rcount] = hw->len - 1; + regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ; + + set_fiq_regs(®s); + + if (hw->fiq_mode != mode) { + u32 *ack_ptr; + + hw->fiq_mode = mode; + + switch (mode) { + case FIQ_MODE_TX: + code = &s3c24xx_spi_fiq_tx; + break; + case FIQ_MODE_RX: + code = &s3c24xx_spi_fiq_rx; + break; + case FIQ_MODE_TXRX: + code = &s3c24xx_spi_fiq_txrx; + break; + default: + code = NULL; + } + + BUG_ON(!code); + + ack_ptr = (u32 *)&code->data[code->ack_offset]; + *ack_ptr = ack_bit(hw->irq); + + set_fiq_handler(&code->data, code->length); + } + + s3c24xx_set_fiq(hw->irq, true); + + hw->fiq_mode = mode; + hw->fiq_inuse = 1; +} + +/** + * s3c24xx_spi_fiqop - FIQ core code callback + * @pw: Data registered with the handler + * @release: Whether this is a release or a return. + * + * Called by the FIQ code when another module wants to use the FIQ, so + * return whether we are currently using this or not and then update our + * internal state. + */ +static int s3c24xx_spi_fiqop(void *pw, int release) +{ + struct s3c24xx_spi *hw = pw; + int ret = 0; + + if (release) { + if (hw->fiq_inuse) + ret = -EBUSY; + + /* note, we do not need to unroute the FIQ, as the FIQ + * vector code de-routes it to signal the end of transfer */ + + hw->fiq_mode = FIQ_MODE_NONE; + hw->fiq_claimed = 0; + } else { + hw->fiq_claimed = 1; + } + + return ret; +} + +/** + * s3c24xx_spi_initfiq - setup the information for the FIQ core + * @hw: The hardware state. + * + * Setup the fiq_handler block to pass to the FIQ core. + */ +static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw) +{ + hw->fiq_handler.dev_id = hw; + hw->fiq_handler.name = dev_name(hw->dev); + hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop; +} + +/** + * s3c24xx_spi_usefiq - return if we should be using FIQ. + * @hw: The hardware state. + * + * Return true if the platform data specifies whether this channel is + * allowed to use the FIQ. 
+ */ +static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw) +{ + return hw->pdata->use_fiq; +} + +/** + * s3c24xx_spi_usingfiq - return if channel is using FIQ + * @spi: The hardware state. + * + * Return whether the channel is currently using the FIQ (separate from + * whether the FIQ is claimed). + */ +static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi) +{ + return spi->fiq_inuse; +} +#else + +static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { } +static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { } +static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; } +static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; } + +#endif /* CONFIG_SPI_S3C24XX_FIQ */ + static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) { struct s3c24xx_spi *hw = to_hw(spi); - dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", - t->tx_buf, t->rx_buf, t->len); - hw->tx = t->tx_buf; hw->rx = t->rx_buf; hw->len = t->len; @@ -178,11 +430,14 @@ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) init_completion(&hw->done); + hw->fiq_inuse = 0; + if (s3c24xx_spi_usefiq(hw) && t->len >= 3) + s3c24xx_spi_tryfiq(hw); + /* send the first byte */ writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); wait_for_completion(&hw->done); - return hw->count; } @@ -204,17 +459,27 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev) goto irq_done; } - hw->count++; + if (!s3c24xx_spi_usingfiq(hw)) { + hw->count++; - if (hw->rx) - hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); + if (hw->rx) + hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); - count++; + count++; + + if (count < hw->len) + writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT); + else + complete(&hw->done); + } else { + hw->count = hw->len; + hw->fiq_inuse = 0; + + if (hw->rx) + hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT); - if (count < hw->len) - writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT); - else complete(&hw->done); + } irq_done: return IRQ_HANDLED; @@ -272,6 +537,10 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hw); init_completion(&hw->done); + /* initialise fiq handler */ + + s3c24xx_spi_initfiq(hw); + /* setup the master state. 
*/ /* the spi->mode bits understood by this driver: */ @@ -286,7 +555,9 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev) hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer; hw->bitbang.chipselect = s3c24xx_spi_chipsel; hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; - hw->bitbang.master->setup = s3c24xx_spi_setup; + + hw->master->setup = s3c24xx_spi_setup; + hw->master->cleanup = s3c24xx_spi_cleanup; dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); @@ -299,7 +570,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev) goto err_no_iores; } - hw->ioarea = request_mem_region(res->start, (res->end - res->start)+1, + hw->ioarea = request_mem_region(res->start, resource_size(res), pdev->name); if (hw->ioarea == NULL) { @@ -308,7 +579,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev) goto err_no_iores; } - hw->regs = ioremap(res->start, (res->end - res->start)+1); + hw->regs = ioremap(res->start, resource_size(res)); if (hw->regs == NULL) { dev_err(&pdev->dev, "Cannot map IO\n"); err = -ENXIO; @@ -385,7 +656,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev) err_no_iores: err_no_pdata: - spi_master_put(hw->master);; + spi_master_put(hw->master); err_nomem: return err; @@ -418,9 +689,9 @@ static int __exit s3c24xx_spi_remove(struct platform_device *dev) #ifdef CONFIG_PM -static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg) +static int s3c24xx_spi_suspend(struct device *dev) { - struct s3c24xx_spi *hw = platform_get_drvdata(pdev); + struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); if (hw->pdata && hw->pdata->gpio_setup) hw->pdata->gpio_setup(hw->pdata, 0); @@ -429,27 +700,31 @@ static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg) return 0; } -static int s3c24xx_spi_resume(struct platform_device *pdev) +static int s3c24xx_spi_resume(struct device *dev) { - struct s3c24xx_spi *hw = platform_get_drvdata(pdev); + struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); s3c24xx_spi_initialsetup(hw); return 0; } +static const struct dev_pm_ops s3c24xx_spi_pmops = { + .suspend = s3c24xx_spi_suspend, + .resume = s3c24xx_spi_resume, +}; + +#define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops #else -#define s3c24xx_spi_suspend NULL -#define s3c24xx_spi_resume NULL -#endif +#define S3C24XX_SPI_PMOPS NULL +#endif /* CONFIG_PM */ MODULE_ALIAS("platform:s3c2410-spi"); static struct platform_driver s3c24xx_spi_driver = { .remove = __exit_p(s3c24xx_spi_remove), - .suspend = s3c24xx_spi_suspend, - .resume = s3c24xx_spi_resume, .driver = { .name = "s3c2410-spi", .owner = THIS_MODULE, + .pm = S3C24XX_SPI_PMOPS, }, }; diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi_s3c24xx_fiq.S new file mode 100644 index 00000000000..3793cae361d --- /dev/null +++ b/drivers/spi/spi_s3c24xx_fiq.S @@ -0,0 +1,116 @@ +/* linux/drivers/spi/spi_s3c24xx_fiq.S + * + * Copyright 2009 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * S3C24XX SPI - FIQ pseudo-DMA transfer code + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +#include <mach/map.h> +#include <mach/regs-irq.h> +#include <plat/regs-spi.h> + +#include "spi_s3c24xx_fiq.h" + + .text + + @ entry to these routines is as follows, with the register names + @ defined in fiq.h so that they can be shared with the C files which + @ setup the calling registers. + @ + @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND + @ fiq_rtmp Temporary register to hold tx/rx data + @ fiq_rspi The base of the SPI register block + @ fiq_rtx The tx buffer pointer + @ fiq_rrx The rx buffer pointer + @ fiq_rcount The number of bytes to move + + @ each entry starts with a word entry of how long it is + @ and an offset to the irq acknowledgment word + +ENTRY(s3c24xx_spi_fiq_rx) +s3c24xx_spi_fix_rx: + .word fiq_rx_end - fiq_rx_start + .word fiq_rx_irq_ack - fiq_rx_start +fiq_rx_start: + ldr fiq_rtmp, fiq_rx_irq_ack + str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] + + ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] + strb fiq_rtmp, [ fiq_rrx ], #1 + + mov fiq_rtmp, #0xff + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 + subnes pc, lr, #4 @@ return, still have work to do + + @@ set IRQ controller so that next op will trigger IRQ + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] + subs pc, lr, #4 + +fiq_rx_irq_ack: + .word 0 +fiq_rx_end: + +ENTRY(s3c24xx_spi_fiq_txrx) +s3c24xx_spi_fiq_txrx: + .word fiq_txrx_end - fiq_txrx_start + .word fiq_txrx_irq_ack - fiq_txrx_start +fiq_txrx_start: + + ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] + strb fiq_rtmp, [ fiq_rrx ], #1 + + ldr fiq_rtmp, fiq_txrx_irq_ack + str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] + + ldrb fiq_rtmp, [ fiq_rtx ], #1 + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 + subnes pc, lr, #4 @@ return, still have work to do + + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] + subs pc, lr, #4 + +fiq_txrx_irq_ack: + .word 0 + +fiq_txrx_end: + +ENTRY(s3c24xx_spi_fiq_tx) +s3c24xx_spi_fix_tx: + .word fiq_tx_end - fiq_tx_start + .word fiq_tx_irq_ack - fiq_tx_start +fiq_tx_start: + ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] + + ldr fiq_rtmp, fiq_tx_irq_ack + str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] + + ldrb fiq_rtmp, [ fiq_rtx ], #1 + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 + subnes pc, lr, #4 @@ return, still have work to do + + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] + subs pc, lr, #4 + +fiq_tx_irq_ack: + .word 0 + +fiq_tx_end: + + .end diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi_s3c24xx_fiq.h new file mode 100644 index 00000000000..a5950bb25b5 --- /dev/null +++ b/drivers/spi/spi_s3c24xx_fiq.h @@ -0,0 +1,26 @@ +/* linux/drivers/spi/spi_s3c24xx_fiq.h + * + * Copyright 2009 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * S3C24XX SPI - FIQ pseudo-DMA transfer support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +/* We have R8 through R13 to play with */ + +#ifdef __ASSEMBLY__ +#define __REG_NR(x) r##x +#else +#define __REG_NR(x) (x) +#endif + +#define fiq_rspi __REG_NR(8) +#define fiq_rtmp __REG_NR(9) +#define fiq_rrx __REG_NR(10) +#define fiq_rtx __REG_NR(11) +#define fiq_rcount __REG_NR(12) +#define fiq_rirq __REG_NR(13) diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c index bbf9371cd28..be991359bf9 100644 --- a/drivers/spi/spi_s3c24xx_gpio.c +++ b/drivers/spi/spi_s3c24xx_gpio.c @@ -58,32 +58,31 @@ static inline u32 getmiso(struct spi_device *dev) #define spidelay(x) ndelay(x) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); } diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c new file mode 100644 index 00000000000..795828b90f4 --- /dev/null +++ b/drivers/spi/spi_s3c64xx.c @@ -0,0 +1,1248 @@ +/* linux/drivers/spi/spi_s3c64xx.c + * + * Copyright (C) 2009 Samsung Electronics Ltd. + * Jaswinder Singh <jassi.brar@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> + +#include <mach/dma.h> +#include <plat/s3c64xx-spi.h> + +/* Registers and bit-fields */ + +#define S3C64XX_SPI_CH_CFG 0x00 +#define S3C64XX_SPI_CLK_CFG 0x04 +#define S3C64XX_SPI_MODE_CFG 0x08 +#define S3C64XX_SPI_SLAVE_SEL 0x0C +#define S3C64XX_SPI_INT_EN 0x10 +#define S3C64XX_SPI_STATUS 0x14 +#define S3C64XX_SPI_TX_DATA 0x18 +#define S3C64XX_SPI_RX_DATA 0x1C +#define S3C64XX_SPI_PACKET_CNT 0x20 +#define S3C64XX_SPI_PENDING_CLR 0x24 +#define S3C64XX_SPI_SWAP_CFG 0x28 +#define S3C64XX_SPI_FB_CLK 0x2C + +#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */ +#define S3C64XX_SPI_CH_SW_RST (1<<5) +#define S3C64XX_SPI_CH_SLAVE (1<<4) +#define S3C64XX_SPI_CPOL_L (1<<3) +#define S3C64XX_SPI_CPHA_B (1<<2) +#define S3C64XX_SPI_CH_RXCH_ON (1<<1) +#define S3C64XX_SPI_CH_TXCH_ON (1<<0) + +#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9) +#define S3C64XX_SPI_CLKSEL_SRCSHFT 9 +#define S3C64XX_SPI_ENCLK_ENABLE (1<<8) +#define S3C64XX_SPI_PSR_MASK 0xff + +#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29) +#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29) +#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29) +#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29) +#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17) +#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17) +#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17) +#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17) +#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2) +#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1) +#define S3C64XX_SPI_MODE_4BURST (1<<0) + +#define S3C64XX_SPI_SLAVE_AUTO (1<<1) +#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0) + +#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL) + +#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \ + (c)->regs + S3C64XX_SPI_SLAVE_SEL) + +#define S3C64XX_SPI_INT_TRAILING_EN (1<<6) +#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5) +#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4) +#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3) +#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2) +#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1) +#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0) + +#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5) +#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4) +#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3) +#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2) +#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1) +#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0) + +#define S3C64XX_SPI_PACKET_CNT_EN (1<<16) + +#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4) +#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3) +#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2) +#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1) +#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0) + +#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7) +#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6) +#define S3C64XX_SPI_SWAP_RX_BIT (1<<5) +#define S3C64XX_SPI_SWAP_RX_EN (1<<4) +#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3) +#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2) +#define S3C64XX_SPI_SWAP_TX_BIT (1<<1) +#define S3C64XX_SPI_SWAP_TX_EN (1<<0) + +#define S3C64XX_SPI_FBCLK_MSK (3<<0) + +#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \ + (((i)->fifo_lvl_mask + 1))) \ + ? 1 : 0) + +#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \ + (((i)->fifo_lvl_mask + 1) << 1)) \ + ? 
1 : 0) +#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask) +#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask) + +#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff +#define S3C64XX_SPI_TRAILCNT_OFF 19 + +#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT + +#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) + +#define SUSPND (1<<0) +#define SPIBUSY (1<<1) +#define RXBUSY (1<<2) +#define TXBUSY (1<<3) + +/** + * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. + * @clk: Pointer to the spi clock. + * @src_clk: Pointer to the clock used to generate SPI signals. + * @master: Pointer to the SPI Protocol master. + * @workqueue: Work queue for the SPI xfer requests. + * @cntrlr_info: Platform specific data for the controller this driver manages. + * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint. + * @work: Work + * @queue: To log SPI xfer requests. + * @lock: Controller specific lock. + * @state: Set of FLAGS to indicate status. + * @rx_dmach: Controller's DMA channel for Rx. + * @tx_dmach: Controller's DMA channel for Tx. + * @sfr_start: BUS address of SPI controller regs. + * @regs: Pointer to ioremap'ed controller registers. + * @xfer_completion: To indicate completion of xfer task. + * @cur_mode: Stores the active configuration of the controller. + * @cur_bpw: Stores the active bits per word settings. + * @cur_speed: Stores the active xfer clock speed. + */ +struct s3c64xx_spi_driver_data { + void __iomem *regs; + struct clk *clk; + struct clk *src_clk; + struct platform_device *pdev; + struct spi_master *master; + struct workqueue_struct *workqueue; + struct s3c64xx_spi_info *cntrlr_info; + struct spi_device *tgl_spi; + struct work_struct work; + struct list_head queue; + spinlock_t lock; + enum dma_ch rx_dmach; + enum dma_ch tx_dmach; + unsigned long sfr_start; + struct completion xfer_completion; + unsigned state; + unsigned cur_mode, cur_bpw; + unsigned cur_speed; +}; + +static struct s3c2410_dma_client s3c64xx_spi_dma_client = { + .name = "samsung-spi-dma", +}; + +static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + unsigned long loops; + u32 val; + + writel(0, regs + S3C64XX_SPI_PACKET_CNT); + + val = readl(regs + S3C64XX_SPI_CH_CFG); + val |= S3C64XX_SPI_CH_SW_RST; + val &= ~S3C64XX_SPI_CH_HS_EN; + writel(val, regs + S3C64XX_SPI_CH_CFG); + + /* Flush TxFIFO*/ + loops = msecs_to_loops(1); + do { + val = readl(regs + S3C64XX_SPI_STATUS); + } while (TX_FIFO_LVL(val, sci) && loops--); + + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); + + /* Flush RxFIFO*/ + loops = msecs_to_loops(1); + do { + val = readl(regs + S3C64XX_SPI_STATUS); + if (RX_FIFO_LVL(val, sci)) + readl(regs + S3C64XX_SPI_RX_DATA); + else + break; + } while (loops--); + + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); + + val = readl(regs + S3C64XX_SPI_CH_CFG); + val &= ~S3C64XX_SPI_CH_SW_RST; + writel(val, regs + S3C64XX_SPI_CH_CFG); + + val = readl(regs + S3C64XX_SPI_MODE_CFG); + val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); + writel(val, regs + S3C64XX_SPI_MODE_CFG); + + val = readl(regs + S3C64XX_SPI_CH_CFG); + val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON); + writel(val, regs + S3C64XX_SPI_CH_CFG); +} + +static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, + struct spi_device *spi, + struct spi_transfer *xfer, int dma_mode) +{ + struct 
s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + u32 modecfg, chcfg; + + modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); + modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); + + chcfg = readl(regs + S3C64XX_SPI_CH_CFG); + chcfg &= ~S3C64XX_SPI_CH_TXCH_ON; + + if (dma_mode) { + chcfg &= ~S3C64XX_SPI_CH_RXCH_ON; + } else { + /* Always shift in data in FIFO, even if xfer is Tx only, + * this helps setting PCKT_CNT value for generating clocks + * as exactly needed. + */ + chcfg |= S3C64XX_SPI_CH_RXCH_ON; + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) + | S3C64XX_SPI_PACKET_CNT_EN, + regs + S3C64XX_SPI_PACKET_CNT); + } + + if (xfer->tx_buf != NULL) { + sdd->state |= TXBUSY; + chcfg |= S3C64XX_SPI_CH_TXCH_ON; + if (dma_mode) { + modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; + s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); + s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, + xfer->tx_dma, xfer->len); + s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); + } else { + switch (sdd->cur_bpw) { + case 32: + iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 4); + break; + case 16: + iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 2); + break; + default: + iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len); + break; + } + } + } + + if (xfer->rx_buf != NULL) { + sdd->state |= RXBUSY; + + if (sci->high_speed && sdd->cur_speed >= 30000000UL + && !(sdd->cur_mode & SPI_CPHA)) + chcfg |= S3C64XX_SPI_CH_HS_EN; + + if (dma_mode) { + modecfg |= S3C64XX_SPI_MODE_RXDMA_ON; + chcfg |= S3C64XX_SPI_CH_RXCH_ON; + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) + | S3C64XX_SPI_PACKET_CNT_EN, + regs + S3C64XX_SPI_PACKET_CNT); + s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); + s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, + xfer->rx_dma, xfer->len); + s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); + } + } + + writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); + writel(chcfg, regs + S3C64XX_SPI_CH_CFG); +} + +static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, + struct spi_device *spi) +{ + struct s3c64xx_spi_csinfo *cs; + + if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */ + if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ + /* Deselect the last toggled device */ + cs = sdd->tgl_spi->controller_data; + cs->set_level(cs->line, + spi->mode & SPI_CS_HIGH ? 0 : 1); + } + sdd->tgl_spi = NULL; + } + + cs = spi->controller_data; + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); +} + +static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, + struct spi_transfer *xfer, int dma_mode) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + unsigned long val; + int ms; + + /* millisecs to xfer 'len' bytes @ 'cur_speed' */ + ms = xfer->len * 8 * 1000 / sdd->cur_speed; + ms += 10; /* some tolerance */ + + if (dma_mode) { + val = msecs_to_jiffies(ms) + 10; + val = wait_for_completion_timeout(&sdd->xfer_completion, val); + } else { + u32 status; + val = msecs_to_loops(ms); + do { + status = readl(regs + S3C64XX_SPI_STATUS); + } while (RX_FIFO_LVL(status, sci) < xfer->len && --val); + } + + if (!val) + return -EIO; + + if (dma_mode) { + u32 status; + + /* + * DmaTx returns after simply writing data in the FIFO, + * w/o waiting for real transmission on the bus to finish. + * DmaRx returns only after Dma read data from FIFO which + * needs bus transmission to finish, so we don't worry if + * Xfer involved Rx(with or without Tx). 
+ */ + if (xfer->rx_buf == NULL) { + val = msecs_to_loops(10); + status = readl(regs + S3C64XX_SPI_STATUS); + while ((TX_FIFO_LVL(status, sci) + || !S3C64XX_SPI_ST_TX_DONE(status, sci)) + && --val) { + cpu_relax(); + status = readl(regs + S3C64XX_SPI_STATUS); + } + + if (!val) + return -EIO; + } + } else { + /* If it was only Tx */ + if (xfer->rx_buf == NULL) { + sdd->state &= ~TXBUSY; + return 0; + } + + switch (sdd->cur_bpw) { + case 32: + ioread32_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 4); + break; + case 16: + ioread16_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 2); + break; + default: + ioread8_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len); + break; + } + sdd->state &= ~RXBUSY; + } + + return 0; +} + +static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, + struct spi_device *spi) +{ + struct s3c64xx_spi_csinfo *cs = spi->controller_data; + + if (sdd->tgl_spi == spi) + sdd->tgl_spi = NULL; + + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); +} + +static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + u32 val; + + /* Disable Clock */ + if (sci->clk_from_cmu) { + clk_disable(sdd->src_clk); + } else { + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } + + /* Set Polarity and Phase */ + val = readl(regs + S3C64XX_SPI_CH_CFG); + val &= ~(S3C64XX_SPI_CH_SLAVE | + S3C64XX_SPI_CPOL_L | + S3C64XX_SPI_CPHA_B); + + if (sdd->cur_mode & SPI_CPOL) + val |= S3C64XX_SPI_CPOL_L; + + if (sdd->cur_mode & SPI_CPHA) + val |= S3C64XX_SPI_CPHA_B; + + writel(val, regs + S3C64XX_SPI_CH_CFG); + + /* Set Channel & DMA Mode */ + val = readl(regs + S3C64XX_SPI_MODE_CFG); + val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK + | S3C64XX_SPI_MODE_CH_TSZ_MASK); + + switch (sdd->cur_bpw) { + case 32: + val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_WORD; + break; + case 16: + val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD; + break; + default: + val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; + val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; + break; + } + + writel(val, regs + S3C64XX_SPI_MODE_CFG); + + if (sci->clk_from_cmu) { + /* Configure Clock */ + /* There is half-multiplier before the SPI */ + clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + /* Enable Clock */ + clk_enable(sdd->src_clk); + } else { + /* Configure Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_PSR_MASK; + val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) + & S3C64XX_SPI_PSR_MASK); + writel(val, regs + S3C64XX_SPI_CLK_CFG); + + /* Enable Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val |= S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } +} + +static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) +{ + struct s3c64xx_spi_driver_data *sdd = buf_id; + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + + if (res == S3C2410_RES_OK) + sdd->state &= ~RXBUSY; + else + dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size); + + /* If the other done */ + if (!(sdd->state & TXBUSY)) + complete(&sdd->xfer_completion); + + spin_unlock_irqrestore(&sdd->lock, flags); +} + +static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) +{ + struct s3c64xx_spi_driver_data *sdd = buf_id; 
+ unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + + if (res == S3C2410_RES_OK) + sdd->state &= ~TXBUSY; + else + dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size); + + /* If the other done */ + if (!(sdd->state & RXBUSY)) + complete(&sdd->xfer_completion); + + spin_unlock_irqrestore(&sdd->lock, flags); +} + +#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) + +static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, + struct spi_message *msg) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + struct device *dev = &sdd->pdev->dev; + struct spi_transfer *xfer; + + if (msg->is_dma_mapped) + return 0; + + /* First mark all xfer unmapped */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + xfer->rx_dma = XFER_DMAADDR_INVALID; + xfer->tx_dma = XFER_DMAADDR_INVALID; + } + + /* Map until end or first fail */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + + if (xfer->tx_buf != NULL) { + xfer->tx_dma = dma_map_single(dev, + (void *)xfer->tx_buf, xfer->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, xfer->tx_dma)) { + dev_err(dev, "dma_map_single Tx failed\n"); + xfer->tx_dma = XFER_DMAADDR_INVALID; + return -ENOMEM; + } + } + + if (xfer->rx_buf != NULL) { + xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, + xfer->len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, xfer->rx_dma)) { + dev_err(dev, "dma_map_single Rx failed\n"); + dma_unmap_single(dev, xfer->tx_dma, + xfer->len, DMA_TO_DEVICE); + xfer->tx_dma = XFER_DMAADDR_INVALID; + xfer->rx_dma = XFER_DMAADDR_INVALID; + return -ENOMEM; + } + } + } + + return 0; +} + +static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, + struct spi_message *msg) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + struct device *dev = &sdd->pdev->dev; + struct spi_transfer *xfer; + + if (msg->is_dma_mapped) + return; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + + if (xfer->rx_buf != NULL + && xfer->rx_dma != XFER_DMAADDR_INVALID) + dma_unmap_single(dev, xfer->rx_dma, + xfer->len, DMA_FROM_DEVICE); + + if (xfer->tx_buf != NULL + && xfer->tx_dma != XFER_DMAADDR_INVALID) + dma_unmap_single(dev, xfer->tx_dma, + xfer->len, DMA_TO_DEVICE); + } +} + +static void handle_msg(struct s3c64xx_spi_driver_data *sdd, + struct spi_message *msg) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + struct spi_device *spi = msg->spi; + struct s3c64xx_spi_csinfo *cs = spi->controller_data; + struct spi_transfer *xfer; + int status = 0, cs_toggle = 0; + u32 speed; + u8 bpw; + + /* If Master's(controller) state differs from that needed by Slave */ + if (sdd->cur_speed != spi->max_speed_hz + || sdd->cur_mode != spi->mode + || sdd->cur_bpw != spi->bits_per_word) { + sdd->cur_bpw = spi->bits_per_word; + sdd->cur_speed = spi->max_speed_hz; + sdd->cur_mode = spi->mode; + s3c64xx_spi_config(sdd); + } + + /* Map all the transfers if needed */ + if (s3c64xx_spi_map_mssg(sdd, msg)) { + dev_err(&spi->dev, + "Xfer: Unable to map message buffers!\n"); + status = -ENOMEM; + goto out; + } + + /* Configure feedback delay */ + writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + + unsigned long flags; + int use_dma; + + INIT_COMPLETION(sdd->xfer_completion); + + /* Only BPW and Speed may change across transfers */ + bpw = xfer->bits_per_word ? : spi->bits_per_word; + speed = xfer->speed_hz ? 
: spi->max_speed_hz; + + if (xfer->len % (bpw / 8)) { + dev_err(&spi->dev, + "Xfer length(%u) not a multiple of word size(%u)\n", + xfer->len, bpw / 8); + status = -EIO; + goto out; + } + + if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { + sdd->cur_bpw = bpw; + sdd->cur_speed = speed; + s3c64xx_spi_config(sdd); + } + + /* Polling method for xfers not bigger than FIFO capacity */ + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + use_dma = 0; + else + use_dma = 1; + + spin_lock_irqsave(&sdd->lock, flags); + + /* Pending only which is to be done */ + sdd->state &= ~RXBUSY; + sdd->state &= ~TXBUSY; + + enable_datapath(sdd, spi, xfer, use_dma); + + /* Slave Select */ + enable_cs(sdd, spi); + + /* Start the signals */ + S3C64XX_SPI_ACT(sdd); + + spin_unlock_irqrestore(&sdd->lock, flags); + + status = wait_for_xfer(sdd, xfer, use_dma); + + /* Quiese the signals */ + S3C64XX_SPI_DEACT(sdd); + + if (status) { + dev_err(&spi->dev, "I/O Error: " + "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", + xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, + (sdd->state & RXBUSY) ? 'f' : 'p', + (sdd->state & TXBUSY) ? 'f' : 'p', + xfer->len); + + if (use_dma) { + if (xfer->tx_buf != NULL + && (sdd->state & TXBUSY)) + s3c2410_dma_ctrl(sdd->tx_dmach, + S3C2410_DMAOP_FLUSH); + if (xfer->rx_buf != NULL + && (sdd->state & RXBUSY)) + s3c2410_dma_ctrl(sdd->rx_dmach, + S3C2410_DMAOP_FLUSH); + } + + goto out; + } + + if (xfer->delay_usecs) + udelay(xfer->delay_usecs); + + if (xfer->cs_change) { + /* Hint that the next mssg is gonna be + for the same device */ + if (list_is_last(&xfer->transfer_list, + &msg->transfers)) + cs_toggle = 1; + else + disable_cs(sdd, spi); + } + + msg->actual_length += xfer->len; + + flush_fifo(sdd); + } + +out: + if (!cs_toggle || status) + disable_cs(sdd, spi); + else + sdd->tgl_spi = spi; + + s3c64xx_spi_unmap_mssg(sdd, msg); + + msg->status = status; + + if (msg->complete) + msg->complete(msg->context); +} + +static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) +{ + if (s3c2410_dma_request(sdd->rx_dmach, + &s3c64xx_spi_dma_client, NULL) < 0) { + dev_err(&sdd->pdev->dev, "cannot get RxDMA\n"); + return 0; + } + s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb); + s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW, + sdd->sfr_start + S3C64XX_SPI_RX_DATA); + + if (s3c2410_dma_request(sdd->tx_dmach, + &s3c64xx_spi_dma_client, NULL) < 0) { + dev_err(&sdd->pdev->dev, "cannot get TxDMA\n"); + s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); + return 0; + } + s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb); + s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM, + sdd->sfr_start + S3C64XX_SPI_TX_DATA); + + return 1; +} + +static void s3c64xx_spi_work(struct work_struct *work) +{ + struct s3c64xx_spi_driver_data *sdd = container_of(work, + struct s3c64xx_spi_driver_data, work); + unsigned long flags; + + /* Acquire DMA channels */ + while (!acquire_dma(sdd)) + msleep(10); + + spin_lock_irqsave(&sdd->lock, flags); + + while (!list_empty(&sdd->queue) + && !(sdd->state & SUSPND)) { + + struct spi_message *msg; + + msg = container_of(sdd->queue.next, struct spi_message, queue); + + list_del_init(&msg->queue); + + /* Set Xfer busy flag */ + sdd->state |= SPIBUSY; + + spin_unlock_irqrestore(&sdd->lock, flags); + + handle_msg(sdd, msg); + + spin_lock_irqsave(&sdd->lock, flags); + + sdd->state &= ~SPIBUSY; + } + + spin_unlock_irqrestore(&sdd->lock, flags); + + /* Free DMA channels */ + s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client); + 
s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); +} + +static int s3c64xx_spi_transfer(struct spi_device *spi, + struct spi_message *msg) +{ + struct s3c64xx_spi_driver_data *sdd; + unsigned long flags; + + sdd = spi_master_get_devdata(spi->master); + + spin_lock_irqsave(&sdd->lock, flags); + + if (sdd->state & SUSPND) { + spin_unlock_irqrestore(&sdd->lock, flags); + return -ESHUTDOWN; + } + + msg->status = -EINPROGRESS; + msg->actual_length = 0; + + list_add_tail(&msg->queue, &sdd->queue); + + queue_work(sdd->workqueue, &sdd->work); + + spin_unlock_irqrestore(&sdd->lock, flags); + + return 0; +} + +/* + * Here we only check the validity of requested configuration + * and save the configuration in a local data-structure. + * The controller is actually configured only just before we + * get a message to transfer. + */ +static int s3c64xx_spi_setup(struct spi_device *spi) +{ + struct s3c64xx_spi_csinfo *cs = spi->controller_data; + struct s3c64xx_spi_driver_data *sdd; + struct s3c64xx_spi_info *sci; + struct spi_message *msg; + unsigned long flags; + int err = 0; + + if (cs == NULL || cs->set_level == NULL) { + dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select); + return -ENODEV; + } + + sdd = spi_master_get_devdata(spi->master); + sci = sdd->cntrlr_info; + + spin_lock_irqsave(&sdd->lock, flags); + + list_for_each_entry(msg, &sdd->queue, queue) { + /* Is some mssg is already queued for this device */ + if (msg->spi == spi) { + dev_err(&spi->dev, + "setup: attempt while mssg in queue!\n"); + spin_unlock_irqrestore(&sdd->lock, flags); + return -EBUSY; + } + } + + if (sdd->state & SUSPND) { + spin_unlock_irqrestore(&sdd->lock, flags); + dev_err(&spi->dev, + "setup: SPI-%d not active!\n", spi->master->bus_num); + return -ESHUTDOWN; + } + + spin_unlock_irqrestore(&sdd->lock, flags); + + if (spi->bits_per_word != 8 + && spi->bits_per_word != 16 + && spi->bits_per_word != 32) { + dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n", + spi->bits_per_word); + err = -EINVAL; + goto setup_exit; + } + + /* Check if we can provide the requested rate */ + if (!sci->clk_from_cmu) { + u32 psr, speed; + + /* Max possible */ + speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); + + if (spi->max_speed_hz > speed) + spi->max_speed_hz = speed; + + psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; + psr &= S3C64XX_SPI_PSR_MASK; + if (psr == S3C64XX_SPI_PSR_MASK) + psr--; + + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz < speed) { + if (psr+1 < S3C64XX_SPI_PSR_MASK) { + psr++; + } else { + err = -EINVAL; + goto setup_exit; + } + } + + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz >= speed) + spi->max_speed_hz = speed; + else + err = -EINVAL; + } + +setup_exit: + + /* setup() returns with device de-selected */ + disable_cs(sdd, spi); + + return err; +} + +static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + unsigned int val; + + sdd->cur_speed = 0; + + S3C64XX_SPI_DEACT(sdd); + + /* Disable Interrupts - we use Polling if not DMA mode */ + writel(0, regs + S3C64XX_SPI_INT_EN); + + if (!sci->clk_from_cmu) + writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, + regs + S3C64XX_SPI_CLK_CFG); + writel(0, regs + S3C64XX_SPI_MODE_CFG); + writel(0, regs + S3C64XX_SPI_PACKET_CNT); + + /* Clear any irq pending bits */ + writel(readl(regs + S3C64XX_SPI_PENDING_CLR), + regs + S3C64XX_SPI_PENDING_CLR); + + writel(0, regs 
+ S3C64XX_SPI_SWAP_CFG); + + val = readl(regs + S3C64XX_SPI_MODE_CFG); + val &= ~S3C64XX_SPI_MODE_4BURST; + val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); + val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); + writel(val, regs + S3C64XX_SPI_MODE_CFG); + + flush_fifo(sdd); +} + +static int __init s3c64xx_spi_probe(struct platform_device *pdev) +{ + struct resource *mem_res, *dmatx_res, *dmarx_res; + struct s3c64xx_spi_driver_data *sdd; + struct s3c64xx_spi_info *sci; + struct spi_master *master; + int ret; + + if (pdev->id < 0) { + dev_err(&pdev->dev, + "Invalid platform device id-%d\n", pdev->id); + return -ENODEV; + } + + if (pdev->dev.platform_data == NULL) { + dev_err(&pdev->dev, "platform_data missing!\n"); + return -ENODEV; + } + + sci = pdev->dev.platform_data; + if (!sci->src_clk_name) { + dev_err(&pdev->dev, + "Board init must call s3c64xx_spi_set_info()\n"); + return -EINVAL; + } + + /* Check for availability of necessary resource */ + + dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (dmatx_res == NULL) { + dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n"); + return -ENXIO; + } + + dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (dmarx_res == NULL) { + dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n"); + return -ENXIO; + } + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (mem_res == NULL) { + dev_err(&pdev->dev, "Unable to get SPI MEM resource\n"); + return -ENXIO; + } + + master = spi_alloc_master(&pdev->dev, + sizeof(struct s3c64xx_spi_driver_data)); + if (master == NULL) { + dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, master); + + sdd = spi_master_get_devdata(master); + sdd->master = master; + sdd->cntrlr_info = sci; + sdd->pdev = pdev; + sdd->sfr_start = mem_res->start; + sdd->tx_dmach = dmatx_res->start; + sdd->rx_dmach = dmarx_res->start; + + sdd->cur_bpw = 8; + + master->bus_num = pdev->id; + master->setup = s3c64xx_spi_setup; + master->transfer = s3c64xx_spi_transfer; + master->num_chipselect = sci->num_cs; + master->dma_alignment = 8; + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + if (request_mem_region(mem_res->start, + resource_size(mem_res), pdev->name) == NULL) { + dev_err(&pdev->dev, "Req mem region failed\n"); + ret = -ENXIO; + goto err0; + } + + sdd->regs = ioremap(mem_res->start, resource_size(mem_res)); + if (sdd->regs == NULL) { + dev_err(&pdev->dev, "Unable to remap IO\n"); + ret = -ENXIO; + goto err1; + } + + if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) { + dev_err(&pdev->dev, "Unable to config gpio\n"); + ret = -EBUSY; + goto err2; + } + + /* Setup clocks */ + sdd->clk = clk_get(&pdev->dev, "spi"); + if (IS_ERR(sdd->clk)) { + dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); + ret = PTR_ERR(sdd->clk); + goto err3; + } + + if (clk_enable(sdd->clk)) { + dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); + ret = -EBUSY; + goto err4; + } + + sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); + if (IS_ERR(sdd->src_clk)) { + dev_err(&pdev->dev, + "Unable to acquire clock '%s'\n", sci->src_clk_name); + ret = PTR_ERR(sdd->src_clk); + goto err5; + } + + if (clk_enable(sdd->src_clk)) { + dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", + sci->src_clk_name); + ret = -EBUSY; + goto err6; + } + + sdd->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (sdd->workqueue == NULL) { + 
dev_err(&pdev->dev, "Unable to create workqueue\n"); + ret = -ENOMEM; + goto err7; + } + + /* Setup Deufult Mode */ + s3c64xx_spi_hwinit(sdd, pdev->id); + + spin_lock_init(&sdd->lock); + init_completion(&sdd->xfer_completion); + INIT_WORK(&sdd->work, s3c64xx_spi_work); + INIT_LIST_HEAD(&sdd->queue); + + if (spi_register_master(master)) { + dev_err(&pdev->dev, "cannot register SPI master\n"); + ret = -EBUSY; + goto err8; + } + + dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " + "with %d Slaves attached\n", + pdev->id, master->num_chipselect); + dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", + mem_res->end, mem_res->start, + sdd->rx_dmach, sdd->tx_dmach); + + return 0; + +err8: + destroy_workqueue(sdd->workqueue); +err7: + clk_disable(sdd->src_clk); +err6: + clk_put(sdd->src_clk); +err5: + clk_disable(sdd->clk); +err4: + clk_put(sdd->clk); +err3: +err2: + iounmap((void *) sdd->regs); +err1: + release_mem_region(mem_res->start, resource_size(mem_res)); +err0: + platform_set_drvdata(pdev, NULL); + spi_master_put(master); + + return ret; +} + +static int s3c64xx_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); + struct resource *mem_res; + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + sdd->state |= SUSPND; + spin_unlock_irqrestore(&sdd->lock, flags); + + while (sdd->state & SPIBUSY) + msleep(10); + + spi_unregister_master(master); + + destroy_workqueue(sdd->workqueue); + + clk_disable(sdd->src_clk); + clk_put(sdd->src_clk); + + clk_disable(sdd->clk); + clk_put(sdd->clk); + + iounmap((void *) sdd->regs); + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (mem_res != NULL) + release_mem_region(mem_res->start, resource_size(mem_res)); + + platform_set_drvdata(pdev, NULL); + spi_master_put(master); + + return 0; +} + +#ifdef CONFIG_PM +static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + sdd->state |= SUSPND; + spin_unlock_irqrestore(&sdd->lock, flags); + + while (sdd->state & SPIBUSY) + msleep(10); + + /* Disable the clock */ + clk_disable(sdd->src_clk); + clk_disable(sdd->clk); + + sdd->cur_speed = 0; /* Output Clock is stopped */ + + return 0; +} + +static int s3c64xx_spi_resume(struct platform_device *pdev) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + unsigned long flags; + + sci->cfg_gpio(pdev); + + /* Enable the clock */ + clk_enable(sdd->src_clk); + clk_enable(sdd->clk); + + s3c64xx_spi_hwinit(sdd, pdev->id); + + spin_lock_irqsave(&sdd->lock, flags); + sdd->state &= ~SUSPND; + spin_unlock_irqrestore(&sdd->lock, flags); + + return 0; +} +#else +#define s3c64xx_spi_suspend NULL +#define s3c64xx_spi_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver s3c64xx_spi_driver = { + .driver = { + .name = "s3c64xx-spi", + .owner = THIS_MODULE, + }, + .remove = s3c64xx_spi_remove, + .suspend = s3c64xx_spi_suspend, + .resume = s3c64xx_spi_resume, +}; +MODULE_ALIAS("platform:s3c64xx-spi"); + +static int __init s3c64xx_spi_init(void) +{ + return platform_driver_probe(&s3c64xx_spi_driver, 
s3c64xx_spi_probe); +} +subsys_initcall(s3c64xx_spi_init); + +static void __exit s3c64xx_spi_exit(void) +{ + platform_driver_unregister(&s3c64xx_spi_driver); +} +module_exit(s3c64xx_spi_exit); + +MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); +MODULE_DESCRIPTION("S3C64XX SPI Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c new file mode 100644 index 00000000000..d93b66743ba --- /dev/null +++ b/drivers/spi/spi_sh_msiof.c @@ -0,0 +1,688 @@ +/* + * SuperH MSIOF SPI Master Interface + * + * Copyright (c) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/completion.h> +#include <linux/pm_runtime.h> +#include <linux/gpio.h> +#include <linux/bitmap.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/err.h> + +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/spi/sh_msiof.h> + +#include <asm/unaligned.h> + +struct sh_msiof_spi_priv { + struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */ + void __iomem *mapbase; + struct clk *clk; + struct platform_device *pdev; + struct sh_msiof_spi_info *info; + struct completion done; + unsigned long flags; + int tx_fifo_size; + int rx_fifo_size; +}; + +#define TMDR1 0x00 +#define TMDR2 0x04 +#define TMDR3 0x08 +#define RMDR1 0x10 +#define RMDR2 0x14 +#define RMDR3 0x18 +#define TSCR 0x20 +#define RSCR 0x22 +#define CTR 0x28 +#define FCTR 0x30 +#define STR 0x40 +#define IER 0x44 +#define TDR1 0x48 +#define TDR2 0x4c +#define TFDR 0x50 +#define RDR1 0x58 +#define RDR2 0x5c +#define RFDR 0x60 + +#define CTR_TSCKE (1 << 15) +#define CTR_TFSE (1 << 14) +#define CTR_TXE (1 << 9) +#define CTR_RXE (1 << 8) + +#define STR_TEOF (1 << 23) +#define STR_REOF (1 << 7) + +static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) +{ + switch (reg_offs) { + case TSCR: + case RSCR: + return ioread16(p->mapbase + reg_offs); + default: + return ioread32(p->mapbase + reg_offs); + } +} + +static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, + unsigned long value) +{ + switch (reg_offs) { + case TSCR: + case RSCR: + iowrite16(value, p->mapbase + reg_offs); + break; + default: + iowrite32(value, p->mapbase + reg_offs); + break; + } +} + +static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, + unsigned long clr, unsigned long set) +{ + unsigned long mask = clr | set; + unsigned long data; + int k; + + data = sh_msiof_read(p, CTR); + data &= ~clr; + data |= set; + sh_msiof_write(p, CTR, data); + + for (k = 100; k > 0; k--) { + if ((sh_msiof_read(p, CTR) & mask) == set) + break; + + udelay(10); + } + + return k > 0 ? 
0 : -ETIMEDOUT; +} + +static irqreturn_t sh_msiof_spi_irq(int irq, void *data) +{ + struct sh_msiof_spi_priv *p = data; + + /* just disable the interrupt and wake up */ + sh_msiof_write(p, IER, 0); + complete(&p->done); + + return IRQ_HANDLED; +} + +static struct { + unsigned short div; + unsigned short scr; +} const sh_msiof_spi_clk_table[] = { + { 1, 0x0007 }, + { 2, 0x0000 }, + { 4, 0x0001 }, + { 8, 0x0002 }, + { 16, 0x0003 }, + { 32, 0x0004 }, + { 64, 0x1f00 }, + { 128, 0x1f01 }, + { 256, 0x1f02 }, + { 512, 0x1f03 }, + { 1024, 0x1f04 }, +}; + +static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, + unsigned long parent_rate, + unsigned long spi_hz) +{ + unsigned long div = 1024; + size_t k; + + if (!WARN_ON(!spi_hz || !parent_rate)) + div = parent_rate / spi_hz; + + /* TODO: make more fine grained */ + + for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { + if (sh_msiof_spi_clk_table[k].div >= div) + break; + } + + k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); + + sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); + sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); +} + +static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, + int cpol, int cpha, + int tx_hi_z, int lsb_first) +{ + unsigned long tmp; + int edge; + + /* + * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG + * 0 0 10 10 1 1 + * 0 1 10 10 0 0 + * 1 0 11 11 0 0 + * 1 1 11 11 1 1 + */ + sh_msiof_write(p, FCTR, 0); + sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); + sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); + + tmp = 0xa0000000; + tmp |= cpol << 30; /* TSCKIZ */ + tmp |= cpol << 28; /* RSCKIZ */ + + edge = cpol ? cpha : !cpha; + + tmp |= edge << 27; /* TEDG */ + tmp |= edge << 26; /* REDG */ + tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */ + sh_msiof_write(p, CTR, tmp); +} + +static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, + const void *tx_buf, void *rx_buf, + int bits, int words) +{ + unsigned long dr2; + + dr2 = ((bits - 1) << 24) | ((words - 1) << 16); + + if (tx_buf) + sh_msiof_write(p, TMDR2, dr2); + else + sh_msiof_write(p, TMDR2, dr2 | 1); + + if (rx_buf) + sh_msiof_write(p, RMDR2, dr2); + + sh_msiof_write(p, IER, STR_TEOF | STR_REOF); +} + +static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) +{ + sh_msiof_write(p, STR, sh_msiof_read(p, STR)); +} + +static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned char *buf_8 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_8[k] << fs); +} + +static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned short *buf_16 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_16[k] << fs); +} + +static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned short *buf_16 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); +} + +static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned int *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_32[k] << fs); +} + +static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned int *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, 
TFDR, get_unaligned(&buf_32[k]) << fs); +} + +static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned char *buf_8 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_8[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned short *buf_16 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_16[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned short *buf_16 = rx_buf; + int k; + + for (k = 0; k < words; k++) + put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); +} + +static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned int *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_32[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned int *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); +} + +static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t) +{ + int bits; + + bits = t ? t->bits_per_word : 0; + bits = bits ? bits : spi->bits_per_word; + return bits; +} + +static unsigned long sh_msiof_spi_hz(struct spi_device *spi, + struct spi_transfer *t) +{ + unsigned long hz; + + hz = t ? t->speed_hz : 0; + hz = hz ? hz : spi->max_speed_hz; + return hz; +} + +static int sh_msiof_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + int bits; + + /* nothing to check hz values against since parent clock is disabled */ + + bits = sh_msiof_spi_bits(spi, t); + if (bits < 8) + return -EINVAL; + if (bits > 32) + return -EINVAL; + + return spi_bitbang_setup_transfer(spi, t); +} + +static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on) +{ + struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + int value; + + /* chip select is active low unless SPI_CS_HIGH is set */ + if (spi->mode & SPI_CS_HIGH) + value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0; + else + value = (is_on == BITBANG_CS_ACTIVE) ?
0 : 1; + + if (is_on == BITBANG_CS_ACTIVE) { + if (!test_and_set_bit(0, &p->flags)) { + pm_runtime_get_sync(&p->pdev->dev); + clk_enable(p->clk); + } + + /* Configure pins before asserting CS */ + sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL), + !!(spi->mode & SPI_CPHA), + !!(spi->mode & SPI_3WIRE), + !!(spi->mode & SPI_LSB_FIRST)); + } + + /* use spi->controller_data for CS (same strategy as spi_gpio) */ + gpio_set_value((unsigned)spi->controller_data, value); + + if (is_on == BITBANG_CS_INACTIVE) { + if (test_and_clear_bit(0, &p->flags)) { + clk_disable(p->clk); + pm_runtime_put(&p->pdev->dev); + } + } +} + +static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, + void (*tx_fifo)(struct sh_msiof_spi_priv *, + const void *, int, int), + void (*rx_fifo)(struct sh_msiof_spi_priv *, + void *, int, int), + const void *tx_buf, void *rx_buf, + int words, int bits) +{ + int fifo_shift; + int ret; + + /* limit maximum word transfer to rx/tx fifo size */ + if (tx_buf) + words = min_t(int, words, p->tx_fifo_size); + if (rx_buf) + words = min_t(int, words, p->rx_fifo_size); + + /* the fifo contents need shifting */ + fifo_shift = 32 - bits; + + /* setup msiof transfer mode registers */ + sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); + + /* write tx fifo */ + if (tx_buf) + tx_fifo(p, tx_buf, words, fifo_shift); + + /* setup clock and rx/tx signals */ + ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); + if (rx_buf) + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); + + /* start by setting frame bit */ + INIT_COMPLETION(p->done); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); + if (ret) { + dev_err(&p->pdev->dev, "failed to start hardware\n"); + goto err; + } + + /* wait for tx fifo to be emptied / rx fifo to be filled */ + wait_for_completion(&p->done); + + /* read rx fifo */ + if (rx_buf) + rx_fifo(p, rx_buf, words, fifo_shift); + + /* clear status bits */ + sh_msiof_reset_str(p); + + /* shut down frame, tx/rx and clock signals */ + ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); + if (rx_buf) + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); + ret = ret ?
ret : sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); + if (ret) { + dev_err(&p->pdev->dev, "failed to shut down hardware\n"); + goto err; + } + + return words; + + err: + sh_msiof_write(p, IER, 0); + return ret; +} + +static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); + void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); + int bits; + int bytes_per_word; + int bytes_done; + int words; + int n; + + bits = sh_msiof_spi_bits(spi, t); + + /* setup bytes per word and fifo read/write functions */ + if (bits <= 8) { + bytes_per_word = 1; + tx_fifo = sh_msiof_spi_write_fifo_8; + rx_fifo = sh_msiof_spi_read_fifo_8; + } else if (bits <= 16) { + bytes_per_word = 2; + if ((unsigned long)t->tx_buf & 0x01) + tx_fifo = sh_msiof_spi_write_fifo_16u; + else + tx_fifo = sh_msiof_spi_write_fifo_16; + + if ((unsigned long)t->rx_buf & 0x01) + rx_fifo = sh_msiof_spi_read_fifo_16u; + else + rx_fifo = sh_msiof_spi_read_fifo_16; + } else { + bytes_per_word = 4; + if ((unsigned long)t->tx_buf & 0x03) + tx_fifo = sh_msiof_spi_write_fifo_32u; + else + tx_fifo = sh_msiof_spi_write_fifo_32; + + if ((unsigned long)t->rx_buf & 0x03) + rx_fifo = sh_msiof_spi_read_fifo_32u; + else + rx_fifo = sh_msiof_spi_read_fifo_32; + } + + /* setup clocks (clock already enabled in chipselect()) */ + sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), + sh_msiof_spi_hz(spi, t)); + + /* transfer in fifo sized chunks */ + words = t->len / bytes_per_word; + bytes_done = 0; + + while (bytes_done < t->len) { + n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, + t->tx_buf + bytes_done, + t->rx_buf + bytes_done, + words, bits); + if (n < 0) + break; + + bytes_done += n * bytes_per_word; + words -= n; + } + + return bytes_done; +} + +static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs, + u32 word, u8 bits) +{ + BUG(); /* unused but needed by bitbang code */ + return 0; +} + +static int sh_msiof_spi_probe(struct platform_device *pdev) +{ + struct resource *r; + struct spi_master *master; + struct sh_msiof_spi_priv *p; + char clk_name[16]; + int i; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv)); + if (master == NULL) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + ret = -ENOMEM; + goto err0; + } + + p = spi_master_get_devdata(master); + + platform_set_drvdata(pdev, p); + p->info = pdev->dev.platform_data; + init_completion(&p->done); + + snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id); + p->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(p->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(p->clk); + goto err1; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i = platform_get_irq(pdev, 0); + if (!r || i < 0) { + dev_err(&pdev->dev, "cannot get platform resources\n"); + ret = -ENOENT; + goto err2; + } + p->mapbase = ioremap_nocache(r->start, resource_size(r)); + if (!p->mapbase) { + dev_err(&pdev->dev, "unable to ioremap\n"); + ret = -ENXIO; + goto err2; + } + + ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED, + dev_name(&pdev->dev), p); + if (ret) { + dev_err(&pdev->dev, "unable to request irq\n"); + goto err3; + } + + p->pdev = pdev; + pm_runtime_enable(&pdev->dev); + + /* The standard version of MSIOF use 64 word FIFOs */ + p->tx_fifo_size = 64; + p->rx_fifo_size = 64; + + /* Platform data may override FIFO sizes */ + if 
(p->info->tx_fifo_override) + p->tx_fifo_size = p->info->tx_fifo_override; + if (p->info->rx_fifo_override) + p->rx_fifo_size = p->info->rx_fifo_override; + + /* init master and bitbang code */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; + master->flags = 0; + master->bus_num = pdev->id; + master->num_chipselect = p->info->num_chipselect; + master->setup = spi_bitbang_setup; + master->cleanup = spi_bitbang_cleanup; + + p->bitbang.master = master; + p->bitbang.chipselect = sh_msiof_spi_chipselect; + p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer; + p->bitbang.txrx_bufs = sh_msiof_spi_txrx; + p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word; + + ret = spi_bitbang_start(&p->bitbang); + if (ret == 0) + return 0; + + pm_runtime_disable(&pdev->dev); + err3: + iounmap(p->mapbase); + err2: + clk_put(p->clk); + err1: + spi_master_put(master); + err0: + return ret; +} + +static int sh_msiof_spi_remove(struct platform_device *pdev) +{ + struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); + int ret; + + ret = spi_bitbang_stop(&p->bitbang); + if (!ret) { + pm_runtime_disable(&pdev->dev); + free_irq(platform_get_irq(pdev, 0), p); + iounmap(p->mapbase); + clk_put(p->clk); + spi_master_put(p->bitbang.master); + } + return ret; +} + +static int sh_msiof_spi_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here.
+ */ + return 0; +} + +static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { + .runtime_suspend = sh_msiof_spi_runtime_nop, + .runtime_resume = sh_msiof_spi_runtime_nop, +}; + +static struct platform_driver sh_msiof_spi_drv = { + .probe = sh_msiof_spi_probe, + .remove = sh_msiof_spi_remove, + .driver = { + .name = "spi_sh_msiof", + .owner = THIS_MODULE, + .pm = &sh_msiof_spi_dev_pm_ops, + }, +}; + +static int __init sh_msiof_spi_init(void) +{ + return platform_driver_register(&sh_msiof_spi_drv); +} +module_init(sh_msiof_spi_init); + +static void __exit sh_msiof_spi_exit(void) +{ + platform_driver_unregister(&sh_msiof_spi_drv); +} +module_exit(sh_msiof_spi_exit); + +MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:spi_sh_msiof"); diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c index 7d36720eb98..5c643916119 100644 --- a/drivers/spi/spi_sh_sci.c +++ b/drivers/spi/spi_sh_sci.c @@ -78,31 +78,30 @@ static inline u32 getmiso(struct spi_device *dev) #define spidelay(x) ndelay(x) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); } static void sh_sci_spi_chipselect(struct spi_device *dev, int value) @@ -148,7 +147,7 @@ static int sh_sci_spi_probe(struct platform_device *dev) ret = -ENOENT; goto err1; } - sp->membase = ioremap(r->start, r->end - r->start + 1); + sp->membase = ioremap(r->start, resource_size(r)); if (!sp->membase) { ret = -ENXIO; goto err1; diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c new file mode 100644 index 00000000000..fadff76eb7e --- /dev/null +++ b/drivers/spi/spi_stmp.c @@ -0,0 +1,679 @@ +/* + * Freescale STMP378X SPI master driver + * + * Author: dmitry pervushin <dimka@embeddedalley.com> + * + * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> + +#include <mach/platform.h> +#include <mach/stmp3xxx.h> +#include <mach/dma.h> +#include <mach/regs-ssp.h> +#include <mach/regs-apbh.h> + + +/* 0 means DMA mode(recommended, default), !0 - PIO mode */ +static int pio; +static int clock; + +/* default timeout for busy waits is 2 seconds */ +#define STMP_SPI_TIMEOUT (2 * HZ) + +struct stmp_spi { + int id; + + void * __iomem regs; /* vaddr of the control registers */ + + int irq, err_irq; + u32 dma; + struct stmp3xxx_dma_descriptor d; + + u32 speed_khz; + u32 saved_timings; + u32 divider; + + struct clk *clk; + struct device *master_dev; + + struct work_struct work; + struct workqueue_struct *workqueue; + + /* lock protects queue access */ + spinlock_t lock; + struct list_head queue; + + struct completion done; +}; + +#define busy_wait(cond) \ + ({ \ + unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \ + bool succeeded = false; \ + do { \ + if (cond) { \ + succeeded = true; \ + break; \ + } \ + cpu_relax(); \ + } while (time_before(jiffies, end_jiffies)); \ + succeeded; \ + }) + +/** + * stmp_spi_init_hw + * Initialize the SSP port + */ +static int stmp_spi_init_hw(struct stmp_spi *ss) +{ + int err = 0; + void *pins = ss->master_dev->platform_data; + + err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev)); + if (err) + goto out; + + ss->clk = clk_get(NULL, "ssp"); + if (IS_ERR(ss->clk)) { + err = PTR_ERR(ss->clk); + goto out_free_pins; + } + clk_enable(ss->clk); + + stmp3xxx_reset_block(ss->regs, false); + stmp3xxx_dma_reset_channel(ss->dma); + + return 0; + +out_free_pins: + stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev)); +out: + return err; +} + +static void stmp_spi_release_hw(struct stmp_spi *ss) +{ + void *pins = ss->master_dev->platform_data; + + if (ss->clk && !IS_ERR(ss->clk)) { + clk_disable(ss->clk); + clk_put(ss->clk); + } + stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev)); +} + +static int stmp_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + u8 bits_per_word; + u32 hz; + struct stmp_spi *ss = spi_master_get_devdata(spi->master); + u16 rate; + + bits_per_word = spi->bits_per_word; + if (t && t->bits_per_word) + bits_per_word = t->bits_per_word; + + /* + * Calculate speed: + * - by default, use maximum speed from ssp clk + * - if device overrides it, use it + * - if transfer specifies other speed, use transfer's one + */ + hz = 1000 * ss->speed_khz / ss->divider; + if (spi->max_speed_hz) + hz = min(hz, spi->max_speed_hz); + if (t && t->speed_hz) + hz = min(hz, t->speed_hz); + + if (hz == 0) { + dev_err(&spi->dev, "Cannot continue with zero clock\n"); + return -EINVAL; + } + + if (bits_per_word != 8) { + dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", + __func__, bits_per_word); + return -EINVAL; + } + + dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n", + hz, ss->speed_khz, ss->divider, + ss->speed_khz * 1000 / ss->divider); + + if (ss->speed_khz * 1000 / ss->divider < hz) { + dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n", + __func__, hz); + return 
-EINVAL; + } + + rate = 1000 * ss->speed_khz/ss->divider/hz; + + writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) | + BF(rate - 1, SSP_TIMING_CLOCK_RATE), + HW_SSP_TIMING + ss->regs); + + writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) | + BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) | + ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) | + ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) | + (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE), + ss->regs + HW_SSP_CTRL1); + + return 0; +} + +static int stmp_spi_setup(struct spi_device *spi) +{ + /* spi_setup() does basic checks, + * stmp_spi_setup_transfer() does more later + */ + if (spi->bits_per_word != 8) { + dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", + __func__, spi->bits_per_word); + return -EINVAL; + } + return 0; +} + +static inline u32 stmp_spi_cs(unsigned cs) +{ + return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) | + ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0); +} + +static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs, + unsigned char *buf, dma_addr_t dma_buf, int len, + int first, int last, bool write) +{ + u32 c0 = 0; + dma_addr_t spi_buf_dma = dma_buf; + int status = 0; + enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + + c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0); + c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0); + c0 |= (write ? 0 : BM_SSP_CTRL0_READ); + c0 |= BM_SSP_CTRL0_DATA_XFER; + + c0 |= stmp_spi_cs(cs); + + c0 |= BF(len, SSP_CTRL0_XFER_COUNT); + + if (!dma_buf) + spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir); + + ss->d.command->cmd = + BF(len, APBH_CHn_CMD_XFER_COUNT) | + BF(1, APBH_CHn_CMD_CMDWORDS) | + BM_APBH_CHn_CMD_WAIT4ENDCMD | + BM_APBH_CHn_CMD_IRQONCMPLT | + BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ : + BV_APBH_CHn_CMD_COMMAND__DMA_WRITE, + APBH_CHn_CMD_COMMAND); + ss->d.command->pio_words[0] = c0; + ss->d.command->buf_ptr = spi_buf_dma; + + stmp3xxx_dma_reset_channel(ss->dma); + stmp3xxx_dma_clear_interrupt(ss->dma); + stmp3xxx_dma_enable_interrupt(ss->dma); + init_completion(&ss->done); + stmp3xxx_dma_go(ss->dma, &ss->d, 1); + wait_for_completion(&ss->done); + + if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN)) + status = -ETIMEDOUT; + + if (!dma_buf) + dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir); + + return status; +} + +static inline void stmp_spi_enable(struct stmp_spi *ss) +{ + stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0); + stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0); +} + +static inline void stmp_spi_disable(struct stmp_spi *ss) +{ + stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0); + stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0); +} + +static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs, + unsigned char *buf, int len, + bool first, bool last, bool write) +{ + if (first) + stmp_spi_enable(ss); + + stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0); + + while (len--) { + if (last && len <= 0) + stmp_spi_disable(ss); + + stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT, + ss->regs + HW_SSP_CTRL0); + stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0); + + if (write) + stmp3xxx_clearl(BM_SSP_CTRL0_READ, + ss->regs + HW_SSP_CTRL0); + else + stmp3xxx_setl(BM_SSP_CTRL0_READ, + ss->regs + HW_SSP_CTRL0); + + /* Run! 
*/ + stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0); + + if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & + BM_SSP_CTRL0_RUN)) + break; + + if (write) + writel(*buf, ss->regs + HW_SSP_DATA); + + /* Set TRANSFER */ + stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0); + + if (!write) { + if (busy_wait((readl(ss->regs + HW_SSP_STATUS) & + BM_SSP_STATUS_FIFO_EMPTY))) + break; + *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF; + } + + if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & + BM_SSP_CTRL0_RUN)) + break; + + /* advance to the next byte */ + buf++; + } + + return len < 0 ? 0 : -ETIMEDOUT; +} + +static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m) +{ + bool first, last; + struct spi_transfer *t, *tmp_t; + int status = 0; + int cs; + + cs = m->spi->chip_select; + + list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) { + + first = (&t->transfer_list == m->transfers.next); + last = (&t->transfer_list == m->transfers.prev); + + if (first || t->speed_hz || t->bits_per_word) + stmp_spi_setup_transfer(m->spi, t); + + /* reject "not last" transfers which request to change cs */ + if (t->cs_change && !last) { + dev_err(&m->spi->dev, + "Message with t->cs_change has been skipped\n"); + continue; + } + + if (t->tx_buf) { + status = pio ? + stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf, + t->len, first, last, true) : + stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf, + t->tx_dma, t->len, first, last, true); +#ifdef DEBUG + if (t->len < 0x10) + print_hex_dump_bytes("Tx ", + DUMP_PREFIX_OFFSET, + t->tx_buf, t->len); + else + pr_debug("Tx: %d bytes\n", t->len); +#endif + } + if (t->rx_buf) { + status = pio ? + stmp_spi_txrx_pio(ss, cs, t->rx_buf, + t->len, first, last, false) : + stmp_spi_txrx_dma(ss, cs, t->rx_buf, + t->rx_dma, t->len, first, last, false); +#ifdef DEBUG + if (t->len < 0x10) + print_hex_dump_bytes("Rx ", + DUMP_PREFIX_OFFSET, + t->rx_buf, t->len); + else + pr_debug("Rx: %d bytes\n", t->len); +#endif + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (status) + break; + + } + return status; +} + +/** + * stmp_spi_handle - handle messages from the queue + */ +static void stmp_spi_handle(struct work_struct *w) +{ + struct stmp_spi *ss = container_of(w, struct stmp_spi, work); + unsigned long flags; + struct spi_message *m; + + spin_lock_irqsave(&ss->lock, flags); + while (!list_empty(&ss->queue)) { + m = list_entry(ss->queue.next, struct spi_message, queue); + list_del_init(&m->queue); + spin_unlock_irqrestore(&ss->lock, flags); + + m->status = stmp_spi_handle_message(ss, m); + m->complete(m->context); + + spin_lock_irqsave(&ss->lock, flags); + } + spin_unlock_irqrestore(&ss->lock, flags); + + return; +} + +/** + * stmp_spi_transfer - perform message transfer. + * Called indirectly from spi_async, queues all the messages to + * spi_handle_message. 
+ * @spi: spi device + * @m: message to be queued + */ +static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct stmp_spi *ss = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->status = -EINPROGRESS; + spin_lock_irqsave(&ss->lock, flags); + list_add_tail(&m->queue, &ss->queue); + queue_work(ss->workqueue, &ss->work); + spin_unlock_irqrestore(&ss->lock, flags); + return 0; +} + +static irqreturn_t stmp_spi_irq(int irq, void *dev_id) +{ + struct stmp_spi *ss = dev_id; + + stmp3xxx_dma_clear_interrupt(ss->dma); + complete(&ss->done); + return IRQ_HANDLED; +} + +static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id) +{ + struct stmp_spi *ss = dev_id; + u32 c1, st; + + c1 = readl(ss->regs + HW_SSP_CTRL1); + st = readl(ss->regs + HW_SSP_STATUS); + dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n", + __func__, st, c1); + stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1); + + return IRQ_HANDLED; +} + +static int __devinit stmp_spi_probe(struct platform_device *dev) +{ + int err = 0; + struct spi_master *master; + struct stmp_spi *ss; + struct resource *r; + + master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi)); + if (master == NULL) { + err = -ENOMEM; + goto out0; + } + master->flags = SPI_MASTER_HALF_DUPLEX; + + ss = spi_master_get_devdata(master); + platform_set_drvdata(dev, master); + + /* Get resources(memory, IRQ) associated with the device */ + r = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (r == NULL) { + err = -ENODEV; + goto out_put_master; + } + ss->regs = ioremap(r->start, resource_size(r)); + if (!ss->regs) { + err = -EINVAL; + goto out_put_master; + } + + ss->master_dev = &dev->dev; + ss->id = dev->id; + + INIT_WORK(&ss->work, stmp_spi_handle); + INIT_LIST_HEAD(&ss->queue); + spin_lock_init(&ss->lock); + + ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev)); + if (!ss->workqueue) { + err = -ENXIO; + goto out_put_master; + } + master->transfer = stmp_spi_transfer; + master->setup = stmp_spi_setup; + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA; + + ss->irq = platform_get_irq(dev, 0); + if (ss->irq < 0) { + err = ss->irq; + goto out_put_master; + } + ss->err_irq = platform_get_irq(dev, 1); + if (ss->err_irq < 0) { + err = ss->err_irq; + goto out_put_master; + } + + r = platform_get_resource(dev, IORESOURCE_DMA, 0); + if (r == NULL) { + err = -ENODEV; + goto out_put_master; + } + + ss->dma = r->start; + err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev)); + if (err) + goto out_put_master; + + err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d); + if (err) + goto out_free_dma; + + master->bus_num = dev->id; + master->num_chipselect = 1; + + /* SPI controller initializations */ + err = stmp_spi_init_hw(ss); + if (err) { + dev_dbg(&dev->dev, "cannot initialize hardware\n"); + goto out_free_dma_desc; + } + + if (clock) { + dev_info(&dev->dev, "clock rate forced to %d\n", clock); + clk_set_rate(ss->clk, clock); + } + ss->speed_khz = clk_get_rate(ss->clk); + ss->divider = 2; + dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n", + ss->speed_khz, clk_get_rate(ss->clk), ss->divider); + + /* Register for SPI interrupt */ + err = request_irq(ss->irq, stmp_spi_irq, 0, + dev_name(&dev->dev), ss); + if (err) { + dev_dbg(&dev->dev, "request_irq failed, %d\n", err); + goto out_release_hw; + } + + /* ..and shared interrupt for all SSP controllers */ + err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED, + 
dev_name(&dev->dev), ss); + if (err) { + dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err); + goto out_free_irq; + } + + err = spi_register_master(master); + if (err) { + dev_dbg(&dev->dev, "cannot register spi master, %d\n", err); + goto out_free_irq_2; + } + dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n", + (u32)ss->regs, ss->irq, master->bus_num, + pio ? "PIO" : "DMA"); + return 0; + +out_free_irq_2: + free_irq(ss->err_irq, ss); +out_free_irq: + free_irq(ss->irq, ss); +out_free_dma_desc: + stmp3xxx_dma_free_command(ss->dma, &ss->d); +out_free_dma: + stmp3xxx_dma_release(ss->dma); +out_release_hw: + stmp_spi_release_hw(ss); +out_put_master: + if (ss->workqueue) + destroy_workqueue(ss->workqueue); + if (ss->regs) + iounmap(ss->regs); + platform_set_drvdata(dev, NULL); + spi_master_put(master); +out0: + return err; +} + +static int __devexit stmp_spi_remove(struct platform_device *dev) +{ + struct stmp_spi *ss; + struct spi_master *master; + + master = platform_get_drvdata(dev); + if (master == NULL) + goto out0; + ss = spi_master_get_devdata(master); + + spi_unregister_master(master); + + free_irq(ss->err_irq, ss); + free_irq(ss->irq, ss); + stmp3xxx_dma_free_command(ss->dma, &ss->d); + stmp3xxx_dma_release(ss->dma); + stmp_spi_release_hw(ss); + destroy_workqueue(ss->workqueue); + iounmap(ss->regs); + spi_master_put(master); + platform_set_drvdata(dev, NULL); +out0: + return 0; +} + +#ifdef CONFIG_PM +static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg) +{ + struct stmp_spi *ss; + struct spi_master *master; + + master = platform_get_drvdata(pdev); + ss = spi_master_get_devdata(master); + + ss->saved_timings = readl(HW_SSP_TIMING + ss->regs); + clk_disable(ss->clk); + + return 0; +} + +static int stmp_spi_resume(struct platform_device *pdev) +{ + struct stmp_spi *ss; + struct spi_master *master; + + master = platform_get_drvdata(pdev); + ss = spi_master_get_devdata(master); + + clk_enable(ss->clk); + stmp3xxx_reset_block(ss->regs, false); + writel(ss->saved_timings, ss->regs + HW_SSP_TIMING); + + return 0; +} + +#else +#define stmp_spi_suspend NULL +#define stmp_spi_resume NULL +#endif + +static struct platform_driver stmp_spi_driver = { + .probe = stmp_spi_probe, + .remove = __devexit_p(stmp_spi_remove), + .driver = { + .name = "stmp3xxx_ssp", + .owner = THIS_MODULE, + }, + .suspend = stmp_spi_suspend, + .resume = stmp_spi_resume, +}; + +static int __init stmp_spi_init(void) +{ + return platform_driver_register(&stmp_spi_driver); +} + +static void __exit stmp_spi_exit(void) +{ + platform_driver_unregister(&stmp_spi_driver); +} + +module_init(stmp_spi_init); +module_exit(stmp_spi_exit); +module_param(pio, int, S_IRUGO); +module_param(clock, int, S_IRUGO); +MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>"); +MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c new file mode 100644 index 00000000000..bb7df02a547 --- /dev/null +++ b/drivers/spi/spi_tegra.c @@ -0,0 +1,618 @@ +/* + * Driver for Nvidia TEGRA spi controller. + * + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Erik Gilling <konkers@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/delay.h> + +#include <linux/spi/spi.h> + +#include <mach/dma.h> + +#define SLINK_COMMAND 0x000 +#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) +#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) +#define SLINK_BOTH_EN (1 << 10) +#define SLINK_CS_SW (1 << 11) +#define SLINK_CS_VALUE (1 << 12) +#define SLINK_CS_POLARITY (1 << 13) +#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16) +#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16) +#define SLINK_IDLE_SDA_PULL_LOW (2 << 16) +#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16) +#define SLINK_IDLE_SDA_MASK (3 << 16) +#define SLINK_CS_POLARITY1 (1 << 20) +#define SLINK_CK_SDA (1 << 21) +#define SLINK_CS_POLARITY2 (1 << 22) +#define SLINK_CS_POLARITY3 (1 << 23) +#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24) +#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24) +#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24) +#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24) +#define SLINK_IDLE_SCLK_MASK (3 << 24) +#define SLINK_M_S (1 << 28) +#define SLINK_WAIT (1 << 29) +#define SLINK_GO (1 << 30) +#define SLINK_ENB (1 << 31) + +#define SLINK_COMMAND2 0x004 +#define SLINK_LSBFE (1 << 0) +#define SLINK_SSOE (1 << 1) +#define SLINK_SPIE (1 << 4) +#define SLINK_BIDIROE (1 << 6) +#define SLINK_MODFEN (1 << 7) +#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8) +#define SLINK_CS_ACTIVE_BETWEEN (1 << 17) +#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18) +#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20) +#define SLINK_FIFO_REFILLS_0 (0 << 22) +#define SLINK_FIFO_REFILLS_1 (1 << 22) +#define SLINK_FIFO_REFILLS_2 (2 << 22) +#define SLINK_FIFO_REFILLS_3 (3 << 22) +#define SLINK_FIFO_REFILLS_MASK (3 << 22) +#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26) +#define SLINK_SPC0 (1 << 29) +#define SLINK_TXEN (1 << 30) +#define SLINK_RXEN (1 << 31) + +#define SLINK_STATUS 0x008 +#define SLINK_COUNT(val) (((val) >> 0) & 0x1f) +#define SLINK_WORD(val) (((val) >> 5) & 0x1f) +#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff) +#define SLINK_MODF (1 << 16) +#define SLINK_RX_UNF (1 << 18) +#define SLINK_TX_OVF (1 << 19) +#define SLINK_TX_FULL (1 << 20) +#define SLINK_TX_EMPTY (1 << 21) +#define SLINK_RX_FULL (1 << 22) +#define SLINK_RX_EMPTY (1 << 23) +#define SLINK_TX_UNF (1 << 24) +#define SLINK_RX_OVF (1 << 25) +#define SLINK_TX_FLUSH (1 << 26) +#define SLINK_RX_FLUSH (1 << 27) +#define SLINK_SCLK (1 << 28) +#define SLINK_ERR (1 << 29) +#define SLINK_RDY (1 << 30) +#define SLINK_BSY (1 << 31) + +#define SLINK_MAS_DATA 0x010 +#define SLINK_SLAVE_DATA 0x014 + +#define SLINK_DMA_CTL 0x018 +#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0) +#define SLINK_TX_TRIG_1 (0 << 16) +#define SLINK_TX_TRIG_4 (1 << 16) +#define SLINK_TX_TRIG_8 (2 << 16) +#define SLINK_TX_TRIG_16 (3 << 16) +#define SLINK_TX_TRIG_MASK (3 << 16) +#define SLINK_RX_TRIG_1 (0 << 18) +#define SLINK_RX_TRIG_4 (1 << 18) +#define SLINK_RX_TRIG_8 (2 << 18) +#define SLINK_RX_TRIG_16 (3 << 18) +#define SLINK_RX_TRIG_MASK (3 << 18) +#define SLINK_PACKED (1 << 20) +#define SLINK_PACK_SIZE_4 (0 << 21) +#define SLINK_PACK_SIZE_8 (1 << 21) +#define 
SLINK_PACK_SIZE_16 (2 << 21) +#define SLINK_PACK_SIZE_32 (3 << 21) +#define SLINK_PACK_SIZE_MASK (3 << 21) +#define SLINK_IE_TXC (1 << 26) +#define SLINK_IE_RXC (1 << 27) +#define SLINK_DMA_EN (1 << 31) + +#define SLINK_STATUS2 0x01c +#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0) +#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16) + +#define SLINK_TX_FIFO 0x100 +#define SLINK_RX_FIFO 0x180 + +static const unsigned long spi_tegra_req_sels[] = { + TEGRA_DMA_REQ_SEL_SL2B1, + TEGRA_DMA_REQ_SEL_SL2B2, + TEGRA_DMA_REQ_SEL_SL2B3, + TEGRA_DMA_REQ_SEL_SL2B4, +}; + +#define BB_LEN 32 + +struct spi_tegra_data { + struct spi_master *master; + struct platform_device *pdev; + spinlock_t lock; + + struct clk *clk; + void __iomem *base; + unsigned long phys; + + u32 cur_speed; + + struct list_head queue; + struct spi_transfer *cur; + unsigned cur_pos; + unsigned cur_len; + unsigned cur_bytes_per_word; + + /* The tegra spi controller has a bug which causes the first word + * in PIO transactions to be garbage. Since packed DMA transactions + * require transfers to be 4 byte aligned we need a bounce buffer + * for the generic case. + */ + struct tegra_dma_req rx_dma_req; + struct tegra_dma_channel *rx_dma; + u32 *rx_bb; + dma_addr_t rx_bb_phys; +}; + + +static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi, + unsigned long reg) +{ + return readl(tspi->base + reg); +} + +static inline void spi_tegra_writel(struct spi_tegra_data *tspi, + unsigned long val, + unsigned long reg) +{ + writel(val, tspi->base + reg); +} + +static void spi_tegra_go(struct spi_tegra_data *tspi) +{ + unsigned long val; + + wmb(); + + val = spi_tegra_readl(tspi, SLINK_DMA_CTL); + val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN; + val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1); + spi_tegra_writel(tspi, val, SLINK_DMA_CTL); + + tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req); + + val |= SLINK_DMA_EN; + spi_tegra_writel(tspi, val, SLINK_DMA_CTL); +} + +static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi, + struct spi_transfer *t) +{ + unsigned len = min(t->len - tspi->cur_pos, BB_LEN * + tspi->cur_bytes_per_word); + u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos; + int i, j; + unsigned long val; + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + val &= ~SLINK_WORD_SIZE(~0); + val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1); + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + for (i = 0; i < len; i += tspi->cur_bytes_per_word) { + val = 0; + for (j = 0; j < tspi->cur_bytes_per_word; j++) + val |= tx_buf[i + j] << j * 8; + + spi_tegra_writel(tspi, val, SLINK_TX_FIFO); + } + + tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4; + + return len; +} + +static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi, + struct spi_transfer *t) +{ + unsigned len = tspi->cur_len; + u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos; + int i, j; + unsigned long val; + + for (i = 0; i < len; i += tspi->cur_bytes_per_word) { + val = tspi->rx_bb[i / tspi->cur_bytes_per_word]; + for (j = 0; j < tspi->cur_bytes_per_word; j++) + rx_buf[i + j] = (val >> (j * 8)) & 0xff; + } + + return len; +} + +static void spi_tegra_start_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + u32 speed; + u8 bits_per_word; + unsigned long val; + + speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; + bits_per_word = t->bits_per_word ? 
t->bits_per_word : + spi->bits_per_word; + + tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1; + + if (speed != tspi->cur_speed) + clk_set_rate(tspi->clk, speed); + + if (tspi->cur_speed == 0) + clk_enable(tspi->clk); + + tspi->cur_speed = speed; + + val = spi_tegra_readl(tspi, SLINK_COMMAND2); + val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN; + if (t->rx_buf) + val |= SLINK_RXEN; + if (t->tx_buf) + val |= SLINK_TXEN; + val |= SLINK_SS_EN_CS(spi->chip_select); + val |= SLINK_SPIE; + spi_tegra_writel(tspi, val, SLINK_COMMAND2); + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + val &= ~SLINK_BIT_LENGTH(~0); + val |= SLINK_BIT_LENGTH(bits_per_word - 1); + + /* FIXME: should probably control CS manually so that we can be sure + * it does not go low between transfer and to support delay_usecs + * correctly. + */ + val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW; + + if (spi->mode & SPI_CPHA) + val |= SLINK_CK_SDA; + + if (spi->mode & SPI_CPOL) + val |= SLINK_IDLE_SCLK_DRIVE_HIGH; + else + val |= SLINK_IDLE_SCLK_DRIVE_LOW; + + val |= SLINK_M_S; + + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS); + + tspi->cur = t; + tspi->cur_pos = 0; + tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t); + + spi_tegra_go(tspi); +} + +static void spi_tegra_start_message(struct spi_device *spi, + struct spi_message *m) +{ + struct spi_transfer *t; + + m->actual_length = 0; + m->status = 0; + + t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); + spi_tegra_start_transfer(spi, t); +} + +static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req) +{ + struct spi_tegra_data *tspi = req->dev; + unsigned long flags; + struct spi_message *m; + struct spi_device *spi; + int timeout = 0; + unsigned long val; + + /* the SPI controller may come back with both the BSY and RDY bits + * set. In this case we need to wait for the BSY bit to clear so + * that we are sure the DMA is finished. 1000 reads was empirically + * determined to be long enough. + */ + while (timeout++ < 1000) { + if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY)) + break; + } + + spin_lock_irqsave(&tspi->lock, flags); + + val = spi_tegra_readl(tspi, SLINK_STATUS); + val |= SLINK_RDY; + spi_tegra_writel(tspi, val, SLINK_STATUS); + + m = list_first_entry(&tspi->queue, struct spi_message, queue); + + if (timeout >= 1000) + m->status = -EIO; + + spi = m->state; + + tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur); + m->actual_length += tspi->cur_pos; + + if (tspi->cur_pos < tspi->cur->len) { + tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur); + spi_tegra_go(tspi); + } else if (!list_is_last(&tspi->cur->transfer_list, + &m->transfers)) { + tspi->cur = list_first_entry(&tspi->cur->transfer_list, + struct spi_transfer, + transfer_list); + spi_tegra_start_transfer(spi, tspi->cur); + } else { + list_del(&m->queue); + + m->complete(m->context); + + if (!list_empty(&tspi->queue)) { + m = list_first_entry(&tspi->queue, struct spi_message, + queue); + spi = m->state; + spi_tegra_start_message(spi, m); + } else { + clk_disable(tspi->clk); + tspi->cur_speed = 0; + } + } + + spin_unlock_irqrestore(&tspi->lock, flags); +} + +static int spi_tegra_setup(struct spi_device *spi) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + unsigned long cs_bit; + unsigned long val; + unsigned long flags; + + dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", + spi->bits_per_word, + spi->mode & SPI_CPOL ? 
"" : "~", + spi->mode & SPI_CPHA ? "" : "~", + spi->max_speed_hz); + + + switch (spi->chip_select) { + case 0: + cs_bit = SLINK_CS_POLARITY; + break; + + case 1: + cs_bit = SLINK_CS_POLARITY1; + break; + + case 2: + cs_bit = SLINK_CS_POLARITY2; + break; + + case 4: + cs_bit = SLINK_CS_POLARITY3; + break; + + default: + return -EINVAL; + } + + spin_lock_irqsave(&tspi->lock, flags); + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + if (spi->mode & SPI_CS_HIGH) + val |= cs_bit; + else + val &= ~cs_bit; + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + spin_unlock_irqrestore(&tspi->lock, flags); + + return 0; +} + +static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + struct spi_transfer *t; + unsigned long flags; + int was_empty; + + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word < 0 || t->bits_per_word > 32) + return -EINVAL; + + if (t->len == 0) + return -EINVAL; + + if (!t->rx_buf && !t->tx_buf) + return -EINVAL; + } + + m->state = spi; + + spin_lock_irqsave(&tspi->lock, flags); + was_empty = list_empty(&tspi->queue); + list_add_tail(&m->queue, &tspi->queue); + + if (was_empty) + spi_tegra_start_message(spi, m); + + spin_unlock_irqrestore(&tspi->lock, flags); + + return 0; +} + +static int __init spi_tegra_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct spi_tegra_data *tspi; + struct resource *r; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof *tspi); + if (master == NULL) { + dev_err(&pdev->dev, "master allocation failed\n"); + return -ENOMEM; + } + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + master->bus_num = pdev->id; + + master->setup = spi_tegra_setup; + master->transfer = spi_tegra_transfer; + master->num_chipselect = 4; + + dev_set_drvdata(&pdev->dev, master); + tspi = spi_master_get_devdata(master); + tspi->master = master; + tspi->pdev = pdev; + spin_lock_init(&tspi->lock); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENODEV; + goto err0; + } + + if (!request_mem_region(r->start, (r->end - r->start) + 1, + dev_name(&pdev->dev))) { + ret = -EBUSY; + goto err0; + } + + tspi->phys = r->start; + tspi->base = ioremap(r->start, r->end - r->start + 1); + if (!tspi->base) { + dev_err(&pdev->dev, "can't ioremap iomem\n"); + ret = -ENOMEM; + goto err1; + } + + tspi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR_OR_NULL(tspi->clk)) { + dev_err(&pdev->dev, "can not get clock\n"); + ret = PTR_ERR(tspi->clk); + goto err2; + } + + INIT_LIST_HEAD(&tspi->queue); + + tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT); + if (!tspi->rx_dma) { + dev_err(&pdev->dev, "can not allocate rx dma channel\n"); + ret = -ENODEV; + goto err3; + } + + tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + &tspi->rx_bb_phys, GFP_KERNEL); + if (!tspi->rx_bb) { + dev_err(&pdev->dev, "can not allocate rx bounce buffer\n"); + ret = -ENOMEM; + goto err4; + } + + tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete; + tspi->rx_dma_req.to_memory = 1; + tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys; + tspi->rx_dma_req.dest_bus_width = 32; + tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO; + tspi->rx_dma_req.source_bus_width = 32; + tspi->rx_dma_req.source_wrap = 4; + tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; + 
tspi->rx_dma_req.dev = tspi; + + ret = spi_register_master(master); + + if (ret < 0) + goto err5; + + return ret; + +err5: + dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + tspi->rx_bb, tspi->rx_bb_phys); +err4: + tegra_dma_free_channel(tspi->rx_dma); +err3: + clk_put(tspi->clk); +err2: + iounmap(tspi->base); +err1: + release_mem_region(r->start, (r->end - r->start) + 1); +err0: + spi_master_put(master); + return ret; +} + +static int __devexit spi_tegra_remove(struct platform_device *pdev) +{ + struct spi_master *master; + struct spi_tegra_data *tspi; + struct resource *r; + + master = dev_get_drvdata(&pdev->dev); + tspi = spi_master_get_devdata(master); + + tegra_dma_free_channel(tspi->rx_dma); + + dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + tspi->rx_bb, tspi->rx_bb_phys); + + clk_put(tspi->clk); + iounmap(tspi->base); + + spi_master_put(master); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(r->start, (r->end - r->start) + 1); + + return 0; +} + +MODULE_ALIAS("platform:spi_tegra"); + +static struct platform_driver spi_tegra_driver = { + .driver = { + .name = "spi_tegra", + .owner = THIS_MODULE, + }, + .remove = __devexit_p(spi_tegra_remove), +}; + +static int __init spi_tegra_init(void) +{ + return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe); +} +module_init(spi_tegra_init); + +static void __exit spi_tegra_exit(void) +{ + platform_driver_unregister(&spi_tegra_driver); +} +module_exit(spi_tegra_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c new file mode 100644 index 00000000000..79e48d45113 --- /dev/null +++ b/drivers/spi/spi_topcliff_pch.c @@ -0,0 +1,1303 @@ +/* + * SPI bus driver for the Topcliff PCH used by Intel SoCs + * + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/wait.h> +#include <linux/spi/spi.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/spi/spidev.h> +#include <linux/module.h> +#include <linux/device.h> + +/* Register offsets */ +#define PCH_SPCR 0x00 /* SPI control register */ +#define PCH_SPBRR 0x04 /* SPI baud rate register */ +#define PCH_SPSR 0x08 /* SPI status register */ +#define PCH_SPDWR 0x0C /* SPI write data register */ +#define PCH_SPDRR 0x10 /* SPI read data register */ +#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ +#define PCH_SRST 0x1C /* SPI reset register */ + +#define PCH_SPSR_TFD 0x000007C0 +#define PCH_SPSR_RFD 0x0000F800 + +#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11) +#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6) + +#define PCH_RX_THOLD 7 +#define PCH_RX_THOLD_MAX 15 + +#define PCH_MAX_BAUDRATE 5000000 +#define PCH_MAX_FIFO_DEPTH 16 + +#define STATUS_RUNNING 1 +#define STATUS_EXITING 2 +#define PCH_SLEEP_TIME 10 + +#define PCH_ADDRESS_SIZE 0x20 + +#define SSN_LOW 0x02U +#define SSN_NO_CONTROL 0x00U +#define PCH_MAX_CS 0xFF +#define PCI_DEVICE_ID_GE_SPI 0x8816 + +#define SPCR_SPE_BIT (1 << 0) +#define SPCR_MSTR_BIT (1 << 1) +#define SPCR_LSBF_BIT (1 << 4) +#define SPCR_CPHA_BIT (1 << 5) +#define SPCR_CPOL_BIT (1 << 6) +#define SPCR_TFIE_BIT (1 << 8) +#define SPCR_RFIE_BIT (1 << 9) +#define SPCR_FIE_BIT (1 << 10) +#define SPCR_ORIE_BIT (1 << 11) +#define SPCR_MDFIE_BIT (1 << 12) +#define SPCR_FICLR_BIT (1 << 24) +#define SPSR_TFI_BIT (1 << 0) +#define SPSR_RFI_BIT (1 << 1) +#define SPSR_FI_BIT (1 << 2) +#define SPBRR_SIZE_BIT (1 << 10) + +#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) + +#define SPCR_RFIC_FIELD 20 +#define SPCR_TFIC_FIELD 16 + +#define SPSR_INT_BITS 0x1F +#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) +#define MASK_RFIC_SPCR_BITS (~(0xf << 20)) +#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) + +#define PCH_CLOCK_HZ 50000000 +#define PCH_MAX_SPBR 1023 + + +/** + * struct pch_spi_data - Holds the SPI channel specific details + * @io_remap_addr: The remapped PCI base address + * @master: Pointer to the SPI master structure + * @work: Reference to work queue handler + * @wk: Workqueue for carrying out execution of the + * requests + * @wait: Wait queue for waking up upon receiving an + * interrupt. 
+ * @transfer_complete: Status of SPI Transfer + * @bcurrent_msg_processing: Status flag for message processing + * @lock: Lock for protecting this structure + * @queue: SPI Message queue + * @status: Status of the SPI driver + * @bpw_len: Length of data to be transferred in bits per + * word + * @transfer_active: Flag showing active transfer + * @tx_index: Transmit data count; for bookkeeping during + * transfer + * @rx_index: Receive data count; for bookkeeping during + * transfer + * @tx_buff: Buffer for data to be transmitted + * @rx_index: Buffer for Received data + * @n_curnt_chip: The chip number that this SPI driver currently + * operates on + * @current_chip: Reference to the current chip that this SPI + * driver currently operates on + * @current_msg: The current message that this SPI driver is + * handling + * @cur_trans: The current transfer that this SPI driver is + * handling + * @board_dat: Reference to the SPI device data structure + */ +struct pch_spi_data { + void __iomem *io_remap_addr; + struct spi_master *master; + struct work_struct work; + struct workqueue_struct *wk; + wait_queue_head_t wait; + u8 transfer_complete; + u8 bcurrent_msg_processing; + spinlock_t lock; + struct list_head queue; + u8 status; + u32 bpw_len; + u8 transfer_active; + u32 tx_index; + u32 rx_index; + u16 *pkt_tx_buff; + u16 *pkt_rx_buff; + u8 n_curnt_chip; + struct spi_device *current_chip; + struct spi_message *current_msg; + struct spi_transfer *cur_trans; + struct pch_spi_board_data *board_dat; +}; + +/** + * struct pch_spi_board_data - Holds the SPI device specific details + * @pdev: Pointer to the PCI device + * @irq_reg_sts: Status of IRQ registration + * @pci_req_sts: Status of pci_request_regions + * @suspend_sts: Status of suspend + * @data: Pointer to SPI channel data structure + */ +struct pch_spi_board_data { + struct pci_dev *pdev; + u8 irq_reg_sts; + u8 pci_req_sts; + u8 suspend_sts; + struct pch_spi_data *data; +}; + +static struct pci_device_id pch_spi_pcidev_id[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, + {0,} +}; + +/** + * pch_spi_writereg() - Performs register writes + * @master: Pointer to struct spi_master. + * @idx: Register offset. + * @val: Value to be written to register. + */ +static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + iowrite32(val, (data->io_remap_addr + idx)); +} + +/** + * pch_spi_readreg() - Performs register reads + * @master: Pointer to struct spi_master. + * @idx: Register offset. + */ +static inline u32 pch_spi_readreg(struct spi_master *master, int idx) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + return ioread32(data->io_remap_addr + idx); +} + +static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, + u32 set, u32 clr) +{ + u32 tmp = pch_spi_readreg(master, idx); + tmp = (tmp & ~clr) | set; + pch_spi_writereg(master, idx, tmp); +} + +static void pch_spi_set_master_mode(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); +} + +/** + * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs + * @master: Pointer to struct spi_master. 
+ */ +static void pch_spi_clear_fifo(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); + pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); +} + +static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, + void __iomem *io_remap_addr) +{ + u32 n_read, tx_index, rx_index, bpw_len; + u16 *pkt_rx_buffer, *pkt_tx_buff; + int read_cnt; + u32 reg_spcr_val; + void __iomem *spsr; + void __iomem *spdrr; + void __iomem *spdwr; + + spsr = io_remap_addr + PCH_SPSR; + iowrite32(reg_spsr_val, spsr); + + if (data->transfer_active) { + rx_index = data->rx_index; + tx_index = data->tx_index; + bpw_len = data->bpw_len; + pkt_rx_buffer = data->pkt_rx_buff; + pkt_tx_buff = data->pkt_tx_buff; + + spdrr = io_remap_addr + PCH_SPDRR; + spdwr = io_remap_addr + PCH_SPDWR; + + n_read = PCH_READABLE(reg_spsr_val); + + for (read_cnt = 0; (read_cnt < n_read); read_cnt++) { + pkt_rx_buffer[rx_index++] = ioread32(spdrr); + if (tx_index < bpw_len) + iowrite32(pkt_tx_buff[tx_index++], spdwr); + } + + /* disable RFI if not needed */ + if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) { + reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR); + reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ + + /* reset rx threshold */ + reg_spcr_val &= MASK_RFIC_SPCR_BITS; + reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); + iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), + (io_remap_addr + PCH_SPCR)); + } + + /* update counts */ + data->tx_index = tx_index; + data->rx_index = rx_index; + + } + + /* if transfer complete interrupt */ + if (reg_spsr_val & SPSR_FI_BIT) { + /* disable FI & RFI interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, + SPCR_FIE_BIT | SPCR_RFIE_BIT); + + /* transfer is completed;inform pch_spi_process_messages */ + data->transfer_complete = true; + wake_up(&data->wait); + } +} + +/** + * pch_spi_handler() - Interrupt handler + * @irq: The interrupt number. + * @dev_id: Pointer to struct pch_spi_board_data. + */ +static irqreturn_t pch_spi_handler(int irq, void *dev_id) +{ + u32 reg_spsr_val; + struct pch_spi_data *data; + void __iomem *spsr; + void __iomem *io_remap_addr; + irqreturn_t ret = IRQ_NONE; + struct pch_spi_board_data *board_dat = dev_id; + + if (board_dat->suspend_sts) { + dev_dbg(&board_dat->pdev->dev, + "%s returning due to suspend\n", __func__); + return IRQ_NONE; + } + + data = board_dat->data; + io_remap_addr = data->io_remap_addr; + spsr = io_remap_addr + PCH_SPSR; + + reg_spsr_val = ioread32(spsr); + + /* Check if the interrupt is for SPI device */ + if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { + pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); + ret = IRQ_HANDLED; + } + + dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n", + __func__, ret); + + return ret; +} + +/** + * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR + * @master: Pointer to struct spi_master. + * @speed_hz: Baud rate. + */ +static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) +{ + u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); + + /* if baud rate is less than we can support limit it */ + if (n_spbr > PCH_MAX_SPBR) + n_spbr = PCH_MAX_SPBR; + + pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); +} + +/** + * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR + * @master: Pointer to struct spi_master. + * @bits_per_word: Bits per word for SPI transfer. 
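+ *
+ * Only 8 and 16 bits per word are supported: SPBRR_SIZE_BIT is cleared
+ * for 8-bit transfers and set for 16-bit transfers.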
+ */ +static void pch_spi_set_bits_per_word(struct spi_master *master, + u8 bits_per_word) +{ + if (bits_per_word == 8) + pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); + else + pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); +} + +/** + * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer + * @spi: Pointer to struct spi_device. + */ +static void pch_spi_setup_transfer(struct spi_device *spi) +{ + u32 flags = 0; + + dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", + __func__, pch_spi_readreg(spi->master, PCH_SPBRR), + spi->max_speed_hz); + pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); + + /* set bits per word */ + pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); + + if (!(spi->mode & SPI_LSB_FIRST)) + flags |= SPCR_LSBF_BIT; + if (spi->mode & SPI_CPOL) + flags |= SPCR_CPOL_BIT; + if (spi->mode & SPI_CPHA) + flags |= SPCR_CPHA_BIT; + pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, + (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); + + /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ + pch_spi_clear_fifo(spi->master); +} + +/** + * pch_spi_reset() - Clears SPI registers + * @master: Pointer to struct spi_master. + */ +static void pch_spi_reset(struct spi_master *master) +{ + /* write 1 to reset SPI */ + pch_spi_writereg(master, PCH_SRST, 0x1); + + /* clear reset */ + pch_spi_writereg(master, PCH_SRST, 0x0); +} + +static int pch_spi_setup(struct spi_device *pspi) +{ + /* check bits per word */ + if (pspi->bits_per_word == 0) { + pspi->bits_per_word = 8; + dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__); + } + + if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) { + dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); + return -EINVAL; + } + + /* Check baud rate setting */ + /* if baud rate of chip is greater than + max we can support,return error */ + if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE) + pspi->max_speed_hz = PCH_MAX_BAUDRATE; + + dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__, + (pspi->mode) & (SPI_CPOL | SPI_CPHA)); + + return 0; +} + +static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) +{ + + struct spi_transfer *transfer; + struct pch_spi_data *data = spi_master_get_devdata(pspi->master); + int retval; + unsigned long flags; + + /* validate spi message and baud rate */ + if (unlikely(list_empty(&pmsg->transfers) == 1)) { + dev_err(&pspi->dev, "%s list empty\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (unlikely(pspi->max_speed_hz == 0)) { + dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n", + __func__, pspi->max_speed_hz); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Transfer List not empty. " + "Transfer Speed is set.\n", __func__); + + /* validate Tx/Rx buffers and Transfer length */ + list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { + if (!transfer->tx_buf && !transfer->rx_buf) { + dev_err(&pspi->dev, + "%s Tx and Rx buffer NULL\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (!transfer->len) { + dev_err(&pspi->dev, "%s Transfer length invalid\n", + __func__); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. 
Transfer length" + " valid\n", __func__); + + /* if baud rate hs been specified validate the same */ + if (transfer->speed_hz > PCH_MAX_BAUDRATE) + transfer->speed_hz = PCH_MAX_BAUDRATE; + + /* if bits per word has been specified validate the same */ + if (transfer->bits_per_word) { + if ((transfer->bits_per_word != 8) + && (transfer->bits_per_word != 16)) { + retval = -EINVAL; + dev_err(&pspi->dev, + "%s Invalid bits per word\n", __func__); + goto err_out; + } + } + } + + spin_lock_irqsave(&data->lock, flags); + + /* We won't process any messages if we have been asked to terminate */ + if (data->status == STATUS_EXITING) { + dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); + retval = -ESHUTDOWN; + goto err_return_spinlock; + } + + /* If suspended ,return -EINVAL */ + if (data->board_dat->suspend_sts) { + dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); + retval = -EINVAL; + goto err_return_spinlock; + } + + /* set status of message */ + pmsg->actual_length = 0; + dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); + + pmsg->status = -EINPROGRESS; + + /* add message to queue */ + list_add_tail(&pmsg->queue, &data->queue); + dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); + + /* schedule work queue to run */ + queue_work(data->wk, &data->work); + dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__); + + retval = 0; + +err_return_spinlock: + spin_unlock_irqrestore(&data->lock, flags); +err_out: + dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); + return retval; +} + +static inline void pch_spi_select_chip(struct pch_spi_data *data, + struct spi_device *pspi) +{ + if (data->current_chip != NULL) { + if (pspi->chip_select != data->n_curnt_chip) { + dev_dbg(&pspi->dev, "%s : different slave\n", __func__); + data->current_chip = NULL; + } + } + + data->current_chip = pspi; + + data->n_curnt_chip = data->current_chip->chip_select; + + dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__); + pch_spi_setup_transfer(pspi); +} + +static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, + struct spi_message **ppmsg) +{ + int size; + u32 n_writes; + int j; + struct spi_message *pmsg; + const u8 *tx_buf; + const u16 *tx_sbuf; + + pmsg = *ppmsg; + + /* set baud rate if needed */ + if (data->cur_trans->speed_hz) { + dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); + pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); + } + + /* set bits per word if needed */ + if (data->cur_trans->bits_per_word && + (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { + dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); + pch_spi_set_bits_per_word(data->master, + data->cur_trans->bits_per_word); + *bpw = data->cur_trans->bits_per_word; + } else { + *bpw = data->current_msg->spi->bits_per_word; + } + + /* reset Tx/Rx index */ + data->tx_index = 0; + data->rx_index = 0; + + data->bpw_len = data->cur_trans->len / (*bpw / 8); + + /* find alloc size */ + size = data->cur_trans->len * sizeof(*data->pkt_tx_buff); + + /* allocate memory for pkt_tx_buff & pkt_rx_buffer */ + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); + if (data->pkt_tx_buff != NULL) { + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); + if (!data->pkt_rx_buff) + kfree(data->pkt_tx_buff); + } + + if (!data->pkt_rx_buff) { + /* flush queue and set status of all transfers to -ENOMEM */ + dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); + list_for_each_entry(pmsg, data->queue.next, 
queue) { + pmsg->status = -ENOMEM; + + if (pmsg->complete != 0) + pmsg->complete(pmsg->context); + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + return; + } + + /* copy Tx Data */ + if (data->cur_trans->tx_buf != NULL) { + if (*bpw == 8) { + tx_buf = data->cur_trans->tx_buf; + for (j = 0; j < data->bpw_len; j++) + data->pkt_tx_buff[j] = *tx_buf++; + } else { + tx_sbuf = data->cur_trans->tx_buf; + for (j = 0; j < data->bpw_len; j++) + data->pkt_tx_buff[j] = *tx_sbuf++; + } + } + + /* if len greater than PCH_MAX_FIFO_DEPTH, write 16,else len bytes */ + n_writes = data->bpw_len; + if (n_writes > PCH_MAX_FIFO_DEPTH) + n_writes = PCH_MAX_FIFO_DEPTH; + + dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing " + "0x2 to SSNXCR\n", __func__); + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); + + for (j = 0; j < n_writes; j++) + pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]); + + /* update tx_index */ + data->tx_index = j; + + /* reset transfer complete flag */ + data->transfer_complete = false; + data->transfer_active = true; +} + + +static void pch_spi_nomore_transfer(struct pch_spi_data *data, + struct spi_message *pmsg) +{ + dev_dbg(&data->master->dev, "%s called\n", __func__); + /* Invoke complete callback + * [To the spi core..indicating end of transfer] */ + data->current_msg->status = 0; + + if (data->current_msg->complete != 0) { + dev_dbg(&data->master->dev, + "%s:Invoking callback of SPI core\n", __func__); + data->current_msg->complete(data->current_msg->context); + } + + /* update status in global variable */ + data->bcurrent_msg_processing = false; + + dev_dbg(&data->master->dev, + "%s:data->bcurrent_msg_processing = false\n", __func__); + + data->current_msg = NULL; + data->cur_trans = NULL; + + /* check if we have items in list and not suspending + * return 1 if list empty */ + if ((list_empty(&data->queue) == 0) && + (!data->board_dat->suspend_sts) && + (data->status != STATUS_EXITING)) { + /* We have some more work to do (either there is more tranint + * bpw;sfer requests in the current message or there are + *more messages) + */ + dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__); + queue_work(data->wk, &data->work); + } else if (data->board_dat->suspend_sts || + data->status == STATUS_EXITING) { + dev_dbg(&data->master->dev, + "%s suspend/remove initiated, flushing queue\n", + __func__); + list_for_each_entry(pmsg, data->queue.next, queue) { + pmsg->status = -EIO; + + if (pmsg->complete) + pmsg->complete(pmsg->context); + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + } +} + +static void pch_spi_set_ir(struct pch_spi_data *data) +{ + /* enable interrupts */ + if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { + /* set receive threshold to PCH_RX_THOLD */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + PCH_RX_THOLD << SPCR_RFIC_FIELD, + ~MASK_RFIC_SPCR_BITS); + /* enable FI and RFI interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + SPCR_RFIE_BIT | SPCR_FIE_BIT, 0); + } else { + /* set receive threshold to maximum */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, + ~MASK_TFIC_SPCR_BITS); + /* enable FI interrupt */ + pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); + } + + dev_dbg(&data->master->dev, + "%s:invoking pch_spi_set_enable to enable SPI\n", __func__); + + /* SPI set enable */ + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0); + + /* Wait until the transfer completes; go to sleep after + initiating the transfer. 
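+	   The interrupt handler sets transfer_complete and wakes data->wait
+	   once SPSR_FI_BIT fires; the code below then clears and masks the
+	   interrupts again before returning to pch_spi_process_messages().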
*/ + dev_dbg(&data->master->dev, + "%s:waiting for transfer to get over\n", __func__); + + wait_event_interruptible(data->wait, data->transfer_complete); + + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); + dev_dbg(&data->master->dev, + "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); + + data->transfer_active = false; + dev_dbg(&data->master->dev, + "%s set data->transfer_active = false\n", __func__); + + /* clear all interrupts */ + pch_spi_writereg(data->master, PCH_SPSR, + pch_spi_readreg(data->master, PCH_SPSR)); + /* disable interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); +} + +static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) +{ + int j; + u8 *rx_buf; + u16 *rx_sbuf; + + /* copy Rx Data */ + if (!data->cur_trans->rx_buf) + return; + + if (bpw == 8) { + rx_buf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_buf++ = data->pkt_rx_buff[j] & 0xFF; + } else { + rx_sbuf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_sbuf++ = data->pkt_rx_buff[j]; + } +} + + +static void pch_spi_process_messages(struct work_struct *pwork) +{ + struct spi_message *pmsg; + struct pch_spi_data *data; + int bpw; + + data = container_of(pwork, struct pch_spi_data, work); + dev_dbg(&data->master->dev, "%s data initialized\n", __func__); + + spin_lock(&data->lock); + + /* check if suspend has been initiated;if yes flush queue */ + if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { + dev_dbg(&data->master->dev, + "%s suspend/remove initiated,flushing queue\n", + __func__); + + list_for_each_entry(pmsg, data->queue.next, queue) { + pmsg->status = -EIO; + + if (pmsg->complete != 0) { + spin_unlock(&data->lock); + pmsg->complete(pmsg->context); + spin_lock(&data->lock); + } + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + + spin_unlock(&data->lock); + return; + } + + data->bcurrent_msg_processing = true; + dev_dbg(&data->master->dev, + "%s Set data->bcurrent_msg_processing= true\n", __func__); + + /* Get the message from the queue and delete it from there. */ + data->current_msg = list_entry(data->queue.next, struct spi_message, + queue); + + list_del_init(&data->current_msg->queue); + + data->current_msg->status = 0; + + pch_spi_select_chip(data, data->current_msg->spi); + + spin_unlock(&data->lock); + + do { + /* If we are already processing a message get the next + transfer structure from the message otherwise retrieve + the 1st transfer request from the message. */ + spin_lock(&data->lock); + + if (data->cur_trans == NULL) { + data->cur_trans = + list_entry(data->current_msg->transfers. 
+ next, struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting 1st transfer message\n", __func__); + } else { + data->cur_trans = + list_entry(data->cur_trans->transfer_list.next, + struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting next transfer message\n", + __func__); + } + + spin_unlock(&data->lock); + + pch_spi_set_tx(data, &bpw, &pmsg); + + /* Control interrupt*/ + pch_spi_set_ir(data); + + /* Disable SPI transfer */ + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, + SPCR_SPE_BIT); + + /* clear FIFO */ + pch_spi_clear_fifo(data->master); + + /* copy Rx Data */ + pch_spi_copy_rx_data(data, bpw); + + /* free memory */ + kfree(data->pkt_rx_buff); + data->pkt_rx_buff = NULL; + + kfree(data->pkt_tx_buff); + data->pkt_tx_buff = NULL; + + /* increment message count */ + data->current_msg->actual_length += data->cur_trans->len; + + dev_dbg(&data->master->dev, + "%s:data->current_msg->actual_length=%d\n", + __func__, data->current_msg->actual_length); + + /* check for delay */ + if (data->cur_trans->delay_usecs) { + dev_dbg(&data->master->dev, "%s:" + "delay in usec=%d\n", __func__, + data->cur_trans->delay_usecs); + udelay(data->cur_trans->delay_usecs); + } + + spin_lock(&data->lock); + + /* No more transfer in this message. */ + if ((data->cur_trans->transfer_list.next) == + &(data->current_msg->transfers)) { + pch_spi_nomore_transfer(data, pmsg); + } + + spin_unlock(&data->lock); + + } while (data->cur_trans != NULL); +} + +static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) +{ + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* free workqueue */ + if (board_dat->data->wk != NULL) { + destroy_workqueue(board_dat->data->wk); + board_dat->data->wk = NULL; + dev_dbg(&board_dat->pdev->dev, + "%s destroy_workqueue invoked successfully\n", + __func__); + } + + /* disable interrupts & free IRQ */ + if (board_dat->irq_reg_sts) { + /* disable interrupts */ + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, + PCH_ALL); + + /* free IRQ */ + free_irq(board_dat->pdev->irq, board_dat); + + dev_dbg(&board_dat->pdev->dev, + "%s free_irq invoked successfully\n", __func__); + + board_dat->irq_reg_sts = false; + } + + /* unmap PCI base address */ + if (board_dat->data->io_remap_addr != 0) { + pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); + + board_dat->data->io_remap_addr = 0; + + dev_dbg(&board_dat->pdev->dev, + "%s pci_iounmap invoked successfully\n", __func__); + } + + /* release PCI region */ + if (board_dat->pci_req_sts) { + pci_release_regions(board_dat->pdev); + dev_dbg(&board_dat->pdev->dev, + "%s pci_release_regions invoked successfully\n", + __func__); + board_dat->pci_req_sts = false; + } +} + +static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) +{ + void __iomem *io_remap_addr; + int retval; + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* create workqueue */ + board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); + if (!board_dat->data->wk) { + dev_err(&board_dat->pdev->dev, + "%s create_singlet hread_workqueue failed\n", __func__); + retval = -EBUSY; + goto err_return; + } + + dev_dbg(&board_dat->pdev->dev, + "%s create_singlethread_workqueue success\n", __func__); + + retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s request_region failed\n", __func__); + goto err_return; + } + + board_dat->pci_req_sts = true; + + io_remap_addr = 
pci_iomap(board_dat->pdev, 1, 0); + if (io_remap_addr == 0) { + dev_err(&board_dat->pdev->dev, + "%s pci_iomap failed\n", __func__); + retval = -ENOMEM; + goto err_return; + } + + /* calculate base address for all channels */ + board_dat->data->io_remap_addr = io_remap_addr; + + /* reset PCH SPI h/w */ + pch_spi_reset(board_dat->data->master); + dev_dbg(&board_dat->pdev->dev, + "%s pch_spi_reset invoked successfully\n", __func__); + + /* register IRQ */ + retval = request_irq(board_dat->pdev->irq, pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, board_dat); + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s request_irq failed\n", __func__); + goto err_return; + } + + dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n", + __func__, retval); + + board_dat->irq_reg_sts = true; + dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); + +err_return: + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s FAIL:invoking pch_spi_free_resources\n", __func__); + pch_spi_free_resources(board_dat); + } + + dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); + + return retval; +} + +static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + + struct spi_master *master; + + struct pch_spi_board_data *board_dat; + int retval; + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + /* allocate memory for private data */ + board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); + if (board_dat == NULL) { + dev_err(&pdev->dev, + " %s memory allocation for private data failed\n", + __func__); + retval = -ENOMEM; + goto err_kmalloc; + } + + dev_dbg(&pdev->dev, + "%s memory allocation for private data success\n", __func__); + + /* enable PCI device */ + retval = pci_enable_device(pdev); + if (retval != 0) { + dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__); + + goto err_pci_en_device; + } + + dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", + __func__, retval); + + board_dat->pdev = pdev; + + /* alllocate memory for SPI master */ + master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); + if (master == NULL) { + retval = -ENOMEM; + dev_err(&pdev->dev, "%s Fail.\n", __func__); + goto err_spi_alloc_master; + } + + dev_dbg(&pdev->dev, + "%s spi_alloc_master returned non NULL\n", __func__); + + /* initialize members of SPI master */ + master->bus_num = -1; + master->num_chipselect = PCH_MAX_CS; + master->setup = pch_spi_setup; + master->transfer = pch_spi_transfer; + dev_dbg(&pdev->dev, + "%s transfer member of SPI master initialized\n", __func__); + + board_dat->data = spi_master_get_devdata(master); + + board_dat->data->master = master; + board_dat->data->n_curnt_chip = 255; + board_dat->data->board_dat = board_dat; + board_dat->data->status = STATUS_RUNNING; + + INIT_LIST_HEAD(&board_dat->data->queue); + spin_lock_init(&board_dat->data->lock); + INIT_WORK(&board_dat->data->work, pch_spi_process_messages); + init_waitqueue_head(&board_dat->data->wait); + + /* allocate resources for PCH SPI */ + retval = pch_spi_get_resources(board_dat); + if (retval) { + dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); + goto err_spi_get_resources; + } + + dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", + __func__, retval); + + /* save private data in dev */ + pci_set_drvdata(pdev, board_dat); + dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); + + /* set master mode */ + pch_spi_set_master_mode(master); + dev_dbg(&pdev->dev, + "%s invoked 
pch_spi_set_master_mode\n", __func__); + + /* Register the controller with the SPI core. */ + retval = spi_register_master(master); + if (retval != 0) { + dev_err(&pdev->dev, + "%s spi_register_master FAILED\n", __func__); + goto err_spi_reg_master; + } + + dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", + __func__, retval); + + + return 0; + +err_spi_reg_master: + spi_unregister_master(master); +err_spi_get_resources: +err_spi_alloc_master: + spi_master_put(master); + pci_disable_device(pdev); +err_pci_en_device: + kfree(board_dat); +err_kmalloc: + return retval; +} + +static void pch_spi_remove(struct pci_dev *pdev) +{ + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); + int count; + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board_dat) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return; + } + + /* check for any pending messages; no action is taken if the queue + * is still full; but at least we tried. Unload anyway */ + count = 500; + spin_lock(&board_dat->data->lock); + board_dat->data->status = STATUS_EXITING; + while ((list_empty(&board_dat->data->queue) == 0) && --count) { + dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", + __func__); + spin_unlock(&board_dat->data->lock); + msleep(PCH_SLEEP_TIME); + spin_lock(&board_dat->data->lock); + } + spin_unlock(&board_dat->data->lock); + + /* Free resources allocated for PCH SPI */ + pch_spi_free_resources(board_dat); + + spi_unregister_master(board_dat->data->master); + + /* free memory for private data */ + kfree(board_dat); + + pci_set_drvdata(pdev, NULL); + + /* disable PCI device */ + pci_disable_device(pdev); + + dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); +} + +#ifdef CONFIG_PM +static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + u8 count; + int retval; + + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board_dat) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return -EFAULT; + } + + retval = 0; + board_dat->suspend_sts = true; + + /* check if the current message is processed: + Only after thats done the transfer will be suspended */ + count = 255; + while ((--count) > 0) { + if (!(board_dat->data->bcurrent_msg_processing)) { + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_" + "msg_processing = false\n", __func__); + break; + } else { + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_" + "processing = true\n", __func__); + } + msleep(PCH_SLEEP_TIME); + } + + /* Free IRQ */ + if (board_dat->irq_reg_sts) { + /* disable all interrupts */ + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, + PCH_ALL); + pch_spi_reset(board_dat->data->master); + + free_irq(board_dat->pdev->irq, board_dat); + + board_dat->irq_reg_sts = false; + dev_dbg(&pdev->dev, + "%s free_irq invoked successfully.\n", __func__); + } + + /* save config space */ + retval = pci_save_state(pdev); + + if (retval == 0) { + dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n", + __func__, retval); + /* disable PM notifications */ + pci_enable_wake(pdev, PCI_D3hot, 0); + dev_dbg(&pdev->dev, + "%s pci_enable_wake invoked successfully\n", __func__); + /* disable PCI device */ + pci_disable_device(pdev); + dev_dbg(&pdev->dev, + "%s pci_disable_device invoked successfully\n", + __func__); + /* move device to D3hot state */ + pci_set_power_state(pdev, PCI_D3hot); + dev_dbg(&pdev->dev, + "%s pci_set_power_state invoked successfully\n", + 
__func__); + } else { + dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); + } + + dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval); + + return retval; +} + +static int pch_spi_resume(struct pci_dev *pdev) +{ + int retval; + + struct pch_spi_board_data *board = pci_get_drvdata(pdev); + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return -EFAULT; + } + + /* move device to DO power state */ + pci_set_power_state(pdev, PCI_D0); + + /* restore state */ + pci_restore_state(pdev); + + retval = pci_enable_device(pdev); + if (retval < 0) { + dev_err(&pdev->dev, + "%s pci_enable_device failed\n", __func__); + } else { + /* disable PM notifications */ + pci_enable_wake(pdev, PCI_D3hot, 0); + + /* register IRQ handler */ + if (!board->irq_reg_sts) { + /* register IRQ */ + retval = request_irq(board->pdev->irq, pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, + board); + if (retval < 0) { + dev_err(&pdev->dev, + "%s request_irq failed\n", __func__); + return retval; + } + board->irq_reg_sts = true; + + /* reset PCH SPI h/w */ + pch_spi_reset(board->data->master); + pch_spi_set_master_mode(board->data->master); + + /* set suspend status to false */ + board->suspend_sts = false; + + } + } + + dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval); + + return retval; +} +#else +#define pch_spi_suspend NULL +#define pch_spi_resume NULL + +#endif + +static struct pci_driver pch_spi_pcidev = { + .name = "pch_spi", + .id_table = pch_spi_pcidev_id, + .probe = pch_spi_probe, + .remove = pch_spi_remove, + .suspend = pch_spi_suspend, + .resume = pch_spi_resume, +}; + +static int __init pch_spi_init(void) +{ + return pci_register_driver(&pch_spi_pcidev); +} +module_init(pch_spi_init); + +static void __exit pch_spi_exit(void) +{ + pci_unregister_driver(&pch_spi_pcidev); +} +module_exit(pch_spi_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver"); diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c index 96057de133a..dfa024b633e 100644 --- a/drivers/spi/spi_txx9.c +++ b/drivers/spi/spi_txx9.c @@ -29,6 +29,8 @@ #define SPI_FIFO_SIZE 4 +#define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */ +#define SPI_MIN_DIVIDER 1 /* Min. 
value for SPCR1.SER */ #define TXx9_SPMCR 0x00 #define TXx9_SPCR0 0x04 @@ -193,11 +195,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m) if (prev_speed_hz != speed_hz || prev_bits_per_word != bits_per_word) { - u32 n = (c->baseclk + speed_hz - 1) / speed_hz; - if (n < 1) - n = 1; - else if (n > 0xff) - n = 0xff; + int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; + n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER); /* enter config mode */ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); @@ -370,18 +369,16 @@ static int __init txx9spi_probe(struct platform_device *dev) goto exit; } c->baseclk = clk_get_rate(c->clk); - c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff; - c->max_speed_hz = c->baseclk; + c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1); + c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1); res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) goto exit_busy; - if (!devm_request_mem_region(&dev->dev, - res->start, res->end - res->start + 1, + if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res), "spi_txx9")) goto exit_busy; - c->membase = devm_ioremap(&dev->dev, - res->start, res->end - res->start + 1); + c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res)); if (!c->membase) goto exit_busy; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 5d869c4d3eb..4e6245e6799 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -30,7 +30,6 @@ #include <linux/errno.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/spi/spi.h> #include <linux/spi/spidev.h> @@ -42,7 +41,7 @@ * This supports acccess to SPI devices using normal userspace I/O calls. * Note that while traditional UNIX/POSIX I/O semantics are half duplex, * and often mask message boundaries, full SPI support requires full duplex - * transfers. There are several kinds of of internal message boundaries to + * transfers. There are several kinds of internal message boundaries to * handle chipselect management and other protocol options. * * SPI has a character major number assigned. We allocate minor numbers @@ -54,19 +53,24 @@ #define SPIDEV_MAJOR 153 /* assigned */ #define N_SPI_MINORS 32 /* ... up to 256 */ -static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; +static DECLARE_BITMAP(minors, N_SPI_MINORS); /* Bit masks for spi_device.mode management. Note that incorrect - * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other - * devices on a shared bus: CS_HIGH, because this device will be - * active when it shouldn't be; 3WIRE, because when active it won't - * behave as it should. + * settings for some settings can cause *lots* of trouble for other + * devices on a shared bus: * - * REVISIT should changing those two modes be privileged? + * - CS_HIGH ... this device will be active when it shouldn't be + * - 3WIRE ... when active, it won't behave as it should + * - NO_CS ... there will be no explicit message boundaries; this + * is completely incompatible with the shared bus model + * - READY ... transfers may proceed when they shouldn't. + * + * REVISIT should changing those flags be privileged? 
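+ *
+ * SPI_MODE_MASK below is widened to include SPI_NO_CS and SPI_READY so
+ * that the mode ioctl will accept them.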
*/ #define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ - | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP) + | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \ + | SPI_NO_CS | SPI_READY) struct spidev_data { dev_t devt; @@ -262,15 +266,15 @@ static int spidev_message(struct spidev_data *spidev, k_tmp->delay_usecs = u_tmp->delay_usecs; k_tmp->speed_hz = u_tmp->speed_hz; #ifdef VERBOSE - dev_dbg(&spi->dev, + dev_dbg(&spidev->spi->dev, " xfer len %zd %s%s%s%dbits %u usec %uHz\n", u_tmp->len, u_tmp->rx_buf ? "rx " : "", u_tmp->tx_buf ? "tx " : "", u_tmp->cs_change ? "cs " : "", - u_tmp->bits_per_word ? : spi->bits_per_word, + u_tmp->bits_per_word ? : spidev->spi->bits_per_word, u_tmp->delay_usecs, - u_tmp->speed_hz ? : spi->max_speed_hz); + u_tmp->speed_hz ? : spidev->spi->max_speed_hz); #endif spi_message_add_tail(k_tmp, &msg); } @@ -472,7 +476,6 @@ static int spidev_open(struct inode *inode, struct file *filp) struct spidev_data *spidev; int status = -ENXIO; - lock_kernel(); mutex_lock(&device_list_lock); list_for_each_entry(spidev, &device_list, device_entry) { @@ -498,7 +501,6 @@ static int spidev_open(struct inode *inode, struct file *filp) pr_debug("spidev: nothing for minor %d\n", iminor(inode)); mutex_unlock(&device_list_lock); - unlock_kernel(); return status; } @@ -532,7 +534,7 @@ static int spidev_release(struct inode *inode, struct file *filp) return status; } -static struct file_operations spidev_fops = { +static const struct file_operations spidev_fops = { .owner = THIS_MODULE, /* REVISIT switch to aio primitives, so that userspace * gets more complete API coverage. It'll simplify things @@ -543,6 +545,7 @@ static struct file_operations spidev_fops = { .unlocked_ioctl = spidev_ioctl, .open = spidev_open, .release = spidev_release, + .llseek = no_llseek, }; /*-------------------------------------------------------------------------*/ @@ -556,7 +559,7 @@ static struct class *spidev_class; /*-------------------------------------------------------------------------*/ -static int spidev_probe(struct spi_device *spi) +static int __devinit spidev_probe(struct spi_device *spi) { struct spidev_data *spidev; int status; @@ -605,7 +608,7 @@ static int spidev_probe(struct spi_device *spi) return status; } -static int spidev_remove(struct spi_device *spi) +static int __devexit spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); @@ -627,7 +630,7 @@ static int spidev_remove(struct spi_device *spi) return 0; } -static struct spi_driver spidev_spi = { +static struct spi_driver spidev_spi_driver = { .driver = { .name = "spidev", .owner = THIS_MODULE, @@ -659,14 +662,14 @@ static int __init spidev_init(void) spidev_class = class_create(THIS_MODULE, "spidev"); if (IS_ERR(spidev_class)) { - unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); + unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name); return PTR_ERR(spidev_class); } - status = spi_register_driver(&spidev_spi); + status = spi_register_driver(&spidev_spi_driver); if (status < 0) { class_destroy(spidev_class); - unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); + unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name); } return status; } @@ -674,12 +677,13 @@ module_init(spidev_init); static void __exit spidev_exit(void) { - spi_unregister_driver(&spidev_spi); + spi_unregister_driver(&spidev_spi_driver); class_destroy(spidev_class); - unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); + unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name); } module_exit(spidev_exit); 
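The DECLARE_BITMAP() change near the top of this file replaces the hand-sized unsigned long array used for chardev minor bookkeeping with a bitmap sized in bits, managed through the generic bitmap helpers. A minimal standalone sketch of that allocation pattern follows; the names, the size of 32, and the absence of locking are illustrative only (spidev itself updates its minors bitmap under its device-list mutex):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MAX_IDS 32

static DECLARE_BITMAP(example_ids, EXAMPLE_MAX_IDS);

/* Hand out the lowest free ID, or -EBUSY when the space is exhausted. */
static int example_get_id(void)
{
	unsigned long id = find_first_zero_bit(example_ids, EXAMPLE_MAX_IDS);

	if (id >= EXAMPLE_MAX_IDS)
		return -EBUSY;
	set_bit(id, example_ids);	/* claim it */
	return id;
}

/* Return an ID to the pool so it can be reused. */
static void example_put_id(int id)
{
	clear_bit(id, example_ids);
}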
MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>"); MODULE_DESCRIPTION("User mode SPI device interface"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:spidev"); diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c index 455991fbe28..a3938958147 100644 --- a/drivers/spi/tle62x0.c +++ b/drivers/spi/tle62x0.c @@ -11,6 +11,7 @@ #include <linux/device.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/tle62x0.h> @@ -329,3 +330,4 @@ module_exit(tle62x0_exit); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("TLE62x0 SPI driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("spi:tle62x0"); diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 46b8c5c2f45..7adaef62a99 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -1,27 +1,26 @@ /* - * xilinx_spi.c - * * Xilinx SPI controller driver (master mode only) * * Author: MontaVista Software, Inc. * source@mvista.com * - * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the - * terms of the GNU General Public License version 2. This program is licensed - * "as is" without any warranty of any kind, whether express or implied. + * Copyright (c) 2010 Secret Lab Technologies, Ltd. + * Copyright (c) 2009 Intel Corporation + * 2002-2007 (c) MontaVista Software, Inc. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/platform_device.h> - -#include <linux/of_platform.h> -#include <linux/of_device.h> -#include <linux/of_spi.h> - #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> +#include <linux/spi/xilinx_spi.h> #include <linux/io.h> #define XILINX_SPI_NAME "xilinx_spi" @@ -29,7 +28,7 @@ /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) * Product Specification", DS464 */ -#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */ +#define XSPI_CR_OFFSET 0x60 /* Control Register */ #define XSPI_CR_ENABLE 0x02 #define XSPI_CR_MASTER_MODE 0x04 @@ -40,8 +39,9 @@ #define XSPI_CR_RXFIFO_RESET 0x40 #define XSPI_CR_MANUAL_SSELECT 0x80 #define XSPI_CR_TRANS_INHIBIT 0x100 +#define XSPI_CR_LSB_FIRST 0x200 -#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */ +#define XSPI_SR_OFFSET 0x64 /* Status Register */ #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */ #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */ @@ -49,8 +49,8 @@ #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */ #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */ -#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */ -#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */ +#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */ +#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */ #define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */ @@ -70,6 +70,7 @@ #define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */ #define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */ #define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */ +#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */ #define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */ #define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */ @@ -78,35 +79,105 @@ struct xilinx_spi { /* bitbang has to be first */ struct spi_bitbang 
bitbang; struct completion done; - + struct resource mem; /* phys mem */ void __iomem *regs; /* virt. address of the control registers */ u32 irq; - u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */ - u8 *rx_ptr; /* pointer in the Tx buffer */ const u8 *tx_ptr; /* pointer in the Rx buffer */ int remaining_bytes; /* the number of bytes left to transfer */ + u8 bits_per_word; + unsigned int (*read_fn) (void __iomem *); + void (*write_fn) (u32, void __iomem *); + void (*tx_fn) (struct xilinx_spi *); + void (*rx_fn) (struct xilinx_spi *); }; -static void xspi_init_hw(void __iomem *regs_base) +static void xspi_write32(u32 val, void __iomem *addr) +{ + iowrite32(val, addr); +} + +static unsigned int xspi_read32(void __iomem *addr) +{ + return ioread32(addr); +} + +static void xspi_write32_be(u32 val, void __iomem *addr) +{ + iowrite32be(val, addr); +} + +static unsigned int xspi_read32_be(void __iomem *addr) +{ + return ioread32be(addr); +} + +static void xspi_tx8(struct xilinx_spi *xspi) +{ + xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_ptr++; +} + +static void xspi_tx16(struct xilinx_spi *xspi) +{ + xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_ptr += 2; +} + +static void xspi_tx32(struct xilinx_spi *xspi) +{ + xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_ptr += 4; +} + +static void xspi_rx8(struct xilinx_spi *xspi) { + u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); + if (xspi->rx_ptr) { + *xspi->rx_ptr = data & 0xff; + xspi->rx_ptr++; + } +} + +static void xspi_rx16(struct xilinx_spi *xspi) +{ + u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); + if (xspi->rx_ptr) { + *(u16 *)(xspi->rx_ptr) = data & 0xffff; + xspi->rx_ptr += 2; + } +} + +static void xspi_rx32(struct xilinx_spi *xspi) +{ + u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); + if (xspi->rx_ptr) { + *(u32 *)(xspi->rx_ptr) = data; + xspi->rx_ptr += 4; + } +} + +static void xspi_init_hw(struct xilinx_spi *xspi) +{ + void __iomem *regs_base = xspi->regs; + /* Reset the SPI device */ - out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET, - XIPIF_V123B_RESET_MASK); + xspi->write_fn(XIPIF_V123B_RESET_MASK, + regs_base + XIPIF_V123B_RESETR_OFFSET); /* Disable all the interrupts just in case */ - out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0); + xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); /* Enable the global IPIF interrupt */ - out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET, - XIPIF_V123B_GINTR_ENABLE); + xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, + regs_base + XIPIF_V123B_DGIER_OFFSET); /* Deselect the slave on the SPI bus */ - out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff); + xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); /* Disable the transmitter, enable Manual Slave Select Assertion, * put SPI controller into master mode, and enable it */ - out_be16(regs_base + XSPI_CR_OFFSET, - XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT - | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE); + xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | + XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | + XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); } static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) @@ -115,16 +186,16 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) if (is_on == BITBANG_CS_INACTIVE) { /* Deselect the slave on the SPI bus */ - out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff); + xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); } else if 
(is_on == BITBANG_CS_ACTIVE) { /* Set the SPI clock phase and polarity */ - u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET) + u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK; if (spi->mode & SPI_CPHA) cr |= XSPI_CR_CPHA; if (spi->mode & SPI_CPOL) cr |= XSPI_CR_CPOL; - out_be16(xspi->regs + XSPI_CR_OFFSET, cr); + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); /* We do not check spi->max_speed_hz here as the SPI clock * frequency is not software programmable (the IP block design @@ -132,24 +203,27 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) */ /* Activate the chip select */ - out_be32(xspi->regs + XSPI_SSR_OFFSET, - ~(0x0001 << spi->chip_select)); + xspi->write_fn(~(0x0001 << spi->chip_select), + xspi->regs + XSPI_SSR_OFFSET); } } /* spi_bitbang requires custom setup_transfer() to be defined if there is a * custom txrx_bufs(). We have nothing to setup here as the SPI IP block - * supports just 8 bits per word, and SPI clock can't be changed in software. - * Check for 8 bits per word. Chip select delay calculations could be + * supports 8 or 16 bits per word which cannot be changed in software. + * SPI clock can't be changed in software either. + * Check for correct bits per word. Chip select delay calculations could be * added here as soon as bitbang_work() can be made aware of the delay value. */ static int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { + struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); u8 bits_per_word; - bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; - if (bits_per_word != 8) { + bits_per_word = (t && t->bits_per_word) + ? t->bits_per_word : spi->bits_per_word; + if (bits_per_word != xspi->bits_per_word) { dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", __func__, bits_per_word); return -EINVAL; @@ -160,17 +234,16 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi, static int xilinx_spi_setup(struct spi_device *spi) { - struct spi_bitbang *bitbang; - struct xilinx_spi *xspi; - int retval; - - xspi = spi_master_get_devdata(spi->master); - bitbang = &xspi->bitbang; - - retval = xilinx_spi_setup_transfer(spi, NULL); - if (retval < 0) - return retval; - + /* always return 0, we can not check the number of bits. + * There are cases when SPI setup is called before any driver is + * there, in that case the SPI core defaults to 8 bits, which we + * do not support in some cases. But if we return an error, the + * SPI device would not be registered and no driver can get hold of it + * When the driver is there, it will call SPI setup again with the + * correct number of bits per transfer. 
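+	 * The real check is done in xilinx_spi_setup_transfer(), which
+	 * compares the requested width with the width the IP core was
+	 * built for (xspi->bits_per_word).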
+ * If a driver setups with the wrong bit number, it will fail when + * it tries to do a transfer + */ return 0; } @@ -179,15 +252,14 @@ static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) u8 sr; /* Fill the Tx FIFO with as many bytes as possible */ - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { - if (xspi->tx_ptr) { - out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++); - } else { - out_8(xspi->regs + XSPI_TXD_OFFSET, 0); - } - xspi->remaining_bytes--; - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + if (xspi->tx_ptr) + xspi->tx_fn(xspi); + else + xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); + xspi->remaining_bytes -= xspi->bits_per_word / 8; + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); } } @@ -209,18 +281,19 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) /* Enable the transmit empty interrupt, which we use to determine * progress on the transmission. */ - ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET); - out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, - ipif_ier | XSPI_INTR_TX_EMPTY); + ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET); + xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, + xspi->regs + XIPIF_V123B_IIER_OFFSET); /* Start the transfer by not inhibiting the transmitter any longer */ - cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT; - out_be16(xspi->regs + XSPI_CR_OFFSET, cr); + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & + ~XSPI_CR_TRANS_INHIBIT; + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); wait_for_completion(&xspi->done); /* Disable the transmit empty interrupt */ - out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier); + xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); return t->len - xspi->remaining_bytes; } @@ -237,8 +310,8 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) u32 ipif_isr; /* Get the IPIF interrupts, and clear them immediately */ - ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET); - out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr); + ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET); + xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ u16 cr; @@ -249,20 +322,15 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) * transmitter while the Isr refills the transmit register/FIFO, * or make sure it is stopped if we're done. */ - cr = in_be16(xspi->regs + XSPI_CR_OFFSET); - out_be16(xspi->regs + XSPI_CR_OFFSET, - cr | XSPI_CR_TRANS_INHIBIT); + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, + xspi->regs + XSPI_CR_OFFSET); /* Read out all the data from the Rx FIFO */ - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { - u8 data; - - data = in_8(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { - *xspi->rx_ptr++ = data; - } - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + xspi->rx_fn(xspi); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); } /* See if there is more data to send */ @@ -271,7 +339,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) /* Start the transfer by not inhibiting the * transmitter any longer */ - out_be16(xspi->regs + XSPI_CR_OFFSET, cr); + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); } else { /* No more data to send. 
* Indicate the transfer is completed. @@ -283,40 +351,25 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int __init xilinx_spi_of_probe(struct of_device *ofdev, - const struct of_device_id *match) +#ifdef CONFIG_OF +static const struct of_device_id xilinx_spi_of_match[] = { + { .compatible = "xlnx,xps-spi-2.00.a", }, + { .compatible = "xlnx,xps-spi-2.00.b", }, + {} +}; +MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); +#endif + +struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, + u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word) { struct spi_master *master; struct xilinx_spi *xspi; - struct resource r_irq_struct; - struct resource r_mem_struct; - - struct resource *r_irq = &r_irq_struct; - struct resource *r_mem = &r_mem_struct; - int rc = 0; - const u32 *prop; - int len; - - /* Get resources(memory, IRQ) associated with the device */ - master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi)); - - if (master == NULL) { - return -ENOMEM; - } - - dev_set_drvdata(&ofdev->dev, master); - - rc = of_address_to_resource(ofdev->node, 0, r_mem); - if (rc) { - dev_warn(&ofdev->dev, "invalid address\n"); - goto put_master; - } + int ret; - rc = of_irq_to_resource(ofdev->node, 0, r_irq); - if (rc == NO_IRQ) { - dev_warn(&ofdev->dev, "no IRQ found\n"); - goto put_master; - } + master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); + if (!master) + return NULL; /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA; @@ -329,128 +382,181 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev, xspi->bitbang.master->setup = xilinx_spi_setup; init_completion(&xspi->done); - xspi->irq = r_irq->start; - - if (!request_mem_region(r_mem->start, - r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) { - rc = -ENXIO; - dev_warn(&ofdev->dev, "memory request failure\n"); + if (!request_mem_region(mem->start, resource_size(mem), + XILINX_SPI_NAME)) goto put_master; - } - xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1); + xspi->regs = ioremap(mem->start, resource_size(mem)); if (xspi->regs == NULL) { - rc = -ENOMEM; - dev_warn(&ofdev->dev, "ioremap failure\n"); - goto release_mem; + dev_warn(dev, "ioremap failure\n"); + goto map_failed; } - xspi->irq = r_irq->start; - - /* dynamic bus assignment */ - master->bus_num = -1; - /* number of slave select bits is required */ - prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len); - if (!prop || len < sizeof(*prop)) { - dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); - goto unmap_io; + master->bus_num = bus_num; + master->num_chipselect = num_cs; +#ifdef CONFIG_OF + master->dev.of_node = dev->of_node; +#endif + + xspi->mem = *mem; + xspi->irq = irq; + if (little_endian) { + xspi->read_fn = xspi_read32; + xspi->write_fn = xspi_write32; + } else { + xspi->read_fn = xspi_read32_be; + xspi->write_fn = xspi_write32_be; } - master->num_chipselect = *prop; + xspi->bits_per_word = bits_per_word; + if (xspi->bits_per_word == 8) { + xspi->tx_fn = xspi_tx8; + xspi->rx_fn = xspi_rx8; + } else if (xspi->bits_per_word == 16) { + xspi->tx_fn = xspi_tx16; + xspi->rx_fn = xspi_rx16; + } else if (xspi->bits_per_word == 32) { + xspi->tx_fn = xspi_tx32; + xspi->rx_fn = xspi_rx32; + } else + goto unmap_io; + /* SPI controller initializations */ - xspi_init_hw(xspi->regs); + xspi_init_hw(xspi); /* Register for SPI Interrupt */ - rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); - if (rc != 0) { - 
dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq); + ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); + if (ret) goto unmap_io; - } - rc = spi_bitbang_start(&xspi->bitbang); - if (rc != 0) { - dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n"); + ret = spi_bitbang_start(&xspi->bitbang); + if (ret) { + dev_err(dev, "spi_bitbang_start FAILED\n"); goto free_irq; } - dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n", - (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq); - - /* Add any subnodes on the SPI bus */ - of_register_spi_devices(master, ofdev->node); - - return rc; + dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n", + (unsigned long long)mem->start, xspi->regs, xspi->irq); + return master; free_irq: free_irq(xspi->irq, xspi); unmap_io: iounmap(xspi->regs); -release_mem: - release_mem_region(r_mem->start, resource_size(r_mem)); +map_failed: + release_mem_region(mem->start, resource_size(mem)); put_master: spi_master_put(master); - return rc; + return NULL; } +EXPORT_SYMBOL(xilinx_spi_init); -static int __devexit xilinx_spi_remove(struct of_device *ofdev) +void xilinx_spi_deinit(struct spi_master *master) { struct xilinx_spi *xspi; - struct spi_master *master; - struct resource r_mem; - master = platform_get_drvdata(ofdev); xspi = spi_master_get_devdata(master); spi_bitbang_stop(&xspi->bitbang); free_irq(xspi->irq, xspi); iounmap(xspi->regs); - if (!of_address_to_resource(ofdev->node, 0, &r_mem)) - release_mem_region(r_mem.start, resource_size(&r_mem)); - dev_set_drvdata(&ofdev->dev, 0); + + release_mem_region(xspi->mem.start, resource_size(&xspi->mem)); spi_master_put(xspi->bitbang.master); +} +EXPORT_SYMBOL(xilinx_spi_deinit); + +static int __devinit xilinx_spi_probe(struct platform_device *dev) +{ + struct xspi_platform_data *pdata; + struct resource *r; + int irq, num_cs = 0, little_endian = 0, bits_per_word = 8; + struct spi_master *master; + u8 i; + + pdata = dev->dev.platform_data; + if (pdata) { + num_cs = pdata->num_chipselect; + little_endian = pdata->little_endian; + bits_per_word = pdata->bits_per_word; + } + +#ifdef CONFIG_OF + if (dev->dev.of_node) { + const __be32 *prop; + int len; + + /* number of slave select bits is required */ + prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits", + &len); + if (prop && len >= sizeof(*prop)) + num_cs = __be32_to_cpup(prop); + } +#endif + + if (!num_cs) { + dev_err(&dev->dev, "Missing slave select configuration data\n"); + return -EINVAL; + } + + r = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!r) + return -ENODEV; + + irq = platform_get_irq(dev, 0); + if (irq < 0) + return -ENXIO; + + master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs, + little_endian, bits_per_word); + if (!master) + return -ENODEV; + + if (pdata) { + for (i = 0; i < pdata->num_devices; i++) + spi_new_device(master, pdata->devices + i); + } + + platform_set_drvdata(dev, master); return 0; } -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:" XILINX_SPI_NAME); - -static int __exit xilinx_spi_of_remove(struct of_device *op) +static int __devexit xilinx_spi_remove(struct platform_device *dev) { - return xilinx_spi_remove(op); -} + xilinx_spi_deinit(platform_get_drvdata(dev)); + platform_set_drvdata(dev, 0); -static struct of_device_id xilinx_spi_of_match[] = { - { .compatible = "xlnx,xps-spi-2.00.a", }, - { .compatible = "xlnx,xps-spi-2.00.b", }, - {} -}; + return 0; +} -MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:" 
XILINX_SPI_NAME); -static struct of_platform_driver xilinx_spi_of_driver = { - .owner = THIS_MODULE, - .name = "xilinx-xps-spi", - .match_table = xilinx_spi_of_match, - .probe = xilinx_spi_of_probe, - .remove = __exit_p(xilinx_spi_of_remove), +static struct platform_driver xilinx_spi_driver = { + .probe = xilinx_spi_probe, + .remove = __devexit_p(xilinx_spi_remove), .driver = { - .name = "xilinx-xps-spi", + .name = XILINX_SPI_NAME, .owner = THIS_MODULE, +#ifdef CONFIG_OF + .of_match_table = xilinx_spi_of_match, +#endif }, }; -static int __init xilinx_spi_init(void) +static int __init xilinx_spi_pltfm_init(void) { - return of_register_platform_driver(&xilinx_spi_of_driver); + return platform_driver_register(&xilinx_spi_driver); } -module_init(xilinx_spi_init); +module_init(xilinx_spi_pltfm_init); -static void __exit xilinx_spi_exit(void) +static void __exit xilinx_spi_pltfm_exit(void) { - of_unregister_platform_driver(&xilinx_spi_of_driver); + platform_driver_unregister(&xilinx_spi_driver); } -module_exit(xilinx_spi_exit); +module_exit(xilinx_spi_pltfm_exit); + MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); MODULE_DESCRIPTION("Xilinx SPI driver"); MODULE_LICENSE("GPL"); |
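
The reworked xilinx_spi probe above can be fed either from the device tree ("xlnx,xps-spi-2.00.a"/".b") or from platform data. A hypothetical board-support sketch for the platform-data path follows; the base address, IRQ number, and the spidev slave entry are invented for illustration, and the device would be registered from board init code with platform_device_register():

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>

static struct spi_board_info example_xspi_slaves[] = {
	{
		.modalias	= "spidev",	/* invented slave for the example */
		.max_speed_hz	= 1000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data example_xspi_pdata = {
	.num_chipselect	= 1,
	.little_endian	= 1,		/* selects the ioread32/iowrite32 accessors */
	.bits_per_word	= 8,		/* must match the synthesised core */
	.devices	= example_xspi_slaves,
	.num_devices	= ARRAY_SIZE(example_xspi_slaves),
};

static struct resource example_xspi_resources[] = {
	{
		.start	= 0x40000000,	/* invented register base */
		.end	= 0x400000ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,		/* invented interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_xspi_device = {
	.name		= "xilinx_spi",	/* matches XILINX_SPI_NAME in the driver */
	.id		= 0,		/* becomes the SPI bus number */
	.resource	= example_xspi_resources,
	.num_resources	= ARRAY_SIZE(example_xspi_resources),
	.dev		= {
		.platform_data	= &example_xspi_pdata,
	},
};

platform_get_resource() and platform_get_irq() in the probe pick up the two resources above, and spi_new_device() registers each entry in the devices array on the newly created bus.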