| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-02 14:33:21 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-02 14:33:21 -0700 |
| commit | 0bf6a210a43f7118d858806200127e421649fc4e (patch) | |
| tree | 9a17d88ebd1b9bc693fba7f39c12123dec96e930 /drivers | |
| parent | ee1a8d402e7e204d57fb108aa40003b6d1633036 (diff) | |
| parent | 5c913a9a9772f4b434aaea7328836419287b5d1c (diff) | |
Merge tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver specific changes from Arnd Bergmann:
"These changes are all driver specific and cross over between arm-soc
contents and some other subsystem, in these cases cpufreq, crypto,
dma, pinctrl, mailbox and usb, and the subsystem owners agreed to have
these changes merged through arm-soc.
As we proceed to untangle the dependencies between platform code and
driver code, the amount of changes in this category is fortunately
shrinking, for 3.11 we have 16 branches here and 101 non-merge
changesets, the majority of which are for the stedma40 dma engine
driver used in the ux500 platform. Cleaning up that code touches
multiple subsystems, but gets rid of the dependency in the end.
The mailbox code moved out from mach-omap2 to drivers/mailbox is an
intermediate step and is still omap specific at the moment. Patches
exist to generalize the subsystem and add other drivers with the same
API, but those did not make it for 3.11."
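For orientation on the stedma40/ux500 cleanup mentioned above: the crypto and hash patches in this pull replace direct calls through `chan->device->device_prep_slave_sg()` and `desc->tx_submit()` with the generic dmaengine wrappers. A minimal sketch of that pattern, not taken from this merge; the function name, FIFO address parameter, and burst/width values are illustrative only:

```c
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: queue a mem-to-device slave transfer on @chan. */
static int example_start_tx(struct dma_chan *chan, struct scatterlist *sg,
			    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,		/* device FIFO (placeholder) */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* per-channel configuration through the generic API */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* replaces open-coded chan->device->device_prep_slave_sg(..., NULL) */
	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);		/* replaces desc->tx_submit(desc) */
	dma_async_issue_pending(chan);	/* kick the engine */
	return 0;
}
```

Teardown follows the same idea: the ux500 patches below call dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0) instead of reaching into chan->device->device_control() by hand.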
* tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (101 commits)
crypto: ux500: use dmaengine_submit API
crypto: ux500: use dmaengine_prep_slave_sg API
crypto: ux500: use dmaengine_device_control API
crypto: ux500/crypt: add missing __iomem qualifiers
crypto: ux500/hash: add missing static qualifiers
crypto: ux500/hash: use readl on iomem addresses
dmaengine: ste_dma40: Declare memcpy config as static
ARM: ux500: Remove mop500_snowball_ethernet_clock_enable()
ARM: ux500: Correct the EN_3v3 regulator's on/off GPIO
ARM: ux500: Provide a AB8500 GPIO Device Tree node
gpio: rcar: fix gpio_rcar_of_table
gpio-rcar: Remove #ifdef CONFIG_OF around OF-specific sections
gpio-rcar: Reference core gpio documentation in the DT bindings
clk: exynos5250: Add enum entries for divider clock of i2s1 and i2s2
ARM: dts: Update Samsung I2S documentation
ARM: dts: add clock provider information for i2s controllers in Exynos5250
ARM: dts: add Exynos audio subsystem clock controller node
clk: samsung: register audio subsystem clocks using common clock framework
ARM: dts: use #include for all device trees for Samsung
pinctrl: s3c24xx: use correct header for chained_irq functions
...
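Two of the ux500 entries above ("add missing __iomem qualifiers", "use readl on iomem addresses") enforce the same rule: registers mapped with ioremap() live behind a `void __iomem *` and must be accessed with readl()/writel() (or their _relaxed variants) rather than dereferenced like ordinary memory, which also lets sparse flag violations. A hedged sketch of that idiom, with placeholder register names and offsets rather than anything from the ux500 drivers:

```c
#include <linux/io.h>
#include <linux/bitops.h>

#define MY_STATUS_REG	0x08		/* placeholder offset */
#define MY_BUSY_MASK	BIT(0)		/* placeholder busy bit */

/* @base comes from ioremap()/of_iomap(); the __iomem annotation is what
 * lets sparse catch plain dereferences such as base->status. */
static void wait_until_idle(void __iomem *base)
{
	while (readl(base + MY_STATUS_REG) & MY_BUSY_MASK)
		cpu_relax();
}
```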
Diffstat (limited to 'drivers')
44 files changed, 4899 insertions, 572 deletions
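The first hunks below add the Exynos audio-subsystem clock controller as a common clock framework provider. For orientation, the consumer side of such a provider looks roughly like the sketch here; it is an assumption-laden illustration only — the connection id passed to devm_clk_get() depends on the board's clock-names property and is a placeholder, not taken from the Samsung I2S bindings in this merge:

```c
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_audio_probe(struct platform_device *pdev)
{
	struct clk *sclk;
	int ret;

	/* resolved through the DT clock/clock-names properties that the
	 * dts patches in this series wire up; "i2s_opclk0" is a placeholder */
	sclk = devm_clk_get(&pdev->dev, "i2s_opclk0");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	ret = clk_prepare_enable(sclk);	/* ungated by the audss controller */
	if (ret)
		return ret;

	/* ... program the block ... */

	clk_disable_unprepare(sclk);
	return 0;
}
```

The gate clocks themselves ("srp_clk", "i2s_bus", "sclk_i2s", ...) are the ones registered by clk-exynos-audss.c in the diff that follows.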
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index b7c232e6742..187681013bd 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o
 obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
 obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
+obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
new file mode 100644
index 00000000000..9b1bbd52fd1
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Author: Padmavathi Venna <padma.v@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Audio Subsystem Clock Controller.
+*/
+
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clk/exynos-audss-clk.h>
+
+static DEFINE_SPINLOCK(lock);
+static struct clk **clk_table;
+static void __iomem *reg_base;
+static struct clk_onecell_data clk_data;
+
+#define ASS_CLK_SRC 0x0
+#define ASS_CLK_DIV 0x4
+#define ASS_CLK_GATE 0x8
+
+static unsigned long reg_save[][2] = {
+	{ASS_CLK_SRC, 0},
+	{ASS_CLK_DIV, 0},
+	{ASS_CLK_GATE, 0},
+};
+
+/* list of all parent clock list */
+static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
+static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_audss_clk_suspend(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(reg_save); i++)
+		reg_save[i][1] = readl(reg_base + reg_save[i][0]);
+
+	return 0;
+}
+
+static void exynos_audss_clk_resume(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(reg_save); i++)
+		writel(reg_save[i][1], reg_base + reg_save[i][0]);
+}
+
+static struct syscore_ops exynos_audss_clk_syscore_ops = {
+	.suspend = exynos_audss_clk_suspend,
+	.resume = exynos_audss_clk_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+/* register exynos_audss clocks */
+void __init exynos_audss_clk_init(struct device_node *np)
+{
+	reg_base = of_iomap(np, 0);
+	if (!reg_base) {
+		pr_err("%s: failed to map audss registers\n", __func__);
+		return;
+	}
+
+	clk_table = kzalloc(sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
+				GFP_KERNEL);
+	if (!clk_table) {
+		pr_err("%s: could not allocate clk lookup table\n", __func__);
+		return;
+	}
+
+	clk_data.clks = clk_table;
+	clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+	clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
+				mout_audss_p, ARRAY_SIZE(mout_audss_p), 0,
+				reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
+
+	clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
+				mout_i2s_p, ARRAY_SIZE(mout_i2s_p), 0,
+				reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
+
+	clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
+				"mout_audss", 0, reg_base + ASS_CLK_DIV, 0, 4,
+				0, &lock);
+
+	clk_table[EXYNOS_DOUT_AUD_BUS] = clk_register_divider(NULL,
+				"dout_aud_bus", "dout_srp", 0,
+				reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
+
+	clk_table[EXYNOS_DOUT_I2S] = clk_register_divider(NULL, "dout_i2s",
+				"mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
+				&lock);
+
+	clk_table[EXYNOS_SRP_CLK] = clk_register_gate(NULL, "srp_clk",
+				"dout_srp", CLK_SET_RATE_PARENT,
+				reg_base + ASS_CLK_GATE, 0, 0, &lock);
+
+	clk_table[EXYNOS_I2S_BUS] = clk_register_gate(NULL, "i2s_bus",
+				"dout_aud_bus", CLK_SET_RATE_PARENT,
+				reg_base + ASS_CLK_GATE, 2, 0, &lock);
+
+	clk_table[EXYNOS_SCLK_I2S] = clk_register_gate(NULL, "sclk_i2s",
+				"dout_i2s", CLK_SET_RATE_PARENT,
+				reg_base + ASS_CLK_GATE, 3, 0, &lock);
+
+	clk_table[EXYNOS_PCM_BUS] = clk_register_gate(NULL, "pcm_bus",
+				"sclk_pcm", CLK_SET_RATE_PARENT,
+				reg_base + ASS_CLK_GATE, 4, 0, &lock);
+
+	clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
+				"div_pcm0", CLK_SET_RATE_PARENT,
+				reg_base + ASS_CLK_GATE, 5, 0, &lock);
+
+#ifdef CONFIG_PM_SLEEP
+	register_syscore_ops(&exynos_audss_clk_syscore_ops);
+#endif
+
+	pr_info("Exynos: Audss: clock setup completed\n");
+}
+CLK_OF_DECLARE(exynos4210_audss_clk, "samsung,exynos4210-audss-clock",
+		exynos_audss_clk_init);
+CLK_OF_DECLARE(exynos5250_audss_clk, "samsung,exynos5250-audss-clock",
+		exynos_audss_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 22d7699e7ce..6f767c515ec 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -87,6 +87,7 @@ enum exynos5250_clks {
 	sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
 	sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
 	sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
+	div_i2s1, div_i2s2,
 
 	/* gate clocks */
 	gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
@@ -291,8 +292,8 @@ struct samsung_div_clock exynos5250_div_clks[] __initdata = {
 	DIV(none, "div_pcm1", "sclk_audio1", DIV_PERIC4, 4, 8),
 	DIV(none, "div_audio2", "mout_audio2", DIV_PERIC4, 16, 4),
 	DIV(none, "div_pcm2", "sclk_audio2", DIV_PERIC4, 20, 8),
-	DIV(none, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
-	DIV(none, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
+	DIV(div_i2s1, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
+	DIV(div_i2s2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
 	DIV(sclk_pixel, "div_hdmi_pixel", "sclk_vpll", DIV_DISP1_0, 28, 4),
 	DIV_A(none, "armclk", "div_arm", DIV_CPU0, 28, 3, "armclk"),
 	DIV_F(none, "div_mipi1_pre", "div_mipi1",
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 54f3d119d99..77398f8c19a 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -10,7 +10,7 @@
  * DBx500-PRCMU Timer
  * The PRCMU has 5 timers which are available in a always-on
  * power domain. We use the Timer 4 for our always-on clock
- * source on DB8500 and Timer 3 on DB5500.
+ * source on DB8500.
*/ #include <linux/clockchips.h> #include <linux/clksrc-dbx500-prcmu.h> @@ -30,15 +30,14 @@ static void __iomem *clksrc_dbx500_timer_base; -static cycle_t clksrc_dbx500_prcmu_read(struct clocksource *cs) +static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) { + void __iomem *base = clksrc_dbx500_timer_base; u32 count, count2; do { - count = readl(clksrc_dbx500_timer_base + - PRCMU_TIMER_DOWNCOUNT); - count2 = readl(clksrc_dbx500_timer_base + - PRCMU_TIMER_DOWNCOUNT); + count = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT); + count2 = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT); } while (count2 != count); /* Negate because the timer is a decrementing counter */ diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 6e57543fe0b..a9244089686 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -96,6 +96,56 @@ config ARM_OMAP2PLUS_CPUFREQ default ARCH_OMAP2PLUS select CPU_FREQ_TABLE +config ARM_S3C_CPUFREQ + bool + help + Internal configuration node for common cpufreq on Samsung SoC + +config ARM_S3C24XX_CPUFREQ + bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)" + depends on ARCH_S3C24XX + select ARM_S3C_CPUFREQ + help + This enables the CPUfreq driver for the Samsung S3C24XX family + of CPUs. + + For details, take a look at <file:Documentation/cpu-freq>. + + If in doubt, say N. + +config ARM_S3C24XX_CPUFREQ_DEBUG + bool "Debug CPUfreq Samsung driver core" + depends on ARM_S3C24XX_CPUFREQ + help + Enable s3c_freq_dbg for the Samsung S3C CPUfreq core + +config ARM_S3C24XX_CPUFREQ_IODEBUG + bool "Debug CPUfreq Samsung driver IO timing" + depends on ARM_S3C24XX_CPUFREQ + help + Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core + +config ARM_S3C24XX_CPUFREQ_DEBUGFS + bool "Export debugfs for CPUFreq" + depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS + help + Export status information via debugfs. + +config ARM_S3C2410_CPUFREQ + bool + depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410 + select S3C2410_CPUFREQ_UTILS + help + CPU Frequency scaling support for S3C2410 + +config ARM_S3C2412_CPUFREQ + bool + depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412 + default y + select S3C2412_IOTIMING + help + CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs. + config ARM_S3C2416_CPUFREQ bool "S3C2416 CPU Frequency scaling support" depends on CPU_S3C2416 @@ -118,6 +168,14 @@ config ARM_S3C2416_CPUFREQ_VCORESCALE If in doubt, say N. +config ARM_S3C2440_CPUFREQ + bool "S3C2440/S3C2442 CPU Frequency scaling support" + depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442) + select S3C2410_CPUFREQ_UTILS + default y + help + CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs. 
+ config ARM_S3C64XX_CPUFREQ bool "Samsung S3C64XX" depends on CPU_S3C6410 diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 315b9231feb..6ad0b913ca1 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -65,7 +65,12 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o +obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o +obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o +obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o +obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o +obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c new file mode 100644 index 00000000000..cfa0dd8723e --- /dev/null +++ b/drivers/cpufreq/s3c2410-cpufreq.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2006-2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C2410 CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <mach/regs-clock.h> + +#include <plat/cpu.h> +#include <plat/clock.h> +#include <plat/cpu-freq-core.h> + +/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */ + +static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + u32 clkdiv = 0; + + if (cfg->divs.h_divisor == 2) + clkdiv |= S3C2410_CLKDIVN_HDIVN; + + if (cfg->divs.p_divisor != cfg->divs.h_divisor) + clkdiv |= S3C2410_CLKDIVN_PDIVN; + + __raw_writel(clkdiv, S3C2410_CLKDIVN); +} + +static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned long hclk, fclk, pclk; + unsigned int hdiv, pdiv; + unsigned long hclk_max; + + fclk = cfg->freq.fclk; + hclk_max = cfg->max.hclk; + + cfg->freq.armclk = fclk; + + s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n", + __func__, fclk, hclk_max); + + hdiv = (fclk > cfg->max.hclk) ? 2 : 1; + hclk = fclk / hdiv; + + if (hclk > cfg->max.hclk) { + s3c_freq_dbg("%s: hclk too big\n", __func__); + return -EINVAL; + } + + pdiv = (hclk > cfg->max.pclk) ? 
2 : 1; + pclk = hclk / pdiv; + + if (pclk > cfg->max.pclk) { + s3c_freq_dbg("%s: pclk too big\n", __func__); + return -EINVAL; + } + + pdiv *= hdiv; + + /* record the result */ + cfg->divs.p_divisor = pdiv; + cfg->divs.h_divisor = hdiv; + + return 0; +} + +static struct s3c_cpufreq_info s3c2410_cpufreq_info = { + .max = { + .fclk = 200000000, + .hclk = 100000000, + .pclk = 50000000, + }, + + /* transition latency is about 5ms worst-case, so + * set 10ms to be sure */ + .latency = 10000000, + + .locktime_m = 150, + .locktime_u = 150, + .locktime_bits = 12, + + .need_pll = 1, + + .name = "s3c2410", + .calc_iotiming = s3c2410_iotiming_calc, + .set_iotiming = s3c2410_iotiming_set, + .get_iotiming = s3c2410_iotiming_get, + .resume_clocks = s3c2410_setup_clocks, + + .set_fvco = s3c2410_set_fvco, + .set_refresh = s3c2410_cpufreq_setrefresh, + .set_divs = s3c2410_cpufreq_setdivs, + .calc_divs = s3c2410_cpufreq_calcdivs, + + .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), +}; + +static int s3c2410_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + return s3c_cpufreq_register(&s3c2410_cpufreq_info); +} + +static struct subsys_interface s3c2410_cpufreq_interface = { + .name = "s3c2410_cpufreq", + .subsys = &s3c2410_subsys, + .add_dev = s3c2410_cpufreq_add, +}; + +static int __init s3c2410_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2410_cpufreq_interface); +} +arch_initcall(s3c2410_cpufreq_init); + +static int s3c2410a_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + /* alter the maximum freq settings for S3C2410A. If a board knows + * it only has a maximum of 200, then it should register its own + * limits. */ + + s3c2410_cpufreq_info.max.fclk = 266000000; + s3c2410_cpufreq_info.max.hclk = 133000000; + s3c2410_cpufreq_info.max.pclk = 66500000; + s3c2410_cpufreq_info.name = "s3c2410a"; + + return s3c2410_cpufreq_add(dev, sif); +} + +static struct subsys_interface s3c2410a_cpufreq_interface = { + .name = "s3c2410a_cpufreq", + .subsys = &s3c2410a_subsys, + .add_dev = s3c2410a_cpufreq_add, +}; + +static int __init s3c2410a_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2410a_cpufreq_interface); +} +arch_initcall(s3c2410a_cpufreq_init); diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c new file mode 100644 index 00000000000..4645b489899 --- /dev/null +++ b/drivers/cpufreq/s3c2412-cpufreq.c @@ -0,0 +1,257 @@ +/* + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C2412 CPU Frequency scalling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <mach/regs-clock.h> +#include <mach/s3c2412.h> + +#include <plat/cpu.h> +#include <plat/clock.h> +#include <plat/cpu-freq-core.h> + +/* our clock resources. 
*/ +static struct clk *xtal; +static struct clk *fclk; +static struct clk *hclk; +static struct clk *armclk; + +/* HDIV: 1, 2, 3, 4, 6, 8 */ + +static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned int hdiv, pdiv, armdiv, dvs; + unsigned long hclk, fclk, armclk, armdiv_clk; + unsigned long hclk_max; + + fclk = cfg->freq.fclk; + armclk = cfg->freq.armclk; + hclk_max = cfg->max.hclk; + + /* We can't run hclk above armclk as at the best we have to + * have armclk and hclk in dvs mode. */ + + if (hclk_max > armclk) + hclk_max = armclk; + + s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n", + __func__, fclk, armclk, hclk_max); + s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n", + __func__, cfg->freq.fclk, cfg->freq.armclk, + cfg->freq.hclk, cfg->freq.pclk); + + armdiv = fclk / armclk; + + if (armdiv < 1) + armdiv = 1; + if (armdiv > 2) + armdiv = 2; + + cfg->divs.arm_divisor = armdiv; + armdiv_clk = fclk / armdiv; + + hdiv = armdiv_clk / hclk_max; + if (hdiv < 1) + hdiv = 1; + + cfg->freq.hclk = hclk = armdiv_clk / hdiv; + + /* set dvs depending on whether we reached armclk or not. */ + cfg->divs.dvs = dvs = armclk < armdiv_clk; + + /* update the actual armclk we achieved. */ + cfg->freq.armclk = dvs ? hclk : armdiv_clk; + + s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n", + __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs); + + if (hdiv > 4) + goto invalid; + + pdiv = (hclk > cfg->max.pclk) ? 2 : 1; + + if ((hclk / pdiv) > cfg->max.pclk) + pdiv++; + + cfg->freq.pclk = hclk / pdiv; + + s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv); + + if (pdiv > 2) + goto invalid; + + pdiv *= hdiv; + + /* store the result, and then return */ + + cfg->divs.h_divisor = hdiv * armdiv; + cfg->divs.p_divisor = pdiv * armdiv; + + return 0; + +invalid: + return -EINVAL; +} + +static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned long clkdiv; + unsigned long olddiv; + + olddiv = clkdiv = __raw_readl(S3C2410_CLKDIVN); + + /* clear off current clock info */ + + clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN; + clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK; + clkdiv &= ~S3C2412_CLKDIVN_PDIVN; + + if (cfg->divs.arm_divisor == 2) + clkdiv |= S3C2412_CLKDIVN_ARMDIVN; + + clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1); + + if (cfg->divs.p_divisor != cfg->divs.h_divisor) + clkdiv |= S3C2412_CLKDIVN_PDIVN; + + s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv); + __raw_writel(clkdiv, S3C2410_CLKDIVN); + + clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk); +} + +static void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) +{ + struct s3c_cpufreq_board *board = cfg->board; + unsigned long refresh; + + s3c_freq_dbg("%s: refresh %u ns, hclk %lu\n", __func__, + board->refresh, cfg->freq.hclk); + + /* Reduce both the refresh time (in ns) and the frequency (in MHz) + * by 10 each to ensure that we do not overflow 32 bit numbers. This + * should work for HCLK up to 133MHz and refresh period up to 30usec. + */ + + refresh = (board->refresh / 10); + refresh *= (cfg->freq.hclk / 100); + refresh /= (1 * 1000 * 1000); /* 10^6 */ + + s3c_freq_dbg("%s: setting refresh 0x%08lx\n", __func__, refresh); + __raw_writel(refresh, S3C2412_REFRESH); +} + +/* set the default cpu frequency information, based on an 200MHz part + * as we have no other way of detecting the speed rating in software. 
+ */ + +static struct s3c_cpufreq_info s3c2412_cpufreq_info = { + .max = { + .fclk = 200000000, + .hclk = 100000000, + .pclk = 50000000, + }, + + .latency = 5000000, /* 5ms */ + + .locktime_m = 150, + .locktime_u = 150, + .locktime_bits = 16, + + .name = "s3c2412", + .set_refresh = s3c2412_cpufreq_setrefresh, + .set_divs = s3c2412_cpufreq_setdivs, + .calc_divs = s3c2412_cpufreq_calcdivs, + + .calc_iotiming = s3c2412_iotiming_calc, + .set_iotiming = s3c2412_iotiming_set, + .get_iotiming = s3c2412_iotiming_get, + + .resume_clocks = s3c2412_setup_clocks, + + .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs), +}; + +static int s3c2412_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + unsigned long fclk_rate; + + hclk = clk_get(NULL, "hclk"); + if (IS_ERR(hclk)) { + printk(KERN_ERR "%s: cannot find hclk clock\n", __func__); + return -ENOENT; + } + + fclk = clk_get(NULL, "fclk"); + if (IS_ERR(fclk)) { + printk(KERN_ERR "%s: cannot find fclk clock\n", __func__); + goto err_fclk; + } + + fclk_rate = clk_get_rate(fclk); + if (fclk_rate > 200000000) { + printk(KERN_INFO + "%s: fclk %ld MHz, assuming 266MHz capable part\n", + __func__, fclk_rate / 1000000); + s3c2412_cpufreq_info.max.fclk = 266000000; + s3c2412_cpufreq_info.max.hclk = 133000000; + s3c2412_cpufreq_info.max.pclk = 66000000; + } + + armclk = clk_get(NULL, "armclk"); + if (IS_ERR(armclk)) { + printk(KERN_ERR "%s: cannot find arm clock\n", __func__); + goto err_armclk; + } + + xtal = clk_get(NULL, "xtal"); + if (IS_ERR(xtal)) { + printk(KERN_ERR "%s: cannot find xtal clock\n", __func__); + goto err_xtal; + } + + return s3c_cpufreq_register(&s3c2412_cpufreq_info); + +err_xtal: + clk_put(armclk); +err_armclk: + clk_put(fclk); +err_fclk: + clk_put(hclk); + + return -ENOENT; +} + +static struct subsys_interface s3c2412_cpufreq_interface = { + .name = "s3c2412_cpufreq", + .subsys = &s3c2412_subsys, + .add_dev = s3c2412_cpufreq_add, +}; + +static int s3c2412_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2412_cpufreq_interface); +} +arch_initcall(s3c2412_cpufreq_init); diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c new file mode 100644 index 00000000000..72b2cc8a5a8 --- /dev/null +++ b/drivers/cpufreq/s3c2440-cpufreq.c @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2006-2009 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * Vincent Sanders <vince@simtec.co.uk> + * + * S3C2440/S3C2442 CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> + +#include <mach/hardware.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <mach/regs-clock.h> + +#include <plat/cpu.h> +#include <plat/cpu-freq-core.h> +#include <plat/clock.h> + +static struct clk *xtal; +static struct clk *fclk; +static struct clk *hclk; +static struct clk *armclk; + +/* HDIV: 1, 2, 3, 4, 6, 8 */ + +static inline int within_khz(unsigned long a, unsigned long b) +{ + long diff = a - b; + + return (diff >= -1000 && diff <= 1000); +} + +/** + * s3c2440_cpufreq_calcdivs - calculate divider settings + * @cfg: The cpu frequency settings. + * + * Calcualte the divider values for the given frequency settings + * specified in @cfg. The values are stored in @cfg for later use + * by the relevant set routine if the request settings can be reached. + */ +int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned int hdiv, pdiv; + unsigned long hclk, fclk, armclk; + unsigned long hclk_max; + + fclk = cfg->freq.fclk; + armclk = cfg->freq.armclk; + hclk_max = cfg->max.hclk; + + s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n", + __func__, fclk, armclk, hclk_max); + + if (armclk > fclk) { + printk(KERN_WARNING "%s: armclk > fclk\n", __func__); + armclk = fclk; + } + + /* if we are in DVS, we need HCLK to be <= ARMCLK */ + if (armclk < fclk && armclk < hclk_max) + hclk_max = armclk; + + for (hdiv = 1; hdiv < 9; hdiv++) { + if (hdiv == 5 || hdiv == 7) + hdiv++; + + hclk = (fclk / hdiv); + if (hclk <= hclk_max || within_khz(hclk, hclk_max)) + break; + } + + s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv); + + if (hdiv > 8) + goto invalid; + + pdiv = (hclk > cfg->max.pclk) ? 2 : 1; + + if ((hclk / pdiv) > cfg->max.pclk) + pdiv++; + + s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv); + + if (pdiv > 2) + goto invalid; + + pdiv *= hdiv; + + /* calculate a valid armclk */ + + if (armclk < hclk) + armclk = hclk; + + /* if we're running armclk lower than fclk, this really means + * that the system should go into dvs mode, which means that + * armclk is connected to hclk. */ + if (armclk < fclk) { + cfg->divs.dvs = 1; + armclk = hclk; + } else + cfg->divs.dvs = 0; + + cfg->freq.armclk = armclk; + + /* store the result, and then return */ + + cfg->divs.h_divisor = hdiv; + cfg->divs.p_divisor = pdiv; + + return 0; + + invalid: + return -EINVAL; +} + +#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \ + S3C2440_CAMDIVN_HCLK4_HALF) + +/** + * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings + * @cfg: The cpu frequency settings. + * + * Set the divisors from the settings in @cfg, which where generated + * during the calculation phase by s3c2440_cpufreq_calcdivs(). 
+ */ +static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned long clkdiv, camdiv; + + s3c_freq_dbg("%s: divsiors: h=%d, p=%d\n", __func__, + cfg->divs.h_divisor, cfg->divs.p_divisor); + + clkdiv = __raw_readl(S3C2410_CLKDIVN); + camdiv = __raw_readl(S3C2440_CAMDIVN); + + clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN); + camdiv &= ~CAMDIVN_HCLK_HALF; + + switch (cfg->divs.h_divisor) { + case 1: + clkdiv |= S3C2440_CLKDIVN_HDIVN_1; + break; + + case 2: + clkdiv |= S3C2440_CLKDIVN_HDIVN_2; + break; + + case 6: + camdiv |= S3C2440_CAMDIVN_HCLK3_HALF; + case 3: + clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6; + break; + + case 8: + camdiv |= S3C2440_CAMDIVN_HCLK4_HALF; + case 4: + clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8; + break; + + default: + BUG(); /* we don't expect to get here. */ + } + + if (cfg->divs.p_divisor != cfg->divs.h_divisor) + clkdiv |= S3C2440_CLKDIVN_PDIVN; + + /* todo - set pclk. */ + + /* Write the divisors first with hclk intentionally halved so that + * when we write clkdiv we will under-frequency instead of over. We + * then make a short delay and remove the hclk halving if necessary. + */ + + __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN); + __raw_writel(clkdiv, S3C2410_CLKDIVN); + + ndelay(20); + __raw_writel(camdiv, S3C2440_CAMDIVN); + + clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk); +} + +static int run_freq_for(unsigned long max_hclk, unsigned long fclk, + int *divs, + struct cpufreq_frequency_table *table, + size_t table_size) +{ + unsigned long freq; + int index = 0; + int div; + + for (div = *divs; div > 0; div = *divs++) { + freq = fclk / div; + + if (freq > max_hclk && div != 1) + continue; + + freq /= 1000; /* table is in kHz */ + index = s3c_cpufreq_addfreq(table, index, table_size, freq); + if (index < 0) + break; + } + + return index; +} + +static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 }; + +static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg, + struct cpufreq_frequency_table *table, + size_t table_size) +{ + int ret; + + WARN_ON(cfg->info == NULL); + WARN_ON(cfg->board == NULL); + + ret = run_freq_for(cfg->info->max.hclk, + cfg->info->max.fclk, + hclk_divs, + table, table_size); + + s3c_freq_dbg("%s: returning %d\n", __func__, ret); + + return ret; +} + +struct s3c_cpufreq_info s3c2440_cpufreq_info = { + .max = { + .fclk = 400000000, + .hclk = 133333333, + .pclk = 66666666, + }, + + .locktime_m = 300, + .locktime_u = 300, + .locktime_bits = 16, + + .name = "s3c244x", + .calc_iotiming = s3c2410_iotiming_calc, + .set_iotiming = s3c2410_iotiming_set, + .get_iotiming = s3c2410_iotiming_get, + .set_fvco = s3c2410_set_fvco, + + .set_refresh = s3c2410_cpufreq_setrefresh, + .set_divs = s3c2440_cpufreq_setdivs, + .calc_divs = s3c2440_cpufreq_calcdivs, + .calc_freqtable = s3c2440_cpufreq_calctable, + + .resume_clocks = s3c244x_setup_clocks, + + .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), +}; + +static int s3c2440_cpufreq_add(struct device *dev, + struct subsys_interface *sif) +{ + xtal = s3c_cpufreq_clk_get(NULL, "xtal"); + hclk = s3c_cpufreq_clk_get(NULL, "hclk"); + fclk = s3c_cpufreq_clk_get(NULL, "fclk"); + armclk = s3c_cpufreq_clk_get(NULL, "armclk"); + + if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) { + printk(KERN_ERR "%s: failed to get clocks\n", __func__); + return -ENOENT; + } + + return s3c_cpufreq_register(&s3c2440_cpufreq_info); +} + +static struct subsys_interface s3c2440_cpufreq_interface = { + .name = "s3c2440_cpufreq", + .subsys = 
&s3c2440_subsys, + .add_dev = s3c2440_cpufreq_add, +}; + +static int s3c2440_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2440_cpufreq_interface); +} + +/* arch_initcall adds the clocks we need, so use subsys_initcall. */ +subsys_initcall(s3c2440_cpufreq_init); + +static struct subsys_interface s3c2442_cpufreq_interface = { + .name = "s3c2442_cpufreq", + .subsys = &s3c2442_subsys, + .add_dev = s3c2440_cpufreq_add, +}; + +static int s3c2442_cpufreq_init(void) +{ + return subsys_interface_register(&s3c2442_cpufreq_interface); +} +subsys_initcall(s3c2442_cpufreq_init); diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c new file mode 100644 index 00000000000..9b7b4289d66 --- /dev/null +++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2009 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C24XX CPU Frequency scaling - debugfs status support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/err.h> + +#include <plat/cpu-freq-core.h> + +static struct dentry *dbgfs_root; +static struct dentry *dbgfs_file_io; +static struct dentry *dbgfs_file_info; +static struct dentry *dbgfs_file_board; + +#define print_ns(x) ((x) / 10), ((x) % 10) + +static void show_max(struct seq_file *seq, struct s3c_freq *f) +{ + seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n", + f->fclk, f->hclk, f->pclk, f->armclk); +} + +static int board_show(struct seq_file *seq, void *p) +{ + struct s3c_cpufreq_config *cfg; + struct s3c_cpufreq_board *brd; + + cfg = s3c_cpufreq_getconfig(); + if (!cfg) { + seq_printf(seq, "no configuration registered\n"); + return 0; + } + + brd = cfg->board; + if (!brd) { + seq_printf(seq, "no board definition set?\n"); + return 0; + } + + seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh); + seq_printf(seq, "auto_io=%u\n", brd->auto_io); + seq_printf(seq, "need_io=%u\n", brd->need_io); + + show_max(seq, &brd->max); + + + return 0; +} + +static int fops_board_open(struct inode *inode, struct file *file) +{ + return single_open(file, board_show, NULL); +} + +static const struct file_operations fops_board = { + .open = fops_board_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int info_show(struct seq_file *seq, void *p) +{ + struct s3c_cpufreq_config *cfg; + + cfg = s3c_cpufreq_getconfig(); + if (!cfg) { + seq_printf(seq, "no configuration registered\n"); + return 0; + } + + seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk); + seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n", + cfg->freq.hclk, print_ns(cfg->freq.hclk_tns)); + seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.hclk); + seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk); + seq_printf(seq, "\n"); + + show_max(seq, &cfg->max); + + seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n", + cfg->divs.h_divisor, cfg->divs.p_divisor, + cfg->divs.arm_divisor, cfg->divs.dvs ? 
"on" : "off"); + seq_printf(seq, "\n"); + + seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll); + + return 0; +} + +static int fops_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, info_show, NULL); +} + +static const struct file_operations fops_info = { + .open = fops_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int io_show(struct seq_file *seq, void *p) +{ + void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *); + struct s3c_cpufreq_config *cfg; + struct s3c_iotimings *iot; + union s3c_iobank *iob; + int bank; + + cfg = s3c_cpufreq_getconfig(); + if (!cfg) { + seq_printf(seq, "no configuration registered\n"); + return 0; + } + + show_bank = cfg->info->debug_io_show; + if (!show_bank) { + seq_printf(seq, "no code to show bank timing\n"); + return 0; + } + + iot = s3c_cpufreq_getiotimings(); + if (!iot) { + seq_printf(seq, "no io timings registered\n"); + return 0; + } + + seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns)); + + for (bank = 0; bank < MAX_BANKS; bank++) { + iob = &iot->bank[bank]; + + seq_printf(seq, "bank %d: ", bank); + + if (!iob->io_2410) { + seq_printf(seq, "nothing set\n"); + continue; + } + + show_bank(seq, cfg, iob); + } + + return 0; +} + +static int fops_io_open(struct inode *inode, struct file *file) +{ + return single_open(file, io_show, NULL); +} + +static const struct file_operations fops_io = { + .open = fops_io_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + + +static int __init s3c_freq_debugfs_init(void) +{ + dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL); + if (IS_ERR(dbgfs_root)) { + printk(KERN_ERR "%s: error creating debugfs root\n", __func__); + return PTR_ERR(dbgfs_root); + } + + dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root, + NULL, &fops_io); + + dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root, + NULL, &fops_info); + + dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root, + NULL, &fops_board); + + return 0; +} + +late_initcall(s3c_freq_debugfs_init); + diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c new file mode 100644 index 00000000000..3c0e78ede0d --- /dev/null +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -0,0 +1,711 @@ +/* + * Copyright (c) 2006-2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C24XX CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/cpufreq.h> +#include <linux/cpu.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/slab.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include <plat/cpu.h> +#include <plat/clock.h> +#include <plat/cpu-freq-core.h> + +#include <mach/regs-clock.h> + +/* note, cpufreq support deals in kHz, no Hz */ + +static struct cpufreq_driver s3c24xx_driver; +static struct s3c_cpufreq_config cpu_cur; +static struct s3c_iotimings s3c24xx_iotiming; +static struct cpufreq_frequency_table *pll_reg; +static unsigned int last_target = ~0; +static unsigned int ftab_size; +static struct cpufreq_frequency_table *ftab; + +static struct clk *_clk_mpll; +static struct clk *_clk_xtal; +static struct clk *clk_fclk; +static struct clk *clk_hclk; +static struct clk *clk_pclk; +static struct clk *clk_arm; + +#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS +struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void) +{ + return &cpu_cur; +} + +struct s3c_iotimings *s3c_cpufreq_getiotimings(void) +{ + return &s3c24xx_iotiming; +} +#endif /* CONFIG_CPU_FREQ_S3C24XX_DEBUGFS */ + +static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg) +{ + unsigned long fclk, pclk, hclk, armclk; + + cfg->freq.fclk = fclk = clk_get_rate(clk_fclk); + cfg->freq.hclk = hclk = clk_get_rate(clk_hclk); + cfg->freq.pclk = pclk = clk_get_rate(clk_pclk); + cfg->freq.armclk = armclk = clk_get_rate(clk_arm); + + cfg->pll.index = __raw_readl(S3C2410_MPLLCON); + cfg->pll.frequency = fclk; + + cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10); + + cfg->divs.h_divisor = fclk / hclk; + cfg->divs.p_divisor = fclk / pclk; +} + +static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg) +{ + unsigned long pll = cfg->pll.frequency; + + cfg->freq.fclk = pll; + cfg->freq.hclk = pll / cfg->divs.h_divisor; + cfg->freq.pclk = pll / cfg->divs.p_divisor; + + /* convert hclk into 10ths of nanoseconds for io calcs */ + cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10); +} + +static inline int closer(unsigned int target, unsigned int n, unsigned int c) +{ + int diff_cur = abs(target - c); + int diff_new = abs(target - n); + + return (diff_new < diff_cur); +} + +static void s3c_cpufreq_show(const char *pfx, + struct s3c_cpufreq_config *cfg) +{ + s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n", + pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk, + cfg->freq.hclk, cfg->divs.h_divisor, + cfg->freq.pclk, cfg->divs.p_divisor); +} + +/* functions to wrapper the driver info calls to do the cpu specific work */ + +static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg) +{ + if (cfg->info->set_iotiming) + (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming); +} + +static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg) +{ + if (cfg->info->calc_iotiming) + return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming); + + return 0; +} + +static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) +{ + (cfg->info->set_refresh)(cfg); +} + +static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + (cfg->info->set_divs)(cfg); +} + +static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + return (cfg->info->calc_divs)(cfg); +} + +static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg) +{ + (cfg->info->set_fvco)(cfg); +} + +static inline void 
s3c_cpufreq_resume_clocks(void) +{ + cpu_cur.info->resume_clocks(); +} + +static inline void s3c_cpufreq_updateclk(struct clk *clk, + unsigned int freq) +{ + clk_set_rate(clk, freq); +} + +static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, + unsigned int target_freq, + struct cpufreq_frequency_table *pll) +{ + struct s3c_cpufreq_freqs freqs; + struct s3c_cpufreq_config cpu_new; + unsigned long flags; + + cpu_new = cpu_cur; /* copy new from current */ + + s3c_cpufreq_show("cur", &cpu_cur); + + /* TODO - check for DMA currently outstanding */ + + cpu_new.pll = pll ? *pll : cpu_cur.pll; + + if (pll) + freqs.pll_changing = 1; + + /* update our frequencies */ + + cpu_new.freq.armclk = target_freq; + cpu_new.freq.fclk = cpu_new.pll.frequency; + + if (s3c_cpufreq_calcdivs(&cpu_new) < 0) { + printk(KERN_ERR "no divisors for %d\n", target_freq); + goto err_notpossible; + } + + s3c_freq_dbg("%s: got divs\n", __func__); + + s3c_cpufreq_calc(&cpu_new); + + s3c_freq_dbg("%s: calculated frequencies for new\n", __func__); + + if (cpu_new.freq.hclk != cpu_cur.freq.hclk) { + if (s3c_cpufreq_calcio(&cpu_new) < 0) { + printk(KERN_ERR "%s: no IO timings\n", __func__); + goto err_notpossible; + } + } + + s3c_cpufreq_show("new", &cpu_new); + + /* setup our cpufreq parameters */ + + freqs.old = cpu_cur.freq; + freqs.new = cpu_new.freq; + + freqs.freqs.old = cpu_cur.freq.armclk / 1000; + freqs.freqs.new = cpu_new.freq.armclk / 1000; + + /* update f/h/p clock settings before we issue the change + * notification, so that drivers do not need to do anything + * special if they want to recalculate on CPUFREQ_PRECHANGE. */ + + s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency); + s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk); + s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk); + s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); + + /* start the frequency change */ + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE); + + /* If hclk is staying the same, then we do not need to + * re-write the IO or the refresh timings whilst we are changing + * speed. */ + + local_irq_save(flags); + + /* is our memory clock slowing down? */ + if (cpu_new.freq.hclk < cpu_cur.freq.hclk) { + s3c_cpufreq_setrefresh(&cpu_new); + s3c_cpufreq_setio(&cpu_new); + } + + if (cpu_new.freq.fclk == cpu_cur.freq.fclk) { + /* not changing PLL, just set the divisors */ + + s3c_cpufreq_setdivs(&cpu_new); + } else { + if (cpu_new.freq.fclk < cpu_cur.freq.fclk) { + /* slow the cpu down, then set divisors */ + + s3c_cpufreq_setfvco(&cpu_new); + s3c_cpufreq_setdivs(&cpu_new); + } else { + /* set the divisors, then speed up */ + + s3c_cpufreq_setdivs(&cpu_new); + s3c_cpufreq_setfvco(&cpu_new); + } + } + + /* did our memory clock speed up */ + if (cpu_new.freq.hclk > cpu_cur.freq.hclk) { + s3c_cpufreq_setrefresh(&cpu_new); + s3c_cpufreq_setio(&cpu_new); + } + + /* update our current settings */ + cpu_cur = cpu_new; + + local_irq_restore(flags); + + /* notify everyone we've done this */ + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE); + + s3c_freq_dbg("%s: finished\n", __func__); + return 0; + + err_notpossible: + printk(KERN_ERR "no compatible settings for %d\n", target_freq); + return -EINVAL; +} + +/* s3c_cpufreq_target + * + * called by the cpufreq core to adjust the frequency that the CPU + * is currently running at. 
+ */ + +static int s3c_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_frequency_table *pll; + unsigned int index; + + /* avoid repeated calls which cause a needless amout of duplicated + * logging output (and CPU time as the calculation process is + * done) */ + if (target_freq == last_target) + return 0; + + last_target = target_freq; + + s3c_freq_dbg("%s: policy %p, target %u, relation %u\n", + __func__, policy, target_freq, relation); + + if (ftab) { + if (cpufreq_frequency_table_target(policy, ftab, + target_freq, relation, + &index)) { + s3c_freq_dbg("%s: table failed\n", __func__); + return -EINVAL; + } + + s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__, + target_freq, index, ftab[index].frequency); + target_freq = ftab[index].frequency; + } + + target_freq *= 1000; /* convert target to Hz */ + + /* find the settings for our new frequency */ + + if (!pll_reg || cpu_cur.lock_pll) { + /* either we've not got any PLL values, or we've locked + * to the current one. */ + pll = NULL; + } else { + struct cpufreq_policy tmp_policy; + int ret; + + /* we keep the cpu pll table in Hz, to ensure we get an + * accurate value for the PLL output. */ + + tmp_policy.min = policy->min * 1000; + tmp_policy.max = policy->max * 1000; + tmp_policy.cpu = policy->cpu; + + /* cpufreq_frequency_table_target uses a pointer to 'index' + * which is the number of the table entry, not the value of + * the table entry's index field. */ + + ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg, + target_freq, relation, + &index); + + if (ret < 0) { + printk(KERN_ERR "%s: no PLL available\n", __func__); + goto err_notpossible; + } + + pll = pll_reg + index; + + s3c_freq_dbg("%s: target %u => %u\n", + __func__, target_freq, pll->frequency); + + target_freq = pll->frequency; + } + + return s3c_cpufreq_settarget(policy, target_freq, pll); + + err_notpossible: + printk(KERN_ERR "no compatible settings for %d\n", target_freq); + return -EINVAL; +} + +static unsigned int s3c_cpufreq_get(unsigned int cpu) +{ + return clk_get_rate(clk_arm) / 1000; +} + +struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) +{ + struct clk *clk; + + clk = clk_get(dev, name); + if (IS_ERR(clk)) + printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name); + + return clk; +} + +static int s3c_cpufreq_init(struct cpufreq_policy *policy) +{ + printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy); + + if (policy->cpu != 0) + return -EINVAL; + + policy->cur = s3c_cpufreq_get(0); + policy->min = policy->cpuinfo.min_freq = 0; + policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000; + policy->governor = CPUFREQ_DEFAULT_GOVERNOR; + + /* feed the latency information from the cpu driver */ + policy->cpuinfo.transition_latency = cpu_cur.info->latency; + + if (ftab) + cpufreq_frequency_table_cpuinfo(policy, ftab); + + return 0; +} + +static __init int s3c_cpufreq_initclks(void) +{ + _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll"); + _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal"); + clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk"); + clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk"); + clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk"); + clk_arm = s3c_cpufreq_clk_get(NULL, "armclk"); + + if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) || + IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) { + printk(KERN_ERR "%s: could not get clock(s)\n", __func__); + return -ENOENT; + } + + printk(KERN_INFO "%s: clocks 
f=%lu,h=%lu,p=%lu,a=%lu\n", __func__, + clk_get_rate(clk_fclk) / 1000, + clk_get_rate(clk_hclk) / 1000, + clk_get_rate(clk_pclk) / 1000, + clk_get_rate(clk_arm) / 1000); + + return 0; +} + +static int s3c_cpufreq_verify(struct cpufreq_policy *policy) +{ + if (policy->cpu != 0) + return -EINVAL; + + return 0; +} + +#ifdef CONFIG_PM +static struct cpufreq_frequency_table suspend_pll; +static unsigned int suspend_freq; + +static int s3c_cpufreq_suspend(struct cpufreq_policy *policy) +{ + suspend_pll.frequency = clk_get_rate(_clk_mpll); + suspend_pll.index = __raw_readl(S3C2410_MPLLCON); + suspend_freq = s3c_cpufreq_get(0) * 1000; + + return 0; +} + +static int s3c_cpufreq_resume(struct cpufreq_policy *policy) +{ + int ret; + + s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy); + + last_target = ~0; /* invalidate last_target setting */ + + /* first, find out what speed we resumed at. */ + s3c_cpufreq_resume_clocks(); + + /* whilst we will be called later on, we try and re-set the + * cpu frequencies as soon as possible so that we do not end + * up resuming devices and then immediately having to re-set + * a number of settings once these devices have restarted. + * + * as a note, it is expected devices are not used until they + * have been un-suspended and at that time they should have + * used the updated clock settings. + */ + + ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll); + if (ret) { + printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__); + return ret; + } + + return 0; +} +#else +#define s3c_cpufreq_resume NULL +#define s3c_cpufreq_suspend NULL +#endif + +static struct cpufreq_driver s3c24xx_driver = { + .flags = CPUFREQ_STICKY, + .verify = s3c_cpufreq_verify, + .target = s3c_cpufreq_target, + .get = s3c_cpufreq_get, + .init = s3c_cpufreq_init, + .suspend = s3c_cpufreq_suspend, + .resume = s3c_cpufreq_resume, + .name = "s3c24xx", +}; + + +int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info) +{ + if (!info || !info->name) { + printk(KERN_ERR "%s: failed to pass valid information\n", + __func__); + return -EINVAL; + } + + printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n", + info->name); + + /* check our driver info has valid data */ + + BUG_ON(info->set_refresh == NULL); + BUG_ON(info->set_divs == NULL); + BUG_ON(info->calc_divs == NULL); + + /* info->set_fvco is optional, depending on whether there + * is a need to set the clock code. */ + + cpu_cur.info = info; + + /* Note, driver registering should probably update locktime */ + + return 0; +} + +int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board) +{ + struct s3c_cpufreq_board *ours; + + if (!board) { + printk(KERN_INFO "%s: no board data\n", __func__); + return -EINVAL; + } + + /* Copy the board information so that each board can make this + * initdata. 
*/ + + ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL); + if (ours == NULL) { + printk(KERN_ERR "%s: no memory\n", __func__); + return -ENOMEM; + } + + *ours = *board; + cpu_cur.board = ours; + + return 0; +} + +int __init s3c_cpufreq_auto_io(void) +{ + int ret; + + if (!cpu_cur.info->get_iotiming) { + printk(KERN_ERR "%s: get_iotiming undefined\n", __func__); + return -ENOENT; + } + + printk(KERN_INFO "%s: working out IO settings\n", __func__); + + ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming); + if (ret) + printk(KERN_ERR "%s: failed to get timings\n", __func__); + + return ret; +} + +/* if one or is zero, then return the other, otherwise return the min */ +#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b)) + +/** + * s3c_cpufreq_freq_min - find the minimum settings for the given freq. + * @dst: The destination structure + * @a: One argument. + * @b: The other argument. + * + * Create a minimum of each frequency entry in the 'struct s3c_freq', + * unless the entry is zero when it is ignored and the non-zero argument + * used. + */ +static void s3c_cpufreq_freq_min(struct s3c_freq *dst, + struct s3c_freq *a, struct s3c_freq *b) +{ + dst->fclk = do_min(a->fclk, b->fclk); + dst->hclk = do_min(a->hclk, b->hclk); + dst->pclk = do_min(a->pclk, b->pclk); + dst->armclk = do_min(a->armclk, b->armclk); +} + +static inline u32 calc_locktime(u32 freq, u32 time_us) +{ + u32 result; + + result = freq * time_us; + result = DIV_ROUND_UP(result, 1000 * 1000); + + return result; +} + +static void s3c_cpufreq_update_loctkime(void) +{ + unsigned int bits = cpu_cur.info->locktime_bits; + u32 rate = (u32)clk_get_rate(_clk_xtal); + u32 val; + + if (bits == 0) { + WARN_ON(1); + return; + } + + val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits; + val |= calc_locktime(rate, cpu_cur.info->locktime_m); + + printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val); + __raw_writel(val, S3C2410_LOCKTIME); +} + +static int s3c_cpufreq_build_freq(void) +{ + int size, ret; + + if (!cpu_cur.info->calc_freqtable) + return -EINVAL; + + kfree(ftab); + ftab = NULL; + + size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); + size++; + + ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL); + if (!ftab) { + printk(KERN_ERR "%s: no memory for tables\n", __func__); + return -ENOMEM; + } + + ftab_size = size; + + ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size); + s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END); + + return 0; +} + +static int __init s3c_cpufreq_initcall(void) +{ + int ret = 0; + + if (cpu_cur.info && cpu_cur.board) { + ret = s3c_cpufreq_initclks(); + if (ret) + goto out; + + /* get current settings */ + s3c_cpufreq_getcur(&cpu_cur); + s3c_cpufreq_show("cur", &cpu_cur); + + if (cpu_cur.board->auto_io) { + ret = s3c_cpufreq_auto_io(); + if (ret) { + printk(KERN_ERR "%s: failed to get io timing\n", + __func__); + goto out; + } + } + + if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) { + printk(KERN_ERR "%s: no IO support registered\n", + __func__); + ret = -EINVAL; + goto out; + } + + if (!cpu_cur.info->need_pll) + cpu_cur.lock_pll = 1; + + s3c_cpufreq_update_loctkime(); + + s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max, + &cpu_cur.info->max); + + if (cpu_cur.info->calc_freqtable) + s3c_cpufreq_build_freq(); + + ret = cpufreq_register_driver(&s3c24xx_driver); + } + + out: + return ret; +} + +late_initcall(s3c_cpufreq_initcall); + +/** + * s3c_plltab_register - register CPU PLL table. 
+ * @plls: The list of PLL entries. + * @plls_no: The size of the PLL entries @plls. + * + * Register the given set of PLLs with the system. + */ +int __init s3c_plltab_register(struct cpufreq_frequency_table *plls, + unsigned int plls_no) +{ + struct cpufreq_frequency_table *vals; + unsigned int size; + + size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1); + + vals = kmalloc(size, GFP_KERNEL); + if (vals) { + memcpy(vals, plls, size); + pll_reg = vals; + + /* write a terminating entry, we don't store it in the + * table that is stored in the kernel */ + vals += plls_no; + vals->frequency = CPUFREQ_TABLE_END; + + printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no); + } else + printk(KERN_ERR "cpufreq: no memory for PLL tables\n"); + + return vals ? 0 : -ENOMEM; +} diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 3eafa903ebc..43a0c8a26ab 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -291,7 +291,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data, int cryp_mode) { enum cryp_algo_mode algomode; - struct cryp_register *src_reg = device_data->base; + struct cryp_register __iomem *src_reg = device_data->base; struct cryp_config *config = (struct cryp_config *)device_data->current_ctx; @@ -349,7 +349,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data, void cryp_restore_device_context(struct cryp_device_data *device_data, struct cryp_device_context *ctx) { - struct cryp_register *reg = device_data->base; + struct cryp_register __iomem *reg = device_data->base; struct cryp_config *config = (struct cryp_config *)device_data->current_ctx; diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index 14cfd05b777..d1d6606fe56 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -114,6 +114,9 @@ enum cryp_status_id { }; /* Cryp DMA interface */ +#define CRYP_DMA_TX_FIFO 0x08 +#define CRYP_DMA_RX_FIFO 0x10 + enum cryp_dma_req_type { CRYP_DMA_DISABLE_BOTH, CRYP_DMA_ENABLE_IN_DATA, @@ -217,7 +220,8 @@ struct cryp_dma { /** * struct cryp_device_data - structure for a cryp device. - * @base: Pointer to the hardware base address. + * @base: Pointer to virtual base address of the cryp device. + * @phybase: Pointer to physical memory location of the cryp device. * @dev: Pointer to the devices dev structure. * @clk: Pointer to the device's clock control. * @pwr_regulator: Pointer to the device's power control. 
@@ -232,6 +236,7 @@ struct cryp_dma { */ struct cryp_device_data { struct cryp_register __iomem *base; + phys_addr_t phybase; struct device *dev; struct clk *clk; struct regulator *pwr_regulator; diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 8c2777cf02f..83d79b964d1 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -475,6 +475,19 @@ static int cryp_get_device_data(struct cryp_ctx *ctx, static void cryp_dma_setup_channel(struct cryp_device_data *device_data, struct device *dev) { + struct dma_slave_config mem2cryp = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .dst_maxburst = 4, + }; + struct dma_slave_config cryp2mem = { + .direction = DMA_DEV_TO_MEM, + .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO, + .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .src_maxburst = 4, + }; + dma_cap_zero(device_data->dma.mask); dma_cap_set(DMA_SLAVE, device_data->dma.mask); @@ -490,6 +503,9 @@ static void cryp_dma_setup_channel(struct cryp_device_data *device_data, stedma40_filter, device_data->dma.cfg_cryp2mem); + dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp); + dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem); + init_completion(&device_data->dma.cryp_dma_complete); } @@ -537,10 +553,10 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " "(TO_DEVICE)", __func__); - desc = channel->device->device_prep_slave_sg(channel, - ctx->device->dma.sg_src, - ctx->device->dma.sg_src_len, - direction, DMA_CTRL_ACK, NULL); + desc = dmaengine_prep_slave_sg(channel, + ctx->device->dma.sg_src, + ctx->device->dma.sg_src_len, + direction, DMA_CTRL_ACK); break; case DMA_FROM_DEVICE: @@ -561,12 +577,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " "(FROM_DEVICE)", __func__); - desc = channel->device->device_prep_slave_sg(channel, - ctx->device->dma.sg_dst, - ctx->device->dma.sg_dst_len, - direction, - DMA_CTRL_ACK | - DMA_PREP_INTERRUPT, NULL); + desc = dmaengine_prep_slave_sg(channel, + ctx->device->dma.sg_dst, + ctx->device->dma.sg_dst_len, + direction, + DMA_CTRL_ACK | + DMA_PREP_INTERRUPT); desc->callback = cryp_dma_out_callback; desc->callback_param = ctx; @@ -578,7 +594,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, return -EFAULT; } - cookie = desc->tx_submit(desc); + cookie = dmaengine_submit(desc); dma_async_issue_pending(channel); return 0; @@ -591,12 +607,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx) dev_dbg(ctx->device->dev, "[%s]: ", __func__); chan = ctx->device->dma.chan_mem2cryp; - chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, ctx->device->dma.sg_src_len, DMA_TO_DEVICE); chan = ctx->device->dma.chan_cryp2mem; - chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); } @@ -1431,6 +1447,7 @@ static int ux500_cryp_probe(struct platform_device *pdev) goto out_kfree; } + device_data->phybase = res->start; device_data->base = ioremap(res->start, resource_size(res)); if (!device_data->base) { dev_err(dev, "[%s]: ioremap failed!", __func__); @@ 
-1458,11 +1475,17 @@ static int ux500_cryp_probe(struct platform_device *pdev) goto out_regulator; } + ret = clk_prepare(device_data->clk); + if (ret) { + dev_err(dev, "[%s]: clk_prepare() failed!", __func__); + goto out_clk; + } + /* Enable device power (and clock) */ ret = cryp_enable_power(device_data->dev, device_data, false); if (ret) { dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); - goto out_clk; + goto out_clk_unprepare; } cryp_error = cryp_check(device_data); @@ -1518,11 +1541,16 @@ static int ux500_cryp_probe(struct platform_device *pdev) goto out_power; } + dev_info(dev, "successfully registered\n"); + return 0; out_power: cryp_disable_power(device_data->dev, device_data, false); +out_clk_unprepare: + clk_unprepare(device_data->clk); + out_clk: clk_put(device_data->clk); @@ -1593,6 +1621,7 @@ static int ux500_cryp_remove(struct platform_device *pdev) dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", __func__); + clk_unprepare(device_data->clk); clk_put(device_data->clk); regulator_put(device_data->pwr_regulator); diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index cd9351cb24d..be6eb54da40 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -11,6 +11,7 @@ #include <linux/bitops.h> #define HASH_BLOCK_SIZE 64 +#define HASH_DMA_FIFO 4 #define HASH_DMA_ALIGN_SIZE 4 #define HASH_DMA_PERFORMANCE_MIN_SIZE 1024 #define HASH_BYTES_PER_WORD 4 @@ -347,7 +348,8 @@ struct hash_req_ctx { /** * struct hash_device_data - structure for a hash device. - * @base: Pointer to the hardware base address. + * @base: Pointer to virtual base address of the hash device. + * @phybase: Pointer to physical memory location of the hash device. * @list_node: For inclusion in klist. * @dev: Pointer to the device dev structure. * @ctx_lock: Spinlock for current_ctx. 
@@ -361,6 +363,7 @@ struct hash_req_ctx { */ struct hash_device_data { struct hash_register __iomem *base; + phys_addr_t phybase; struct klist_node list_node; struct device *dev; struct spinlock ctx_lock; diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 3b8f661d0ed..496ae6aae31 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -122,6 +122,13 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, struct device *dev) { struct hash_platform_data *platform_data = dev->platform_data; + struct dma_slave_config conf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = device_data->phybase + HASH_DMA_FIFO, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .dst_maxburst = 16, + }; + dma_cap_zero(device_data->dma.mask); dma_cap_set(DMA_SLAVE, device_data->dma.mask); @@ -131,6 +138,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, platform_data->dma_filter, device_data->dma.cfg_mem2hash); + dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); + init_completion(&device_data->dma.complete); } @@ -171,9 +180,9 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " "(TO_DEVICE)", __func__); - desc = channel->device->device_prep_slave_sg(channel, + desc = dmaengine_prep_slave_sg(channel, ctx->device->dma.sg, ctx->device->dma.sg_len, - direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL); + direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(ctx->device->dev, "[%s]: device_prep_slave_sg() failed!", __func__); @@ -183,7 +192,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, desc->callback = hash_dma_callback; desc->callback_param = ctx; - cookie = desc->tx_submit(desc); + cookie = dmaengine_submit(desc); dma_async_issue_pending(channel); return 0; @@ -194,7 +203,7 @@ static void hash_dma_done(struct hash_ctx *ctx) struct dma_chan *chan; chan = ctx->device->dma.chan_mem2hash; - chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, ctx->device->dma.sg_len, DMA_TO_DEVICE); @@ -464,12 +473,12 @@ static void hash_hw_write_key(struct hash_device_data *device_data, HASH_SET_DIN(&word, nwords); } - while (device_data->base->str & HASH_STR_DCAL_MASK) + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); HASH_SET_DCAL; - while (device_data->base->str & HASH_STR_DCAL_MASK) + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); } @@ -652,7 +661,7 @@ static void hash_messagepad(struct hash_device_data *device_data, if (index_bytes) HASH_SET_DIN(message, nwords); - while (device_data->base->str & HASH_STR_DCAL_MASK) + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ @@ -667,7 +676,7 @@ static void hash_messagepad(struct hash_device_data *device_data, (int)(readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK)); - while (device_data->base->str & HASH_STR_DCAL_MASK) + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); } @@ -767,7 +776,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) /* HW and SW initializations */ /* Note: there is no need to initialize buffer and digest members */ - while (device_data->base->str & HASH_STR_DCAL_MASK) + while 
(readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); /* @@ -783,8 +792,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); } -int hash_process_data( - struct hash_device_data *device_data, +static int hash_process_data(struct hash_device_data *device_data, struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) { @@ -953,7 +961,7 @@ static int hash_dma_final(struct ahash_request *req) wait_for_completion(&ctx->device->dma.complete); hash_dma_done(ctx); - while (device_data->base->str & HASH_STR_DCAL_MASK) + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { @@ -983,7 +991,7 @@ out: * hash_hw_final - The final hash calculation function * @req: The hash request for the job. */ -int hash_hw_final(struct ahash_request *req) +static int hash_hw_final(struct ahash_request *req) { int ret = 0; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -1051,7 +1059,7 @@ int hash_hw_final(struct ahash_request *req) req_ctx->state.index); } else { HASH_SET_DCAL; - while (device_data->base->str & HASH_STR_DCAL_MASK) + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); } @@ -1180,7 +1188,7 @@ int hash_resume_state(struct hash_device_data *device_data, temp_cr = device_state->temp_cr; writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); - if (device_data->base->cr & HASH_CR_MODE_MASK) + if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK) hash_mode = HASH_OPER_MODE_HMAC; else hash_mode = HASH_OPER_MODE_HASH; @@ -1224,7 +1232,7 @@ int hash_save_state(struct hash_device_data *device_data, * actually makes sure that there isn't any ongoing calculation in the * hardware. 
*/ - while (device_data->base->str & HASH_STR_DCAL_MASK) + while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); temp_cr = readl_relaxed(&device_data->base->cr); @@ -1233,7 +1241,7 @@ int hash_save_state(struct hash_device_data *device_data, device_state->din_reg = readl_relaxed(&device_data->base->din); - if (device_data->base->cr & HASH_CR_MODE_MASK) + if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK) hash_mode = HASH_OPER_MODE_HMAC; else hash_mode = HASH_OPER_MODE_HASH; @@ -1699,6 +1707,7 @@ static int ux500_hash_probe(struct platform_device *pdev) goto out_kfree; } + device_data->phybase = res->start; device_data->base = ioremap(res->start, resource_size(res)); if (!device_data->base) { dev_err(dev, "[%s] ioremap() failed!", @@ -1726,11 +1735,17 @@ static int ux500_hash_probe(struct platform_device *pdev) goto out_regulator; } + ret = clk_prepare(device_data->clk); + if (ret) { + dev_err(dev, "[%s] clk_prepare() failed!", __func__); + goto out_clk; + } + /* Enable device power (and clock) */ ret = hash_enable_power(device_data, false); if (ret) { dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); - goto out_clk; + goto out_clk_unprepare; } ret = hash_check_hw(device_data); @@ -1756,12 +1771,15 @@ static int ux500_hash_probe(struct platform_device *pdev) goto out_power; } - dev_info(dev, "[%s] successfully probed\n", __func__); + dev_info(dev, "successfully registered\n"); return 0; out_power: hash_disable_power(device_data, false); +out_clk_unprepare: + clk_unprepare(device_data->clk); + out_clk: clk_put(device_data->clk); @@ -1826,6 +1844,7 @@ static int ux500_hash_remove(struct platform_device *pdev) dev_err(dev, "[%s]: hash_disable_power() failed", __func__); + clk_unprepare(device_data->clk); clk_put(device_data->clk); regulator_put(device_data->regulator); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 71bf4ec300e..5ab5880d5c9 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -17,6 +17,8 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/err.h> +#include <linux/of.h> +#include <linux/of_dma.h> #include <linux/amba/bus.h> #include <linux/regulator/consumer.h> #include <linux/platform_data/dma-ste-dma40.h> @@ -45,15 +47,63 @@ #define D40_LCLA_LINK_PER_EVENT_GRP 128 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP +/* Max number of logical channels per physical channel */ +#define D40_MAX_LOG_CHAN_PER_PHY 32 + /* Attempts before giving up to trying to get pages that are aligned */ #define MAX_LCLA_ALLOC_ATTEMPTS 256 /* Bit markings for allocation map */ -#define D40_ALLOC_FREE (1 << 31) -#define D40_ALLOC_PHY (1 << 30) +#define D40_ALLOC_FREE BIT(31) +#define D40_ALLOC_PHY BIT(30) #define D40_ALLOC_LOG_FREE 0 -#define MAX(a, b) (((a) < (b)) ? (b) : (a)) +#define D40_MEMCPY_MAX_CHANS 8 + +/* Reserved event lines for memcpy only. 
*/ +#define DB8500_DMA_MEMCPY_EV_0 51 +#define DB8500_DMA_MEMCPY_EV_1 56 +#define DB8500_DMA_MEMCPY_EV_2 57 +#define DB8500_DMA_MEMCPY_EV_3 58 +#define DB8500_DMA_MEMCPY_EV_4 59 +#define DB8500_DMA_MEMCPY_EV_5 60 + +static int dma40_memcpy_channels[] = { + DB8500_DMA_MEMCPY_EV_0, + DB8500_DMA_MEMCPY_EV_1, + DB8500_DMA_MEMCPY_EV_2, + DB8500_DMA_MEMCPY_EV_3, + DB8500_DMA_MEMCPY_EV_4, + DB8500_DMA_MEMCPY_EV_5, +}; + +/* Default configuration for physcial memcpy */ +static struct stedma40_chan_cfg dma40_memcpy_conf_phy = { + .mode = STEDMA40_MODE_PHYSICAL, + .dir = DMA_MEM_TO_MEM, + + .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .src_info.psize = STEDMA40_PSIZE_PHY_1, + .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, + + .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_info.psize = STEDMA40_PSIZE_PHY_1, + .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, +}; + +/* Default configuration for logical memcpy */ +static struct stedma40_chan_cfg dma40_memcpy_conf_log = { + .mode = STEDMA40_MODE_LOGICAL, + .dir = DMA_MEM_TO_MEM, + + .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .src_info.psize = STEDMA40_PSIZE_LOG_1, + .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, + + .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_info.psize = STEDMA40_PSIZE_LOG_1, + .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, +}; /** * enum 40_command - The different commands and/or statuses. @@ -171,6 +221,9 @@ static u32 d40_backup_regs_chan[] = { D40_CHAN_REG_SDLNK, }; +#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \ + BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B) + /** * struct d40_interrupt_lookup - lookup table for interrupt handler * @@ -471,6 +524,8 @@ struct d40_gen_dmac { * @phy_start: Physical memory start of the DMA registers. * @phy_size: Size of the DMA register map. * @irq: The IRQ number. + * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem + * transfers). * @num_phy_chans: The number of physical channels. Read from HW. This * is the number of available channels for this driver, not counting "Secure * mode" allocated physical channels. @@ -514,6 +569,7 @@ struct d40_base { phys_addr_t phy_start; resource_size_t phy_size; int irq; + int num_memcpy_chans; int num_phy_chans; int num_log_chans; struct device_dma_parameters dma_parms; @@ -534,7 +590,7 @@ struct d40_base { resource_size_t lcpa_size; struct kmem_cache *desc_slab; u32 reg_val_backup[BACKUP_REGS_SZ]; - u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)]; + u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; u32 *reg_val_backup_chan; u16 gcc_pwr_off_mask; bool initialized; @@ -792,7 +848,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) * that uses linked lists. */ if (!(chan->phy_chan->use_soft_lli && - chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)) + chan->dma_cfg.dir == DMA_DEV_TO_MEM)) curr_lcla = d40_lcla_alloc_one(chan, desc); first_lcla = curr_lcla; @@ -954,20 +1010,21 @@ static int d40_psize_2_burst_size(bool is_log, int psize) /* * The dma only supports transmitting packages up to - * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of - * dma elements required to send the entire sg list + * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes. + * + * Calculate the total number of dma elements required to send the entire sg list. 
*/ static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) { int dmalen; u32 max_w = max(data_width1, data_width2); u32 min_w = min(data_width1, data_width2); - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); + u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); if (seg_max > STEDMA40_MAX_SEG_SIZE) - seg_max -= (1 << max_w); + seg_max -= max_w; - if (!IS_ALIGNED(size, 1 << max_w)) + if (!IS_ALIGNED(size, max_w)) return -EINVAL; if (size <= seg_max) @@ -1257,21 +1314,17 @@ static void __d40_config_set_event(struct d40_chan *d40c, static void d40_config_set_event(struct d40_chan *d40c, enum d40_events event_type) { - /* Enable event line connected to device (or memcpy) */ - if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { - u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); + /* Enable event line connected to device (or memcpy) */ + if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) __d40_config_set_event(d40c, event_type, event, D40_CHAN_REG_SSLNK); - } - - if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { - u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); + if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) __d40_config_set_event(d40c, event_type, event, D40_CHAN_REG_SDLNK); - } } static u32 d40_chan_has_events(struct d40_chan *d40c) @@ -1417,7 +1470,7 @@ static u32 d40_residue(struct d40_chan *d40c) >> D40_SREG_ELEM_PHY_ECNT_POS; } - return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); + return num_elt * d40c->dma_cfg.dst_info.data_width; } static bool d40_tx_is_linked(struct d40_chan *d40c) @@ -1693,7 +1746,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) } /* ACK interrupt */ - writel(1 << idx, base->virtbase + il[row].clr); + writel(BIT(idx), base->virtbase + il[row].clr); spin_lock(&d40c->lock); @@ -1715,8 +1768,6 @@ static int d40_validate_conf(struct d40_chan *d40c, struct stedma40_chan_cfg *conf) { int res = 0; - u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); - u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; if (!conf->dir) { @@ -1724,48 +1775,14 @@ static int d40_validate_conf(struct d40_chan *d40c, res = -EINVAL; } - if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && - d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && - d40c->runtime_addr == 0) { - - chan_err(d40c, "Invalid TX channel address (%d)\n", - conf->dst_dev_type); - res = -EINVAL; - } - - if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && - d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && - d40c->runtime_addr == 0) { - chan_err(d40c, "Invalid RX channel address (%d)\n", - conf->src_dev_type); - res = -EINVAL; - } - - if (conf->dir == STEDMA40_MEM_TO_PERIPH && - dst_event_group == STEDMA40_DEV_DST_MEMORY) { - chan_err(d40c, "Invalid dst\n"); + if ((is_log && conf->dev_type > d40c->base->num_log_chans) || + (!is_log && conf->dev_type > d40c->base->num_phy_chans) || + (conf->dev_type < 0)) { + chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); res = -EINVAL; } - if (conf->dir == STEDMA40_PERIPH_TO_MEM && - src_event_group == STEDMA40_DEV_SRC_MEMORY) { - chan_err(d40c, "Invalid src\n"); - res = -EINVAL; - } - - if (src_event_group == STEDMA40_DEV_SRC_MEMORY && - dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { - chan_err(d40c, "No event line\n"); - res = -EINVAL; - } - - if (conf->dir == 
STEDMA40_PERIPH_TO_PERIPH && - (src_event_group != dst_event_group)) { - chan_err(d40c, "Invalid event group\n"); - res = -EINVAL; - } - - if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { + if (conf->dir == DMA_DEV_TO_DEV) { /* * DMAC HW supports it. Will be added to this driver, * in case any dma client requires it. @@ -1775,9 +1792,9 @@ static int d40_validate_conf(struct d40_chan *d40c, } if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * - (1 << conf->src_info.data_width) != + conf->src_info.data_width != d40_psize_2_burst_size(is_log, conf->dst_info.psize) * - (1 << conf->dst_info.data_width)) { + conf->dst_info.data_width) { /* * The DMAC hardware only supports * src (burst x width) == dst (burst x width) @@ -1819,8 +1836,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy, if (phy->allocated_src == D40_ALLOC_FREE) phy->allocated_src = D40_ALLOC_LOG_FREE; - if (!(phy->allocated_src & (1 << log_event_line))) { - phy->allocated_src |= 1 << log_event_line; + if (!(phy->allocated_src & BIT(log_event_line))) { + phy->allocated_src |= BIT(log_event_line); goto found; } else goto not_found; @@ -1831,8 +1848,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy, if (phy->allocated_dst == D40_ALLOC_FREE) phy->allocated_dst = D40_ALLOC_LOG_FREE; - if (!(phy->allocated_dst & (1 << log_event_line))) { - phy->allocated_dst |= 1 << log_event_line; + if (!(phy->allocated_dst & BIT(log_event_line))) { + phy->allocated_dst |= BIT(log_event_line); goto found; } else goto not_found; @@ -1862,11 +1879,11 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, /* Logical channel */ if (is_src) { - phy->allocated_src &= ~(1 << log_event_line); + phy->allocated_src &= ~BIT(log_event_line); if (phy->allocated_src == D40_ALLOC_LOG_FREE) phy->allocated_src = D40_ALLOC_FREE; } else { - phy->allocated_dst &= ~(1 << log_event_line); + phy->allocated_dst &= ~BIT(log_event_line); if (phy->allocated_dst == D40_ALLOC_LOG_FREE) phy->allocated_dst = D40_ALLOC_FREE; } @@ -1882,7 +1899,7 @@ out: static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) { - int dev_type; + int dev_type = d40c->dma_cfg.dev_type; int event_group; int event_line; struct d40_phy_res *phys; @@ -1896,14 +1913,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) phys = d40c->base->phy_res; num_phy_chans = d40c->base->num_phy_chans; - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { - dev_type = d40c->dma_cfg.src_dev_type; + if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { log_num = 2 * dev_type; is_src = true; - } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { + } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { /* dst event lines are used for logical memcpy */ - dev_type = d40c->dma_cfg.dst_dev_type; log_num = 2 * dev_type + 1; is_src = false; } else @@ -1913,7 +1928,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) event_line = D40_TYPE_TO_EVENT(dev_type); if (!is_log) { - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { + if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { /* Find physical half channel */ if (d40c->dma_cfg.use_fixed_channel) { i = d40c->dma_cfg.phy_channel; @@ -2014,14 +2029,23 @@ static int d40_config_memcpy(struct d40_chan *d40c) dma_cap_mask_t cap = d40c->chan.device->cap_mask; if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { - d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; - 
d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; - d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> - memcpy[d40c->chan.chan_id]; + d40c->dma_cfg = dma40_memcpy_conf_log; + d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; + + d40_log_cfg(&d40c->dma_cfg, + &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); } else if (dma_has_cap(DMA_MEMCPY, cap) && dma_has_cap(DMA_SLAVE, cap)) { - d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; + d40c->dma_cfg = dma40_memcpy_conf_phy; + + /* Generate interrrupt at end of transfer or relink. */ + d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); + + /* Generate interrupt on error. */ + d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); + d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); + } else { chan_err(d40c, "No memcpy\n"); return -EINVAL; @@ -2034,7 +2058,7 @@ static int d40_free_dma(struct d40_chan *d40c) { int res = 0; - u32 event; + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); struct d40_phy_res *phy = d40c->phy_chan; bool is_src; @@ -2052,14 +2076,12 @@ static int d40_free_dma(struct d40_chan *d40c) return -EINVAL; } - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); + if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) is_src = false; - } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); + else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) is_src = true; - } else { + else { chan_err(d40c, "Unknown direction\n"); return -EINVAL; } @@ -2100,7 +2122,7 @@ static bool d40_is_paused(struct d40_chan *d40c) unsigned long flags; void __iomem *active_reg; u32 status; - u32 event; + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); spin_lock_irqsave(&d40c->lock, flags); @@ -2119,12 +2141,10 @@ static bool d40_is_paused(struct d40_chan *d40c) goto _exit; } - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); + if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { status = readl(chanbase + D40_CHAN_REG_SDLNK); - } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); + } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { status = readl(chanbase + D40_CHAN_REG_SSLNK); } else { chan_err(d40c, "Unknown direction\n"); @@ -2255,24 +2275,6 @@ err: return NULL; } -static dma_addr_t -d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) -{ - struct stedma40_platform_data *plat = chan->base->plat_data; - struct stedma40_chan_cfg *cfg = &chan->dma_cfg; - dma_addr_t addr = 0; - - if (chan->runtime_addr) - return chan->runtime_addr; - - if (direction == DMA_DEV_TO_MEM) - addr = plat->dev_rx[cfg->src_dev_type]; - else if (direction == DMA_MEM_TO_DEV) - addr = plat->dev_tx[cfg->dst_dev_type]; - - return addr; -} - static struct dma_async_tx_descriptor * d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, struct scatterlist *sg_dst, unsigned int sg_len, @@ -2299,14 +2301,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (sg_next(&sg_src[sg_len - 1]) == sg_src) desc->cyclic = true; - if (direction != DMA_TRANS_NONE) { - dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); - - if (direction == DMA_DEV_TO_MEM) - src_dev_addr = dev_addr; - else if (direction == DMA_MEM_TO_DEV) - dst_dev_addr = dev_addr; 
- } + if (direction == DMA_DEV_TO_MEM) + src_dev_addr = chan->runtime_addr; + else if (direction == DMA_MEM_TO_DEV) + dst_dev_addr = chan->runtime_addr; if (chan_is_logical(chan)) ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, @@ -2366,7 +2364,7 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) u32 rtreg; u32 event = D40_TYPE_TO_EVENT(dev_type); u32 group = D40_TYPE_TO_GROUP(dev_type); - u32 bit = 1 << event; + u32 bit = BIT(event); u32 prioreg; struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; @@ -2397,13 +2395,57 @@ static void d40_set_prio_realtime(struct d40_chan *d40c) if (d40c->base->rev < 3) return; - if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) - __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); + if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); - if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) - __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); + if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); +} + +#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) +#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) +#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) +#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) + +static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct stedma40_chan_cfg cfg; + dma_cap_mask_t cap; + u32 flags; + + memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); + + dma_cap_zero(cap); + dma_cap_set(DMA_SLAVE, cap); + + cfg.dev_type = dma_spec->args[0]; + flags = dma_spec->args[2]; + + switch (D40_DT_FLAGS_MODE(flags)) { + case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; + case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; + } + + switch (D40_DT_FLAGS_DIR(flags)) { + case 0: + cfg.dir = DMA_MEM_TO_DEV; + cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); + break; + case 1: + cfg.dir = DMA_DEV_TO_MEM; + cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); + break; + } + + if (D40_DT_FLAGS_FIXED_CHAN(flags)) { + cfg.phy_channel = dma_spec->args[1]; + cfg.use_fixed_channel = true; + } + + return dma_request_channel(cap, stedma40_filter, &cfg); } /* DMA ENGINE functions */ @@ -2435,23 +2477,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) } pm_runtime_get_sync(d40c->base->dev); - /* Fill in basic CFG register values */ - d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, chan_is_logical(d40c)); d40_set_prio_realtime(d40c); if (chan_is_logical(d40c)) { - d40_log_cfg(&d40c->dma_cfg, - &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); - - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) + if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) d40c->lcpa = d40c->base->lcpa_base + - d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; + d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; else d40c->lcpa = d40c->base->lcpa_base + - d40c->dma_cfg.dst_dev_type * + d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; + + /* Unmask the Global Interrupt Mask. 
*/ + d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); + d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); } dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", @@ -2641,33 +2681,10 @@ static void d40_terminate_all(struct dma_chan *chan) static int dma40_config_to_halfchannel(struct d40_chan *d40c, struct stedma40_half_channel_info *info, - enum dma_slave_buswidth width, u32 maxburst) { - enum stedma40_periph_data_width addr_width; int psize; - switch (width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - addr_width = STEDMA40_BYTE_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_2_BYTES: - addr_width = STEDMA40_HALFWORD_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - addr_width = STEDMA40_WORD_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_8_BYTES: - addr_width = STEDMA40_DOUBLEWORD_WIDTH; - break; - default: - dev_err(d40c->base->dev, - "illegal peripheral address width " - "requested (%d)\n", - width); - return -EINVAL; - } - if (chan_is_logical(d40c)) { if (maxburst >= 16) psize = STEDMA40_PSIZE_LOG_16; @@ -2688,7 +2705,6 @@ dma40_config_to_halfchannel(struct d40_chan *d40c, psize = STEDMA40_PSIZE_PHY_1; } - info->data_width = addr_width; info->psize = psize; info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; @@ -2712,21 +2728,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, dst_maxburst = config->dst_maxburst; if (config->direction == DMA_DEV_TO_MEM) { - dma_addr_t dev_addr_rx = - d40c->base->plat_data->dev_rx[cfg->src_dev_type]; - config_addr = config->src_addr; - if (dev_addr_rx) - dev_dbg(d40c->base->dev, - "channel has a pre-wired RX address %08x " - "overriding with %08x\n", - dev_addr_rx, config_addr); - if (cfg->dir != STEDMA40_PERIPH_TO_MEM) + + if (cfg->dir != DMA_DEV_TO_MEM) dev_dbg(d40c->base->dev, "channel was not configured for peripheral " "to memory transfer (%d) overriding\n", cfg->dir); - cfg->dir = STEDMA40_PERIPH_TO_MEM; + cfg->dir = DMA_DEV_TO_MEM; /* Configure the memory side */ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) @@ -2735,21 +2744,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, dst_maxburst = src_maxburst; } else if (config->direction == DMA_MEM_TO_DEV) { - dma_addr_t dev_addr_tx = - d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; - config_addr = config->dst_addr; - if (dev_addr_tx) - dev_dbg(d40c->base->dev, - "channel has a pre-wired TX address %08x " - "overriding with %08x\n", - dev_addr_tx, config_addr); - if (cfg->dir != STEDMA40_MEM_TO_PERIPH) + + if (cfg->dir != DMA_MEM_TO_DEV) dev_dbg(d40c->base->dev, "channel was not configured for memory " "to peripheral transfer (%d) overriding\n", cfg->dir); - cfg->dir = STEDMA40_MEM_TO_PERIPH; + cfg->dir = DMA_MEM_TO_DEV; /* Configure the memory side */ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) @@ -2763,6 +2765,11 @@ static int d40_set_runtime_config(struct dma_chan *chan, return -EINVAL; } + if (config_addr <= 0) { + dev_err(d40c->base->dev, "no address supplied\n"); + return -EINVAL; + } + if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { dev_err(d40c->base->dev, "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", @@ -2781,14 +2788,24 @@ static int d40_set_runtime_config(struct dma_chan *chan, src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; } + /* Only valid widths are; 1, 2, 4 and 8. 
*/ + if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || + src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || + dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || + dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || + ((src_addr_width > 1) && (src_addr_width & 1)) || + ((dst_addr_width > 1) && (dst_addr_width & 1))) + return -EINVAL; + + cfg->src_info.data_width = src_addr_width; + cfg->dst_info.data_width = dst_addr_width; + ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, - src_addr_width, src_maxburst); if (ret) return ret; ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, - dst_addr_width, dst_maxburst); if (ret) return ret; @@ -2797,8 +2814,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, if (chan_is_logical(d40c)) d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); else - d40_phy_cfg(cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, false); + d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); /* These settings will take precedence later */ d40c->runtime_addr = config_addr; @@ -2929,7 +2945,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, } d40_chan_init(base, &base->dma_memcpy, base->log_chans, - base->num_log_chans, base->plat_data->memcpy_len); + base->num_log_chans, base->num_memcpy_chans); dma_cap_zero(base->dma_memcpy.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); @@ -3123,13 +3139,14 @@ static int __init d40_phy_res_init(struct d40_base *base) static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) { - struct stedma40_platform_data *plat_data; + struct stedma40_platform_data *plat_data = pdev->dev.platform_data; struct clk *clk = NULL; void __iomem *virtbase = NULL; struct resource *res = NULL; struct d40_base *base = NULL; int num_log_chans = 0; int num_phy_chans; + int num_memcpy_chans; int clk_ret = -EINVAL; int i; u32 pid; @@ -3189,8 +3206,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) * DB8540v1 has revision 4 */ rev = AMBA_REV_BITS(pid); - - plat_data = pdev->dev.platform_data; + if (rev < 2) { + d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); + goto failure; + } /* The number of physical channels on this HW */ if (plat_data->num_of_phy_chans) @@ -3198,26 +3217,20 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) else num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; - dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", - rev, res->start, num_phy_chans); - - if (rev < 2) { - d40_err(&pdev->dev, "hardware revision: %d is not supported", - rev); - goto failure; - } + /* The number of channels used for memcpy */ + if (plat_data->num_of_memcpy_chans) + num_memcpy_chans = plat_data->num_of_memcpy_chans; + else + num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); - /* Count the number of logical channels in use */ - for (i = 0; i < plat_data->dev_len; i++) - if (plat_data->dev_rx[i] != 0) - num_log_chans++; + num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; - for (i = 0; i < plat_data->dev_len; i++) - if (plat_data->dev_tx[i] != 0) - num_log_chans++; + dev_info(&pdev->dev, + "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n", + rev, res->start, num_phy_chans, num_log_chans); base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + - (num_phy_chans + num_log_chans + plat_data->memcpy_len) * + (num_phy_chans + num_log_chans + num_memcpy_chans) * sizeof(struct d40_chan), GFP_KERNEL); if (base == NULL) { @@ -3227,6 +3240,7 @@ static 
struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) base->rev = rev; base->clk = clk; + base->num_memcpy_chans = num_memcpy_chans; base->num_phy_chans = num_phy_chans; base->num_log_chans = num_log_chans; base->phy_start = res->start; @@ -3278,17 +3292,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if (!base->lookup_phy_chans) goto failure; - if (num_log_chans + plat_data->memcpy_len) { - /* - * The max number of logical channels are event lines for all - * src devices and dst devices - */ - base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * - sizeof(struct d40_chan *), - GFP_KERNEL); - if (!base->lookup_log_chans) - goto failure; - } + base->lookup_log_chans = kzalloc(num_log_chans * + sizeof(struct d40_chan *), + GFP_KERNEL); + if (!base->lookup_log_chans) + goto failure; base->reg_val_backup_chan = kmalloc(base->num_phy_chans * sizeof(d40_backup_regs_chan), @@ -3472,17 +3480,82 @@ failure: return ret; } +static int __init d40_of_probe(struct platform_device *pdev, + struct device_node *np) +{ + struct stedma40_platform_data *pdata; + int num_phy = 0, num_memcpy = 0, num_disabled = 0; + const const __be32 *list; + + pdata = devm_kzalloc(&pdev->dev, + sizeof(struct stedma40_platform_data), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + /* If absent this value will be obtained from h/w. */ + of_property_read_u32(np, "dma-channels", &num_phy); + if (num_phy > 0) + pdata->num_of_phy_chans = num_phy; + + list = of_get_property(np, "memcpy-channels", &num_memcpy); + num_memcpy /= sizeof(*list); + + if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { + d40_err(&pdev->dev, + "Invalid number of memcpy channels specified (%d)\n", + num_memcpy); + return -EINVAL; + } + pdata->num_of_memcpy_chans = num_memcpy; + + of_property_read_u32_array(np, "memcpy-channels", + dma40_memcpy_channels, + num_memcpy); + + list = of_get_property(np, "disabled-channels", &num_disabled); + num_disabled /= sizeof(*list); + + if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) { + d40_err(&pdev->dev, + "Invalid number of disabled channels specified (%d)\n", + num_disabled); + return -EINVAL; + } + + of_property_read_u32_array(np, "disabled-channels", + pdata->disabled_channels, + num_disabled); + pdata->disabled_channels[num_disabled] = -1; + + pdev->dev.platform_data = pdata; + + return 0; +} + static int __init d40_probe(struct platform_device *pdev) { - int err; + struct stedma40_platform_data *plat_data = pdev->dev.platform_data; + struct device_node *np = pdev->dev.of_node; int ret = -ENOENT; - struct d40_base *base; + struct d40_base *base = NULL; struct resource *res = NULL; int num_reserved_chans; u32 val; - base = d40_hw_detect_init(pdev); + if (!plat_data) { + if (np) { + if(d40_of_probe(pdev, np)) { + ret = -ENOMEM; + goto failure; + } + } else { + d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); + goto failure; + } + } + base = d40_hw_detect_init(pdev); if (!base) goto failure; @@ -3575,6 +3648,7 @@ static int __init d40_probe(struct platform_device *pdev) base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); if (IS_ERR(base->lcpa_regulator)) { d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); + ret = PTR_ERR(base->lcpa_regulator); base->lcpa_regulator = NULL; goto failure; } @@ -3590,19 +3664,26 @@ static int __init d40_probe(struct platform_device *pdev) } base->initialized = true; - err = d40_dmaengine_init(base, num_reserved_chans); - if (err) + ret = d40_dmaengine_init(base, 
num_reserved_chans); + if (ret) goto failure; base->dev->dma_parms = &base->dma_parms; - err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); - if (err) { + ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); + if (ret) { d40_err(&pdev->dev, "Failed to set dma max seg size\n"); goto failure; } d40_hw_init(base); + if (np) { + ret = of_dma_controller_register(np, d40_xlate, NULL); + if (ret) + dev_err(&pdev->dev, + "could not register of_dma_controller\n"); + } + dev_info(base->dev, "initialized\n"); return 0; @@ -3656,11 +3737,17 @@ failure: return ret; } +static const struct of_device_id d40_match[] = { + { .compatible = "stericsson,dma40", }, + {} +}; + static struct platform_driver d40_driver = { .driver = { .owner = THIS_MODULE, .name = D40_NAME, .pm = DMA40_PM_OPS, + .of_match_table = d40_match, }, }; diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 7180e0d4172..27b818dee7c 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -10,6 +10,18 @@ #include "ste_dma40_ll.h" +u8 d40_width_to_bits(enum dma_slave_buswidth width) +{ + if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) + return STEDMA40_ESIZE_8_BIT; + else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) + return STEDMA40_ESIZE_16_BIT; + else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return STEDMA40_ESIZE_64_BIT; + else + return STEDMA40_ESIZE_32_BIT; +} + /* Sets up proper LCSP1 and LCSP3 register for a logical channel */ void d40_log_cfg(struct stedma40_chan_cfg *cfg, u32 *lcsp1, u32 *lcsp3) @@ -18,106 +30,100 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg, u32 l1 = 0; /* src */ /* src is mem? -> increase address pos */ - if (cfg->dir == STEDMA40_MEM_TO_PERIPH || - cfg->dir == STEDMA40_MEM_TO_MEM) - l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS; + if (cfg->dir == DMA_MEM_TO_DEV || + cfg->dir == DMA_MEM_TO_MEM) + l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS); /* dst is mem? -> increase address pos */ - if (cfg->dir == STEDMA40_PERIPH_TO_MEM || - cfg->dir == STEDMA40_MEM_TO_MEM) - l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS; + if (cfg->dir == DMA_DEV_TO_MEM || + cfg->dir == DMA_MEM_TO_MEM) + l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS); /* src is hw? -> master port 1 */ - if (cfg->dir == STEDMA40_PERIPH_TO_MEM || - cfg->dir == STEDMA40_PERIPH_TO_PERIPH) - l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS; + if (cfg->dir == DMA_DEV_TO_MEM || + cfg->dir == DMA_DEV_TO_DEV) + l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS); /* dst is hw? 
-> master port 1 */ - if (cfg->dir == STEDMA40_MEM_TO_PERIPH || - cfg->dir == STEDMA40_PERIPH_TO_PERIPH) - l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; + if (cfg->dir == DMA_MEM_TO_DEV || + cfg->dir == DMA_DEV_TO_DEV) + l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS); - l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; + l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS); l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; - l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; + l3 |= d40_width_to_bits(cfg->dst_info.data_width) + << D40_MEM_LCSP3_DCFG_ESIZE_POS; - l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; + l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS); l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; - l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; + l1 |= d40_width_to_bits(cfg->src_info.data_width) + << D40_MEM_LCSP1_SCFG_ESIZE_POS; *lcsp1 = l1; *lcsp3 = l3; } -/* Sets up SRC and DST CFG register for both logical and physical channels */ -void d40_phy_cfg(struct stedma40_chan_cfg *cfg, - u32 *src_cfg, u32 *dst_cfg, bool is_log) +void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg) { u32 src = 0; u32 dst = 0; - if (!is_log) { - /* Physical channel */ - if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) || - (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { - /* Set master port to 1 */ - src |= 1 << D40_SREG_CFG_MST_POS; - src |= D40_TYPE_TO_EVENT(cfg->src_dev_type); - - if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) - src |= 1 << D40_SREG_CFG_PHY_TM_POS; - else - src |= 3 << D40_SREG_CFG_PHY_TM_POS; - } - if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) || - (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { - /* Set master port to 1 */ - dst |= 1 << D40_SREG_CFG_MST_POS; - dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type); - - if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) - dst |= 1 << D40_SREG_CFG_PHY_TM_POS; - else - dst |= 3 << D40_SREG_CFG_PHY_TM_POS; - } - /* Interrupt on end of transfer for destination */ - dst |= 1 << D40_SREG_CFG_TIM_POS; - - /* Generate interrupt on error */ - src |= 1 << D40_SREG_CFG_EIM_POS; - dst |= 1 << D40_SREG_CFG_EIM_POS; - - /* PSIZE */ - if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) { - src |= 1 << D40_SREG_CFG_PHY_PEN_POS; - src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS; - } - if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) { - dst |= 1 << D40_SREG_CFG_PHY_PEN_POS; - dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS; - } - - /* Element size */ - src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; - dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; - - /* Set the priority bit to high for the physical channel */ - if (cfg->high_priority) { - src |= 1 << D40_SREG_CFG_PRI_POS; - dst |= 1 << D40_SREG_CFG_PRI_POS; - } - - } else { - /* Logical channel */ - dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; - src |= 1 << D40_SREG_CFG_LOG_GIM_POS; + if ((cfg->dir == DMA_DEV_TO_MEM) || + (cfg->dir == DMA_DEV_TO_DEV)) { + /* Set master port to 1 */ + src |= BIT(D40_SREG_CFG_MST_POS); + src |= D40_TYPE_TO_EVENT(cfg->dev_type); + + if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) + src |= BIT(D40_SREG_CFG_PHY_TM_POS); + else + src |= 3 << D40_SREG_CFG_PHY_TM_POS; + } + if ((cfg->dir == DMA_MEM_TO_DEV) || + (cfg->dir == DMA_DEV_TO_DEV)) { + /* Set master port to 1 */ + dst |= BIT(D40_SREG_CFG_MST_POS); + dst |= D40_TYPE_TO_EVENT(cfg->dev_type); + + if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) + dst |= BIT(D40_SREG_CFG_PHY_TM_POS); + else + dst |= 3 << D40_SREG_CFG_PHY_TM_POS; + } + /* Interrupt on end of transfer for destination */ + dst |= 
BIT(D40_SREG_CFG_TIM_POS); + + /* Generate interrupt on error */ + src |= BIT(D40_SREG_CFG_EIM_POS); + dst |= BIT(D40_SREG_CFG_EIM_POS); + + /* PSIZE */ + if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) { + src |= BIT(D40_SREG_CFG_PHY_PEN_POS); + src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS; + } + if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) { + dst |= BIT(D40_SREG_CFG_PHY_PEN_POS); + dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS; + } + + /* Element size */ + src |= d40_width_to_bits(cfg->src_info.data_width) + << D40_SREG_CFG_ESIZE_POS; + dst |= d40_width_to_bits(cfg->dst_info.data_width) + << D40_SREG_CFG_ESIZE_POS; + + /* Set the priority bit to high for the physical channel */ + if (cfg->high_priority) { + src |= BIT(D40_SREG_CFG_PRI_POS); + dst |= BIT(D40_SREG_CFG_PRI_POS); } if (cfg->src_info.big_endian) - src |= 1 << D40_SREG_CFG_LBE_POS; + src |= BIT(D40_SREG_CFG_LBE_POS); if (cfg->dst_info.big_endian) - dst |= 1 << D40_SREG_CFG_LBE_POS; + dst |= BIT(D40_SREG_CFG_LBE_POS); *src_cfg = src; *dst_cfg = dst; @@ -143,23 +149,22 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, num_elems = 2 << psize; /* Must be aligned */ - if (!IS_ALIGNED(data, 0x1 << data_width)) + if (!IS_ALIGNED(data, data_width)) return -EINVAL; /* Transfer size can't be smaller than (num_elms * elem_size) */ - if (data_size < num_elems * (0x1 << data_width)) + if (data_size < num_elems * data_width) return -EINVAL; /* The number of elements. IE now many chunks */ - lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS; + lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS; /* * Distance to next element sized entry. * Usually the size of the element unless you want gaps. */ if (addr_inc) - lli->reg_elt |= (0x1 << data_width) << - D40_SREG_ELEM_PHY_EIDX_POS; + lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS; /* Where the data is */ lli->reg_ptr = data; @@ -167,18 +172,20 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, /* If this scatter list entry is the last one, no next link */ if (next_lli == 0) - lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS; + lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS); else lli->reg_lnk = next_lli; /* Set/clear interrupt generation on this link item.*/ if (term_int) - lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS; + lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS); else - lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS); + lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS); - /* Post link */ - lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS; + /* + * Post link - D40_SREG_LNK_PHY_PRE_POS = 0 + * Relink happens after transfer completion. 
+ */ return 0; } @@ -187,16 +194,16 @@ static int d40_seg_size(int size, int data_width1, int data_width2) { u32 max_w = max(data_width1, data_width2); u32 min_w = min(data_width1, data_width2); - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); + u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); if (seg_max > STEDMA40_MAX_SEG_SIZE) - seg_max -= (1 << max_w); + seg_max -= max_w; if (size <= seg_max) return size; if (size <= 2 * seg_max) - return ALIGN(size / 2, 1 << max_w); + return ALIGN(size / 2, max_w); return seg_max; } @@ -362,10 +369,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, lli->lcsp13 = reg_cfg; /* The number of elements to transfer */ - lli->lcsp02 = ((data_size >> data_width) << + lli->lcsp02 = ((data_size / data_width) << D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; - BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); + BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE); /* 16 LSBs address of the current element */ lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index fdde8ef7754..1b47312bc57 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -432,8 +432,7 @@ enum d40_lli_flags { void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, - u32 *dst_cfg, - bool is_log); + u32 *dst_cfg); void d40_log_cfg(struct stedma40_chan_cfg *cfg, u32 *lcsp1, diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index d173d56dbb8..6ec82f76f01 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -51,6 +51,8 @@ struct gpio_rcar_priv { #define FILONOFF 0x28 #define BOTHEDGE 0x4c +#define RCAR_MAX_GPIO_PER_BANK 32 + static inline u32 gpio_rcar_read(struct gpio_rcar_priv *p, int offs) { return ioread32(p->base + offs); @@ -274,9 +276,35 @@ static struct irq_domain_ops gpio_rcar_irq_domain_ops = { .map = gpio_rcar_irq_domain_map, }; +static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p) +{ + struct gpio_rcar_config *pdata = p->pdev->dev.platform_data; + struct device_node *np = p->pdev->dev.of_node; + struct of_phandle_args args; + int ret; + + if (pdata) { + p->config = *pdata; + } else if (IS_ENABLED(CONFIG_OF) && np) { + ret = of_parse_phandle_with_args(np, "gpio-ranges", + "#gpio-range-cells", 0, &args); + p->config.number_of_pins = ret == 0 && args.args_count == 3 + ? args.args[2] + : RCAR_MAX_GPIO_PER_BANK; + p->config.gpio_base = -1; + } + + if (p->config.number_of_pins == 0 || + p->config.number_of_pins > RCAR_MAX_GPIO_PER_BANK) { + dev_warn(&p->pdev->dev, + "Invalid number of gpio lines %u, using %u\n", + p->config.number_of_pins, RCAR_MAX_GPIO_PER_BANK); + p->config.number_of_pins = RCAR_MAX_GPIO_PER_BANK; + } +} + static int gpio_rcar_probe(struct platform_device *pdev) { - struct gpio_rcar_config *pdata = pdev->dev.platform_data; struct gpio_rcar_priv *p; struct resource *io, *irq; struct gpio_chip *gpio_chip; @@ -291,14 +319,14 @@ static int gpio_rcar_probe(struct platform_device *pdev) goto err0; } - /* deal with driver instance configuration */ - if (pdata) - p->config = *pdata; - p->pdev = pdev; - platform_set_drvdata(pdev, p); spin_lock_init(&p->lock); + /* Get device configuration from DT node or platform data. 
*/ + gpio_rcar_parse_pdata(p); + + platform_set_drvdata(pdev, p); + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -325,6 +353,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) gpio_chip->set = gpio_rcar_set; gpio_chip->to_irq = gpio_rcar_to_irq; gpio_chip->label = name; + gpio_chip->dev = &pdev->dev; gpio_chip->owner = THIS_MODULE; gpio_chip->base = p->config.gpio_base; gpio_chip->ngpio = p->config.number_of_pins; @@ -371,10 +400,12 @@ static int gpio_rcar_probe(struct platform_device *pdev) p->config.irq_base, ret); } - ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0, - gpio_chip->base, gpio_chip->ngpio); - if (ret < 0) - dev_warn(&pdev->dev, "failed to add pin range\n"); + if (p->config.pctl_name) { + ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0, + gpio_chip->base, gpio_chip->ngpio); + if (ret < 0) + dev_warn(&pdev->dev, "failed to add pin range\n"); + } return 0; @@ -397,11 +428,23 @@ static int gpio_rcar_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id gpio_rcar_of_table[] = { + { + .compatible = "renesas,gpio-rcar", + }, + { }, +}; + +MODULE_DEVICE_TABLE(of, gpio_rcar_of_table); +#endif + static struct platform_driver gpio_rcar_device_driver = { .probe = gpio_rcar_probe, .remove = gpio_rcar_remove, .driver = { .name = "gpio_rcar", + .of_match_table = of_match_ptr(gpio_rcar_of_table), } }; diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index b22ca793374..a1392f47bbd 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -933,67 +933,6 @@ static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip) s3c_gpiolib_track(chip); } -#if defined(CONFIG_PLAT_S3C24XX) && defined(CONFIG_OF) -static int s3c24xx_gpio_xlate(struct gpio_chip *gc, - const struct of_phandle_args *gpiospec, u32 *flags) -{ - unsigned int pin; - - if (WARN_ON(gc->of_gpio_n_cells < 3)) - return -EINVAL; - - if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells)) - return -EINVAL; - - if (gpiospec->args[0] > gc->ngpio) - return -EINVAL; - - pin = gc->base + gpiospec->args[0]; - - if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1]))) - pr_warn("gpio_xlate: failed to set pin function\n"); - if (s3c_gpio_setpull(pin, gpiospec->args[2] & 0xffff)) - pr_warn("gpio_xlate: failed to set pin pull up/down\n"); - - if (flags) - *flags = gpiospec->args[2] >> 16; - - return gpiospec->args[0]; -} - -static const struct of_device_id s3c24xx_gpio_dt_match[] __initdata = { - { .compatible = "samsung,s3c24xx-gpio", }, - {} -}; - -static __init void s3c24xx_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip, - u64 base, u64 offset) -{ - struct gpio_chip *gc = &chip->chip; - u64 address; - - if (!of_have_populated_dt()) - return; - - address = chip->base ? 
base + ((u32)chip->base & 0xfff) : base + offset; - gc->of_node = of_find_matching_node_by_address(NULL, - s3c24xx_gpio_dt_match, address); - if (!gc->of_node) { - pr_info("gpio: device tree node not found for gpio controller" - " with base address %08llx\n", address); - return; - } - gc->of_gpio_n_cells = 3; - gc->of_xlate = s3c24xx_gpio_xlate; -} -#else -static __init void s3c24xx_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip, - u64 base, u64 offset) -{ - return; -} -#endif /* defined(CONFIG_PLAT_S3C24XX) && defined(CONFIG_OF) */ - static void __init s3c24xx_gpiolib_add_chips(struct samsung_gpio_chip *chip, int nr_chips, void __iomem *base) { @@ -1018,8 +957,6 @@ static void __init s3c24xx_gpiolib_add_chips(struct samsung_gpio_chip *chip, gc->direction_output = samsung_gpiolib_2bit_output; samsung_gpiolib_add(chip); - - s3c24xx_gpiolib_attach_ofnode(chip, S3C24XX_PA_GPIO, i * 0x10); } } @@ -3026,6 +2963,10 @@ static __init int samsung_gpiolib_init(void) */ struct device_node *pctrl_np; static const struct of_device_id exynos_pinctrl_ids[] = { + { .compatible = "samsung,s3c2412-pinctrl", }, + { .compatible = "samsung,s3c2416-pinctrl", }, + { .compatible = "samsung,s3c2440-pinctrl", }, + { .compatible = "samsung,s3c2450-pinctrl", }, { .compatible = "samsung,exynos4210-pinctrl", }, { .compatible = "samsung,exynos4x12-pinctrl", }, { .compatible = "samsung,exynos5250-pinctrl", }, diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 9545c9f0380..c8b5c13bcd0 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -16,4 +16,38 @@ config PL320_MBOX Management Engine, primarily for cpufreq. Say Y here if you want to use the PL320 IPCM support. +config OMAP_MBOX + tristate + help + This option is selected by any OMAP architecture specific mailbox + driver such as CONFIG_OMAP1_MBOX or CONFIG_OMAP2PLUS_MBOX. This + enables the common OMAP mailbox framework code. + +config OMAP1_MBOX + tristate "OMAP1 Mailbox framework support" + depends on ARCH_OMAP1 + select OMAP_MBOX + help + Mailbox implementation for OMAP chips with hardware for + interprocessor communication involving DSP in OMAP1. Say Y here + if you want to use OMAP1 Mailbox framework support. + +config OMAP2PLUS_MBOX + tristate "OMAP2+ Mailbox framework support" + depends on ARCH_OMAP2PLUS + select OMAP_MBOX + help + Mailbox implementation for OMAP family chips with hardware for + interprocessor communication involving DSP, IVA1.0 and IVA2 in + OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you + want to use OMAP2+ Mailbox framework support. + +config OMAP_MBOX_KFIFO_SIZE + int "Mailbox kfifo default buffer size (bytes)" + depends on OMAP2PLUS_MBOX || OMAP1_MBOX + default 256 + help + Specify the default size of mailbox's kfifo buffers (bytes). + This can also be changed at runtime (via the mbox_kfifo_size + module parameter). 
endif diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 543ad6a7950..e0facb34084 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -1 +1,7 @@ obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o + +obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o +obj-$(CONFIG_OMAP1_MBOX) += mailbox_omap1.o +mailbox_omap1-objs := mailbox-omap1.o +obj-$(CONFIG_OMAP2PLUS_MBOX) += mailbox_omap2.o +mailbox_omap2-objs := mailbox-omap2.o diff --git a/drivers/mailbox/mailbox-omap1.c b/drivers/mailbox/mailbox-omap1.c new file mode 100644 index 00000000000..9001b7633f1 --- /dev/null +++ b/drivers/mailbox/mailbox-omap1.c @@ -0,0 +1,203 @@ +/* + * Mailbox reservation modules for OMAP1 + * + * Copyright (C) 2006-2009 Nokia Corporation + * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/io.h> + +#include "omap-mbox.h" + +#define MAILBOX_ARM2DSP1 0x00 +#define MAILBOX_ARM2DSP1b 0x04 +#define MAILBOX_DSP2ARM1 0x08 +#define MAILBOX_DSP2ARM1b 0x0c +#define MAILBOX_DSP2ARM2 0x10 +#define MAILBOX_DSP2ARM2b 0x14 +#define MAILBOX_ARM2DSP1_Flag 0x18 +#define MAILBOX_DSP2ARM1_Flag 0x1c +#define MAILBOX_DSP2ARM2_Flag 0x20 + +static void __iomem *mbox_base; + +struct omap_mbox1_fifo { + unsigned long cmd; + unsigned long data; + unsigned long flag; +}; + +struct omap_mbox1_priv { + struct omap_mbox1_fifo tx_fifo; + struct omap_mbox1_fifo rx_fifo; +}; + +static inline int mbox_read_reg(size_t ofs) +{ + return __raw_readw(mbox_base + ofs); +} + +static inline void mbox_write_reg(u32 val, size_t ofs) +{ + __raw_writew(val, mbox_base + ofs); +} + +/* msg */ +static mbox_msg_t omap1_mbox_fifo_read(struct omap_mbox *mbox) +{ + struct omap_mbox1_fifo *fifo = + &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo; + mbox_msg_t msg; + + msg = mbox_read_reg(fifo->data); + msg |= ((mbox_msg_t) mbox_read_reg(fifo->cmd)) << 16; + + return msg; +} + +static void +omap1_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) +{ + struct omap_mbox1_fifo *fifo = + &((struct omap_mbox1_priv *)mbox->priv)->tx_fifo; + + mbox_write_reg(msg & 0xffff, fifo->data); + mbox_write_reg(msg >> 16, fifo->cmd); +} + +static int omap1_mbox_fifo_empty(struct omap_mbox *mbox) +{ + return 0; +} + +static int omap1_mbox_fifo_full(struct omap_mbox *mbox) +{ + struct omap_mbox1_fifo *fifo = + &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo; + + return mbox_read_reg(fifo->flag); +} + +/* irq */ +static void +omap1_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + if (irq == IRQ_RX) + enable_irq(mbox->irq); +} + +static void +omap1_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + if (irq == IRQ_RX) + disable_irq(mbox->irq); +} + +static int +omap1_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + if (irq == IRQ_TX) + return 0; + return 1; +} + +static struct omap_mbox_ops omap1_mbox_ops = { + .type = OMAP_MBOX_TYPE1, + .fifo_read = omap1_mbox_fifo_read, + .fifo_write = omap1_mbox_fifo_write, + .fifo_empty = omap1_mbox_fifo_empty, + .fifo_full = omap1_mbox_fifo_full, + .enable_irq = omap1_mbox_enable_irq, + .disable_irq = omap1_mbox_disable_irq, + .is_irq = omap1_mbox_is_irq, +}; + +/* FIXME: the following struct should be created automatically by the user id */ + +/* DSP */ +static struct omap_mbox1_priv 
omap1_mbox_dsp_priv = { + .tx_fifo = { + .cmd = MAILBOX_ARM2DSP1b, + .data = MAILBOX_ARM2DSP1, + .flag = MAILBOX_ARM2DSP1_Flag, + }, + .rx_fifo = { + .cmd = MAILBOX_DSP2ARM1b, + .data = MAILBOX_DSP2ARM1, + .flag = MAILBOX_DSP2ARM1_Flag, + }, +}; + +static struct omap_mbox mbox_dsp_info = { + .name = "dsp", + .ops = &omap1_mbox_ops, + .priv = &omap1_mbox_dsp_priv, +}; + +static struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL }; + +static int omap1_mbox_probe(struct platform_device *pdev) +{ + struct resource *mem; + int ret; + struct omap_mbox **list; + + list = omap1_mboxes; + list[0]->irq = platform_get_irq_byname(pdev, "dsp"); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -ENOENT; + + mbox_base = ioremap(mem->start, resource_size(mem)); + if (!mbox_base) + return -ENOMEM; + + ret = omap_mbox_register(&pdev->dev, list); + if (ret) { + iounmap(mbox_base); + return ret; + } + + return 0; +} + +static int omap1_mbox_remove(struct platform_device *pdev) +{ + omap_mbox_unregister(); + iounmap(mbox_base); + return 0; +} + +static struct platform_driver omap1_mbox_driver = { + .probe = omap1_mbox_probe, + .remove = omap1_mbox_remove, + .driver = { + .name = "omap-mailbox", + }, +}; + +static int __init omap1_mbox_init(void) +{ + return platform_driver_register(&omap1_mbox_driver); +} + +static void __exit omap1_mbox_exit(void) +{ + platform_driver_unregister(&omap1_mbox_driver); +} + +module_init(omap1_mbox_init); +module_exit(omap1_mbox_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions"); +MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); +MODULE_ALIAS("platform:omap1-mailbox"); diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c new file mode 100644 index 00000000000..eba380d7b17 --- /dev/null +++ b/drivers/mailbox/mailbox-omap2.c @@ -0,0 +1,358 @@ +/* + * Mailbox reservation modules for OMAP2/3 + * + * Copyright (C) 2006-2009 Nokia Corporation + * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> + * and Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <linux/platform_data/mailbox-omap.h> + +#include "omap-mbox.h" + +#define MAILBOX_REVISION 0x000 +#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m)) +#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m)) +#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m)) +#define MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u)) +#define MAILBOX_IRQENABLE(u) (0x104 + 8 * (u)) + +#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u)) +#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u)) +#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u)) + +#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m))) +#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1)) + +#define MBOX_REG_SIZE 0x120 + +#define OMAP4_MBOX_REG_SIZE 0x130 + +#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32)) +#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32)) + +static void __iomem *mbox_base; + +struct omap_mbox2_fifo { + unsigned long msg; + unsigned long fifo_stat; + unsigned long msg_stat; +}; + +struct omap_mbox2_priv { + struct omap_mbox2_fifo tx_fifo; + struct omap_mbox2_fifo rx_fifo; + unsigned long irqenable; + unsigned long irqstatus; + u32 newmsg_bit; + u32 notfull_bit; + u32 ctx[OMAP4_MBOX_NR_REGS]; + unsigned long irqdisable; + u32 intr_type; +}; + +static inline unsigned int mbox_read_reg(size_t ofs) +{ + return __raw_readl(mbox_base + ofs); +} + +static inline void mbox_write_reg(u32 val, size_t ofs) +{ + __raw_writel(val, mbox_base + ofs); +} + +/* Mailbox H/W preparations */ +static int omap2_mbox_startup(struct omap_mbox *mbox) +{ + u32 l; + + pm_runtime_enable(mbox->dev->parent); + pm_runtime_get_sync(mbox->dev->parent); + + l = mbox_read_reg(MAILBOX_REVISION); + pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f)); + + return 0; +} + +static void omap2_mbox_shutdown(struct omap_mbox *mbox) +{ + pm_runtime_put_sync(mbox->dev->parent); + pm_runtime_disable(mbox->dev->parent); +} + +/* Mailbox FIFO handle functions */ +static mbox_msg_t omap2_mbox_fifo_read(struct omap_mbox *mbox) +{ + struct omap_mbox2_fifo *fifo = + &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo; + return (mbox_msg_t) mbox_read_reg(fifo->msg); +} + +static void omap2_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) +{ + struct omap_mbox2_fifo *fifo = + &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo; + mbox_write_reg(msg, fifo->msg); +} + +static int omap2_mbox_fifo_empty(struct omap_mbox *mbox) +{ + struct omap_mbox2_fifo *fifo = + &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo; + return (mbox_read_reg(fifo->msg_stat) == 0); +} + +static int omap2_mbox_fifo_full(struct omap_mbox *mbox) +{ + struct omap_mbox2_fifo *fifo = + &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo; + return mbox_read_reg(fifo->fifo_stat); +} + +/* Mailbox IRQ handle functions */ +static void omap2_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + struct omap_mbox2_priv *p = mbox->priv; + u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; + + l = mbox_read_reg(p->irqenable); + l |= bit; + mbox_write_reg(l, p->irqenable); +} + +static void omap2_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + struct omap_mbox2_priv *p = mbox->priv; + u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; + + /* + * Read and update the interrupt configuration register for pre-OMAP4. 
+ * OMAP4 and later SoCs have a dedicated interrupt disabling register. + */ + if (!p->intr_type) + bit = mbox_read_reg(p->irqdisable) & ~bit; + + mbox_write_reg(bit, p->irqdisable); +} + +static void omap2_mbox_ack_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + struct omap_mbox2_priv *p = mbox->priv; + u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; + + mbox_write_reg(bit, p->irqstatus); + + /* Flush posted write for irq status to avoid spurious interrupts */ + mbox_read_reg(p->irqstatus); +} + +static int omap2_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + struct omap_mbox2_priv *p = mbox->priv; + u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; + u32 enable = mbox_read_reg(p->irqenable); + u32 status = mbox_read_reg(p->irqstatus); + + return (int)(enable & status & bit); +} + +static void omap2_mbox_save_ctx(struct omap_mbox *mbox) +{ + int i; + struct omap_mbox2_priv *p = mbox->priv; + int nr_regs; + + if (p->intr_type) + nr_regs = OMAP4_MBOX_NR_REGS; + else + nr_regs = MBOX_NR_REGS; + for (i = 0; i < nr_regs; i++) { + p->ctx[i] = mbox_read_reg(i * sizeof(u32)); + + dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__, + i, p->ctx[i]); + } +} + +static void omap2_mbox_restore_ctx(struct omap_mbox *mbox) +{ + int i; + struct omap_mbox2_priv *p = mbox->priv; + int nr_regs; + + if (p->intr_type) + nr_regs = OMAP4_MBOX_NR_REGS; + else + nr_regs = MBOX_NR_REGS; + for (i = 0; i < nr_regs; i++) { + mbox_write_reg(p->ctx[i], i * sizeof(u32)); + + dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__, + i, p->ctx[i]); + } +} + +static struct omap_mbox_ops omap2_mbox_ops = { + .type = OMAP_MBOX_TYPE2, + .startup = omap2_mbox_startup, + .shutdown = omap2_mbox_shutdown, + .fifo_read = omap2_mbox_fifo_read, + .fifo_write = omap2_mbox_fifo_write, + .fifo_empty = omap2_mbox_fifo_empty, + .fifo_full = omap2_mbox_fifo_full, + .enable_irq = omap2_mbox_enable_irq, + .disable_irq = omap2_mbox_disable_irq, + .ack_irq = omap2_mbox_ack_irq, + .is_irq = omap2_mbox_is_irq, + .save_ctx = omap2_mbox_save_ctx, + .restore_ctx = omap2_mbox_restore_ctx, +}; + +static int omap2_mbox_probe(struct platform_device *pdev) +{ + struct resource *mem; + int ret; + struct omap_mbox **list, *mbox, *mboxblk; + struct omap_mbox2_priv *priv, *privblk; + struct omap_mbox_pdata *pdata = pdev->dev.platform_data; + struct omap_mbox_dev_info *info; + int i; + + if (!pdata || !pdata->info_cnt || !pdata->info) { + pr_err("%s: platform not supported\n", __func__); + return -ENODEV; + } + + /* allocate one extra for marking end of list */ + list = kzalloc((pdata->info_cnt + 1) * sizeof(*list), GFP_KERNEL); + if (!list) + return -ENOMEM; + + mboxblk = mbox = kzalloc(pdata->info_cnt * sizeof(*mbox), GFP_KERNEL); + if (!mboxblk) { + ret = -ENOMEM; + goto free_list; + } + + privblk = priv = kzalloc(pdata->info_cnt * sizeof(*priv), GFP_KERNEL); + if (!privblk) { + ret = -ENOMEM; + goto free_mboxblk; + } + + info = pdata->info; + for (i = 0; i < pdata->info_cnt; i++, info++, priv++) { + priv->tx_fifo.msg = MAILBOX_MESSAGE(info->tx_id); + priv->tx_fifo.fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id); + priv->rx_fifo.msg = MAILBOX_MESSAGE(info->rx_id); + priv->rx_fifo.msg_stat = MAILBOX_MSGSTATUS(info->rx_id); + priv->notfull_bit = MAILBOX_IRQ_NOTFULL(info->tx_id); + priv->newmsg_bit = MAILBOX_IRQ_NEWMSG(info->rx_id); + if (pdata->intr_type) { + priv->irqenable = OMAP4_MAILBOX_IRQENABLE(info->usr_id); + priv->irqstatus = OMAP4_MAILBOX_IRQSTATUS(info->usr_id); + priv->irqdisable = + 
OMAP4_MAILBOX_IRQENABLE_CLR(info->usr_id); + } else { + priv->irqenable = MAILBOX_IRQENABLE(info->usr_id); + priv->irqstatus = MAILBOX_IRQSTATUS(info->usr_id); + priv->irqdisable = MAILBOX_IRQENABLE(info->usr_id); + } + priv->intr_type = pdata->intr_type; + + mbox->priv = priv; + mbox->name = info->name; + mbox->ops = &omap2_mbox_ops; + mbox->irq = platform_get_irq(pdev, info->irq_id); + if (mbox->irq < 0) { + ret = mbox->irq; + goto free_privblk; + } + list[i] = mbox++; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + ret = -ENOENT; + goto free_privblk; + } + + mbox_base = ioremap(mem->start, resource_size(mem)); + if (!mbox_base) { + ret = -ENOMEM; + goto free_privblk; + } + + ret = omap_mbox_register(&pdev->dev, list); + if (ret) + goto unmap_mbox; + platform_set_drvdata(pdev, list); + + return 0; + +unmap_mbox: + iounmap(mbox_base); +free_privblk: + kfree(privblk); +free_mboxblk: + kfree(mboxblk); +free_list: + kfree(list); + return ret; +} + +static int omap2_mbox_remove(struct platform_device *pdev) +{ + struct omap_mbox2_priv *privblk; + struct omap_mbox **list = platform_get_drvdata(pdev); + struct omap_mbox *mboxblk = list[0]; + + privblk = mboxblk->priv; + omap_mbox_unregister(); + iounmap(mbox_base); + kfree(privblk); + kfree(mboxblk); + kfree(list); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver omap2_mbox_driver = { + .probe = omap2_mbox_probe, + .remove = omap2_mbox_remove, + .driver = { + .name = "omap-mailbox", + }, +}; + +static int __init omap2_mbox_init(void) +{ + return platform_driver_register(&omap2_mbox_driver); +} + +static void __exit omap2_mbox_exit(void) +{ + platform_driver_unregister(&omap2_mbox_driver); +} + +module_init(omap2_mbox_init); +module_exit(omap2_mbox_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("omap mailbox: omap2/3/4 architecture specific functions"); +MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); +MODULE_AUTHOR("Paul Mundt"); +MODULE_ALIAS("platform:omap2-mailbox"); diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c new file mode 100644 index 00000000000..d79a646b904 --- /dev/null +++ b/drivers/mailbox/omap-mailbox.c @@ -0,0 +1,469 @@ +/* + * OMAP mailbox driver + * + * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved. + * + * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/kfifo.h> +#include <linux/err.h> +#include <linux/notifier.h> +#include <linux/module.h> + +#include "omap-mbox.h" + +static struct omap_mbox **mboxes; + +static int mbox_configured; +static DEFINE_MUTEX(mbox_configured_lock); + +static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE; +module_param(mbox_kfifo_size, uint, S_IRUGO); +MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)"); + +/* Mailbox FIFO handle functions */ +static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox) +{ + return mbox->ops->fifo_read(mbox); +} +static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) +{ + mbox->ops->fifo_write(mbox, msg); +} +static inline int mbox_fifo_empty(struct omap_mbox *mbox) +{ + return mbox->ops->fifo_empty(mbox); +} +static inline int mbox_fifo_full(struct omap_mbox *mbox) +{ + return mbox->ops->fifo_full(mbox); +} + +/* Mailbox IRQ handle functions */ +static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + if (mbox->ops->ack_irq) + mbox->ops->ack_irq(mbox, irq); +} +static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + return mbox->ops->is_irq(mbox, irq); +} + +/* + * message sender + */ +static int __mbox_poll_for_space(struct omap_mbox *mbox) +{ + int ret = 0, i = 1000; + + while (mbox_fifo_full(mbox)) { + if (mbox->ops->type == OMAP_MBOX_TYPE2) + return -1; + if (--i == 0) + return -1; + udelay(1); + } + return ret; +} + +int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg) +{ + struct omap_mbox_queue *mq = mbox->txq; + int ret = 0, len; + + spin_lock_bh(&mq->lock); + + if (kfifo_avail(&mq->fifo) < sizeof(msg)) { + ret = -ENOMEM; + goto out; + } + + if (kfifo_is_empty(&mq->fifo) && !__mbox_poll_for_space(mbox)) { + mbox_fifo_write(mbox, msg); + goto out; + } + + len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); + WARN_ON(len != sizeof(msg)); + + tasklet_schedule(&mbox->txq->tasklet); + +out: + spin_unlock_bh(&mq->lock); + return ret; +} +EXPORT_SYMBOL(omap_mbox_msg_send); + +void omap_mbox_save_ctx(struct omap_mbox *mbox) +{ + if (!mbox->ops->save_ctx) { + dev_err(mbox->dev, "%s:\tno save\n", __func__); + return; + } + + mbox->ops->save_ctx(mbox); +} +EXPORT_SYMBOL(omap_mbox_save_ctx); + +void omap_mbox_restore_ctx(struct omap_mbox *mbox) +{ + if (!mbox->ops->restore_ctx) { + dev_err(mbox->dev, "%s:\tno restore\n", __func__); + return; + } + + mbox->ops->restore_ctx(mbox); +} +EXPORT_SYMBOL(omap_mbox_restore_ctx); + +void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + mbox->ops->enable_irq(mbox, irq); +} +EXPORT_SYMBOL(omap_mbox_enable_irq); + +void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +{ + mbox->ops->disable_irq(mbox, irq); +} +EXPORT_SYMBOL(omap_mbox_disable_irq); + +static void mbox_tx_tasklet(unsigned long tx_data) +{ + struct omap_mbox *mbox = (struct omap_mbox *)tx_data; + struct omap_mbox_queue *mq = mbox->txq; + mbox_msg_t msg; + int ret; + + while (kfifo_len(&mq->fifo)) { + if (__mbox_poll_for_space(mbox)) { + omap_mbox_enable_irq(mbox, IRQ_TX); + break; + } + + ret = kfifo_out(&mq->fifo, (unsigned char *)&msg, + 
sizeof(msg)); + WARN_ON(ret != sizeof(msg)); + + mbox_fifo_write(mbox, msg); + } +} + +/* + * Message receiver(workqueue) + */ +static void mbox_rx_work(struct work_struct *work) +{ + struct omap_mbox_queue *mq = + container_of(work, struct omap_mbox_queue, work); + mbox_msg_t msg; + int len; + + while (kfifo_len(&mq->fifo) >= sizeof(msg)) { + len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); + WARN_ON(len != sizeof(msg)); + + blocking_notifier_call_chain(&mq->mbox->notifier, len, + (void *)msg); + spin_lock_irq(&mq->lock); + if (mq->full) { + mq->full = false; + omap_mbox_enable_irq(mq->mbox, IRQ_RX); + } + spin_unlock_irq(&mq->lock); + } +} + +/* + * Mailbox interrupt handler + */ +static void __mbox_tx_interrupt(struct omap_mbox *mbox) +{ + omap_mbox_disable_irq(mbox, IRQ_TX); + ack_mbox_irq(mbox, IRQ_TX); + tasklet_schedule(&mbox->txq->tasklet); +} + +static void __mbox_rx_interrupt(struct omap_mbox *mbox) +{ + struct omap_mbox_queue *mq = mbox->rxq; + mbox_msg_t msg; + int len; + + while (!mbox_fifo_empty(mbox)) { + if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) { + omap_mbox_disable_irq(mbox, IRQ_RX); + mq->full = true; + goto nomem; + } + + msg = mbox_fifo_read(mbox); + + len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); + WARN_ON(len != sizeof(msg)); + + if (mbox->ops->type == OMAP_MBOX_TYPE1) + break; + } + + /* no more messages in the fifo. clear IRQ source. */ + ack_mbox_irq(mbox, IRQ_RX); +nomem: + schedule_work(&mbox->rxq->work); +} + +static irqreturn_t mbox_interrupt(int irq, void *p) +{ + struct omap_mbox *mbox = p; + + if (is_mbox_irq(mbox, IRQ_TX)) + __mbox_tx_interrupt(mbox); + + if (is_mbox_irq(mbox, IRQ_RX)) + __mbox_rx_interrupt(mbox); + + return IRQ_HANDLED; +} + +static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox, + void (*work) (struct work_struct *), + void (*tasklet)(unsigned long)) +{ + struct omap_mbox_queue *mq; + + mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL); + if (!mq) + return NULL; + + spin_lock_init(&mq->lock); + + if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL)) + goto error; + + if (work) + INIT_WORK(&mq->work, work); + + if (tasklet) + tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox); + return mq; +error: + kfree(mq); + return NULL; +} + +static void mbox_queue_free(struct omap_mbox_queue *q) +{ + kfifo_free(&q->fifo); + kfree(q); +} + +static int omap_mbox_startup(struct omap_mbox *mbox) +{ + int ret = 0; + struct omap_mbox_queue *mq; + + mutex_lock(&mbox_configured_lock); + if (!mbox_configured++) { + if (likely(mbox->ops->startup)) { + ret = mbox->ops->startup(mbox); + if (unlikely(ret)) + goto fail_startup; + } else + goto fail_startup; + } + + if (!mbox->use_count++) { + mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet); + if (!mq) { + ret = -ENOMEM; + goto fail_alloc_txq; + } + mbox->txq = mq; + + mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL); + if (!mq) { + ret = -ENOMEM; + goto fail_alloc_rxq; + } + mbox->rxq = mq; + mq->mbox = mbox; + ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED, + mbox->name, mbox); + if (unlikely(ret)) { + pr_err("failed to register mailbox interrupt:%d\n", + ret); + goto fail_request_irq; + } + + omap_mbox_enable_irq(mbox, IRQ_RX); + } + mutex_unlock(&mbox_configured_lock); + return 0; + +fail_request_irq: + mbox_queue_free(mbox->rxq); +fail_alloc_rxq: + mbox_queue_free(mbox->txq); +fail_alloc_txq: + if (mbox->ops->shutdown) + mbox->ops->shutdown(mbox); + mbox->use_count--; +fail_startup: + mbox_configured--; + 
mutex_unlock(&mbox_configured_lock); + return ret; +} + +static void omap_mbox_fini(struct omap_mbox *mbox) +{ + mutex_lock(&mbox_configured_lock); + + if (!--mbox->use_count) { + omap_mbox_disable_irq(mbox, IRQ_RX); + free_irq(mbox->irq, mbox); + tasklet_kill(&mbox->txq->tasklet); + flush_work(&mbox->rxq->work); + mbox_queue_free(mbox->txq); + mbox_queue_free(mbox->rxq); + } + + if (likely(mbox->ops->shutdown)) { + if (!--mbox_configured) + mbox->ops->shutdown(mbox); + } + + mutex_unlock(&mbox_configured_lock); +} + +struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) +{ + struct omap_mbox *_mbox, *mbox = NULL; + int i, ret; + + if (!mboxes) + return ERR_PTR(-EINVAL); + + for (i = 0; (_mbox = mboxes[i]); i++) { + if (!strcmp(_mbox->name, name)) { + mbox = _mbox; + break; + } + } + + if (!mbox) + return ERR_PTR(-ENOENT); + + if (nb) + blocking_notifier_chain_register(&mbox->notifier, nb); + + ret = omap_mbox_startup(mbox); + if (ret) { + blocking_notifier_chain_unregister(&mbox->notifier, nb); + return ERR_PTR(-ENODEV); + } + + return mbox; +} +EXPORT_SYMBOL(omap_mbox_get); + +void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&mbox->notifier, nb); + omap_mbox_fini(mbox); +} +EXPORT_SYMBOL(omap_mbox_put); + +static struct class omap_mbox_class = { .name = "mbox", }; + +int omap_mbox_register(struct device *parent, struct omap_mbox **list) +{ + int ret; + int i; + + mboxes = list; + if (!mboxes) + return -EINVAL; + + for (i = 0; mboxes[i]; i++) { + struct omap_mbox *mbox = mboxes[i]; + mbox->dev = device_create(&omap_mbox_class, + parent, 0, mbox, "%s", mbox->name); + if (IS_ERR(mbox->dev)) { + ret = PTR_ERR(mbox->dev); + goto err_out; + } + + BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier); + } + return 0; + +err_out: + while (i--) + device_unregister(mboxes[i]->dev); + return ret; +} +EXPORT_SYMBOL(omap_mbox_register); + +int omap_mbox_unregister(void) +{ + int i; + + if (!mboxes) + return -EINVAL; + + for (i = 0; mboxes[i]; i++) + device_unregister(mboxes[i]->dev); + mboxes = NULL; + return 0; +} +EXPORT_SYMBOL(omap_mbox_unregister); + +static int __init omap_mbox_init(void) +{ + int err; + + err = class_register(&omap_mbox_class); + if (err) + return err; + + /* kfifo size sanity check: alignment and minimal size */ + mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t)); + mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, + sizeof(mbox_msg_t)); + + return 0; +} +subsys_initcall(omap_mbox_init); + +static void __exit omap_mbox_exit(void) +{ + class_unregister(&omap_mbox_class); +} +module_exit(omap_mbox_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging"); +MODULE_AUTHOR("Toshihiro Kobayashi"); +MODULE_AUTHOR("Hiroshi DOYU"); diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h new file mode 100644 index 00000000000..6cd38fc6859 --- /dev/null +++ b/drivers/mailbox/omap-mbox.h @@ -0,0 +1,67 @@ +/* + * omap-mbox.h: OMAP mailbox internal definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef OMAP_MBOX_H +#define OMAP_MBOX_H + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/kfifo.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/omap-mailbox.h> + +typedef int __bitwise omap_mbox_type_t; +#define OMAP_MBOX_TYPE1 ((__force omap_mbox_type_t) 1) +#define OMAP_MBOX_TYPE2 ((__force omap_mbox_type_t) 2) + +struct omap_mbox_ops { + omap_mbox_type_t type; + int (*startup)(struct omap_mbox *mbox); + void (*shutdown)(struct omap_mbox *mbox); + /* fifo */ + mbox_msg_t (*fifo_read)(struct omap_mbox *mbox); + void (*fifo_write)(struct omap_mbox *mbox, mbox_msg_t msg); + int (*fifo_empty)(struct omap_mbox *mbox); + int (*fifo_full)(struct omap_mbox *mbox); + /* irq */ + void (*enable_irq)(struct omap_mbox *mbox, + omap_mbox_irq_t irq); + void (*disable_irq)(struct omap_mbox *mbox, + omap_mbox_irq_t irq); + void (*ack_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq); + int (*is_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq); + /* ctx */ + void (*save_ctx)(struct omap_mbox *mbox); + void (*restore_ctx)(struct omap_mbox *mbox); +}; + +struct omap_mbox_queue { + spinlock_t lock; + struct kfifo fifo; + struct work_struct work; + struct tasklet_struct tasklet; + struct omap_mbox *mbox; + bool full; +}; + +struct omap_mbox { + const char *name; + unsigned int irq; + struct omap_mbox_queue *txq, *rxq; + struct omap_mbox_ops *ops; + struct device *dev; + void *priv; + int use_count; + struct blocking_notifier_head notifier; +}; + +int omap_mbox_register(struct device *parent, struct omap_mbox **); +int omap_mbox_unregister(void); + +#endif /* OMAP_MBOX_H */ diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index a1c6dd32e14..901a388dbea 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -217,6 +217,11 @@ config PINCTRL_EXYNOS5440 select PINMUX select PINCONF +config PINCTRL_S3C24XX + bool "Samsung S3C24XX SoC pinctrl driver" + depends on ARCH_S3C24XX + select PINCTRL_SAMSUNG + config PINCTRL_S3C64XX bool "Samsung S3C64XX SoC pinctrl driver" depends on ARCH_S3C64XX diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 9bdaeb8785c..f90b645fb60 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o +obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o diff --git a/drivers/pinctrl/pinctrl-s3c24xx.c b/drivers/pinctrl/pinctrl-s3c24xx.c new file mode 100644 index 00000000000..24446daaad7 --- /dev/null +++ b/drivers/pinctrl/pinctrl-s3c24xx.c @@ -0,0 +1,651 @@ +/* + * S3C24XX specific support for Samsung pinctrl/gpiolib driver. + * + * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This file contains the SamsungS3C24XX specific information required by the + * Samsung pinctrl/gpiolib driver. It also includes the implementation of + * external gpio and wakeup interrupt support. 
+ */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/of_irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/err.h> + +#include "pinctrl-samsung.h" + +#define NUM_EINT 24 +#define NUM_EINT_IRQ 6 +#define EINT_MAX_PER_GROUP 8 + +#define EINTPEND_REG 0xa8 +#define EINTMASK_REG 0xa4 + +#define EINT_GROUP(i) ((int)((i) / EINT_MAX_PER_GROUP)) +#define EINT_REG(i) ((EINT_GROUP(i) * 4) + 0x88) +#define EINT_OFFS(i) ((i) % EINT_MAX_PER_GROUP * 4) + +#define EINT_LEVEL_LOW 0 +#define EINT_LEVEL_HIGH 1 +#define EINT_EDGE_FALLING 2 +#define EINT_EDGE_RISING 4 +#define EINT_EDGE_BOTH 6 +#define EINT_MASK 0xf + +static struct samsung_pin_bank_type bank_type_1bit = { + .fld_width = { 1, 1, }, + .reg_offset = { 0x00, 0x04, }, +}; + +static struct samsung_pin_bank_type bank_type_2bit = { + .fld_width = { 2, 1, 2, }, + .reg_offset = { 0x00, 0x04, 0x08, }, +}; + +#define PIN_BANK_A(pins, reg, id) \ + { \ + .type = &bank_type_1bit, \ + .pctl_offset = reg, \ + .nr_pins = pins, \ + .eint_type = EINT_TYPE_NONE, \ + .name = id \ + } + +#define PIN_BANK_2BIT(pins, reg, id) \ + { \ + .type = &bank_type_2bit, \ + .pctl_offset = reg, \ + .nr_pins = pins, \ + .eint_type = EINT_TYPE_NONE, \ + .name = id \ + } + +#define PIN_BANK_2BIT_EINTW(pins, reg, id, eoffs, emask)\ + { \ + .type = &bank_type_2bit, \ + .pctl_offset = reg, \ + .nr_pins = pins, \ + .eint_type = EINT_TYPE_WKUP, \ + .eint_func = 2, \ + .eint_mask = emask, \ + .eint_offset = eoffs, \ + .name = id \ + } + +/** + * struct s3c24xx_eint_data: EINT common data + * @drvdata: pin controller driver data + * @domains: IRQ domains of particular EINT interrupts + * @parents: mapped parent irqs in the main interrupt controller + */ +struct s3c24xx_eint_data { + struct samsung_pinctrl_drv_data *drvdata; + struct irq_domain *domains[NUM_EINT]; + int parents[NUM_EINT_IRQ]; +}; + +/** + * struct s3c24xx_eint_domain_data: per irq-domain data + * @bank: pin bank related to the domain + * @eint_data: common data + * eint0_3_parent_only: live eints 0-3 only in the main intc + */ +struct s3c24xx_eint_domain_data { + struct samsung_pin_bank *bank; + struct s3c24xx_eint_data *eint_data; + bool eint0_3_parent_only; +}; + +static int s3c24xx_eint_get_trigger(unsigned int type) +{ + switch (type) { + case IRQ_TYPE_EDGE_RISING: + return EINT_EDGE_RISING; + break; + case IRQ_TYPE_EDGE_FALLING: + return EINT_EDGE_FALLING; + break; + case IRQ_TYPE_EDGE_BOTH: + return EINT_EDGE_BOTH; + break; + case IRQ_TYPE_LEVEL_HIGH: + return EINT_LEVEL_HIGH; + break; + case IRQ_TYPE_LEVEL_LOW: + return EINT_LEVEL_LOW; + break; + default: + return -EINVAL; + } +} + +static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type) +{ + /* Edge- and level-triggered interrupts need different handlers */ + if (type & IRQ_TYPE_EDGE_BOTH) + __irq_set_handler_locked(irq, handle_edge_irq); + else + __irq_set_handler_locked(irq, handle_level_irq); +} + +static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d, + struct samsung_pin_bank *bank, int pin) +{ + struct samsung_pin_bank_type *bank_type = bank->type; + unsigned long flags; + void __iomem *reg; + u8 shift; + u32 mask; + u32 val; + + /* Make sure that pin is configured as interrupt */ + reg = d->virt_base + bank->pctl_offset; + shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC]; + mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; + + 
spin_lock_irqsave(&bank->slock, flags); + + val = readl(reg); + val &= ~(mask << shift); + val |= bank->eint_func << shift; + writel(val, reg); + + spin_unlock_irqrestore(&bank->slock, flags); +} + +static int s3c24xx_eint_type(struct irq_data *data, unsigned int type) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct samsung_pinctrl_drv_data *d = bank->drvdata; + int index = bank->eint_offset + data->hwirq; + void __iomem *reg; + int trigger; + u8 shift; + u32 val; + + trigger = s3c24xx_eint_get_trigger(type); + if (trigger < 0) { + dev_err(d->dev, "unsupported external interrupt type\n"); + return -EINVAL; + } + + s3c24xx_eint_set_handler(data->irq, type); + + /* Set up interrupt trigger */ + reg = d->virt_base + EINT_REG(index); + shift = EINT_OFFS(index); + + val = readl(reg); + val &= ~(EINT_MASK << shift); + val |= trigger << shift; + writel(val, reg); + + s3c24xx_eint_set_function(d, bank, data->hwirq); + + return 0; +} + +/* Handling of EINTs 0-3 on all except S3C2412 and S3C2413 */ + +static void s3c2410_eint0_3_ack(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data; + struct s3c24xx_eint_data *eint_data = ddata->eint_data; + int parent_irq = eint_data->parents[data->hwirq]; + struct irq_chip *parent_chip = irq_get_chip(parent_irq); + + parent_chip->irq_ack(irq_get_irq_data(parent_irq)); +} + +static void s3c2410_eint0_3_mask(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data; + struct s3c24xx_eint_data *eint_data = ddata->eint_data; + int parent_irq = eint_data->parents[data->hwirq]; + struct irq_chip *parent_chip = irq_get_chip(parent_irq); + + parent_chip->irq_mask(irq_get_irq_data(parent_irq)); +} + +static void s3c2410_eint0_3_unmask(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data; + struct s3c24xx_eint_data *eint_data = ddata->eint_data; + int parent_irq = eint_data->parents[data->hwirq]; + struct irq_chip *parent_chip = irq_get_chip(parent_irq); + + parent_chip->irq_unmask(irq_get_irq_data(parent_irq)); +} + +static struct irq_chip s3c2410_eint0_3_chip = { + .name = "s3c2410-eint0_3", + .irq_ack = s3c2410_eint0_3_ack, + .irq_mask = s3c2410_eint0_3_mask, + .irq_unmask = s3c2410_eint0_3_unmask, + .irq_set_type = s3c24xx_eint_type, +}; + +static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +{ + struct irq_data *data = irq_desc_get_irq_data(desc); + struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq); + unsigned int virq; + + /* the first 4 eints have a simple 1 to 1 mapping */ + virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq); + /* Something must be really wrong if an unmapped EINT is unmasked */ + BUG_ON(!virq); + + generic_handle_irq(virq); +} + +/* Handling of EINTs 0-3 on S3C2412 and S3C2413 */ + +static void s3c2412_eint0_3_ack(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct samsung_pinctrl_drv_data *d = bank->drvdata; + + unsigned long bitval = 1UL << data->hwirq; + writel(bitval, d->virt_base + EINTPEND_REG); +} + +static void s3c2412_eint0_3_mask(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct samsung_pinctrl_drv_data *d = 
bank->drvdata; + unsigned long mask; + + mask = readl(d->virt_base + EINTMASK_REG); + mask |= (1UL << data->hwirq); + writel(mask, d->virt_base + EINTMASK_REG); +} + +static void s3c2412_eint0_3_unmask(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct samsung_pinctrl_drv_data *d = bank->drvdata; + unsigned long mask; + + mask = readl(d->virt_base + EINTMASK_REG); + mask &= ~(1UL << data->hwirq); + writel(mask, d->virt_base + EINTMASK_REG); +} + +static struct irq_chip s3c2412_eint0_3_chip = { + .name = "s3c2412-eint0_3", + .irq_ack = s3c2412_eint0_3_ack, + .irq_mask = s3c2412_eint0_3_mask, + .irq_unmask = s3c2412_eint0_3_unmask, + .irq_set_type = s3c24xx_eint_type, +}; + +static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_get_chip(irq); + struct irq_data *data = irq_desc_get_irq_data(desc); + struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq); + unsigned int virq; + + chained_irq_enter(chip, desc); + + /* the first 4 eints have a simple 1 to 1 mapping */ + virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq); + /* Something must be really wrong if an unmapped EINT is unmasked */ + BUG_ON(!virq); + + generic_handle_irq(virq); + + chained_irq_exit(chip, desc); +} + +/* Handling of all other eints */ + +static void s3c24xx_eint_ack(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct samsung_pinctrl_drv_data *d = bank->drvdata; + unsigned char index = bank->eint_offset + data->hwirq; + + writel(1UL << index, d->virt_base + EINTPEND_REG); +} + +static void s3c24xx_eint_mask(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct samsung_pinctrl_drv_data *d = bank->drvdata; + unsigned char index = bank->eint_offset + data->hwirq; + unsigned long mask; + + mask = readl(d->virt_base + EINTMASK_REG); + mask |= (1UL << index); + writel(mask, d->virt_base + EINTMASK_REG); +} + +static void s3c24xx_eint_unmask(struct irq_data *data) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); + struct samsung_pinctrl_drv_data *d = bank->drvdata; + unsigned char index = bank->eint_offset + data->hwirq; + unsigned long mask; + + mask = readl(d->virt_base + EINTMASK_REG); + mask &= ~(1UL << index); + writel(mask, d->virt_base + EINTMASK_REG); +} + +static struct irq_chip s3c24xx_eint_chip = { + .name = "s3c-eint", + .irq_ack = s3c24xx_eint_ack, + .irq_mask = s3c24xx_eint_mask, + .irq_unmask = s3c24xx_eint_unmask, + .irq_set_type = s3c24xx_eint_type, +}; + +static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc, + u32 offset, u32 range) +{ + struct irq_chip *chip = irq_get_chip(irq); + struct s3c24xx_eint_data *data = irq_get_handler_data(irq); + struct samsung_pinctrl_drv_data *d = data->drvdata; + unsigned int pend, mask; + + chained_irq_enter(chip, desc); + + pend = readl(d->virt_base + EINTPEND_REG); + mask = readl(d->virt_base + EINTMASK_REG); + + pend &= ~mask; + pend &= range; + + while (pend) { + unsigned int virq; + + irq = __ffs(pend); + pend &= ~(1 << irq); + virq = irq_linear_revmap(data->domains[irq], irq - offset); + /* Something is really wrong if an unmapped EINT is unmasked */ + BUG_ON(!virq); + + generic_handle_irq(virq); + } + + chained_irq_exit(chip, desc); +} + +static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc) +{ + s3c24xx_demux_eint(irq, desc, 0, 0xf0); +} + +static void 
s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc) +{ + s3c24xx_demux_eint(irq, desc, 8, 0xffff00); +} + +static irq_flow_handler_t s3c2410_eint_handlers[NUM_EINT_IRQ] = { + s3c2410_demux_eint0_3, + s3c2410_demux_eint0_3, + s3c2410_demux_eint0_3, + s3c2410_demux_eint0_3, + s3c24xx_demux_eint4_7, + s3c24xx_demux_eint8_23, +}; + +static irq_flow_handler_t s3c2412_eint_handlers[NUM_EINT_IRQ] = { + s3c2412_demux_eint0_3, + s3c2412_demux_eint0_3, + s3c2412_demux_eint0_3, + s3c2412_demux_eint0_3, + s3c24xx_demux_eint4_7, + s3c24xx_demux_eint8_23, +}; + +static int s3c24xx_gpf_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct s3c24xx_eint_domain_data *ddata = h->host_data; + struct samsung_pin_bank *bank = ddata->bank; + + if (!(bank->eint_mask & (1 << (bank->eint_offset + hw)))) + return -EINVAL; + + if (hw <= 3) { + if (ddata->eint0_3_parent_only) + irq_set_chip_and_handler(virq, &s3c2410_eint0_3_chip, + handle_edge_irq); + else + irq_set_chip_and_handler(virq, &s3c2412_eint0_3_chip, + handle_edge_irq); + } else { + irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, + handle_edge_irq); + } + irq_set_chip_data(virq, bank); + set_irq_flags(virq, IRQF_VALID); + return 0; +} + +static const struct irq_domain_ops s3c24xx_gpf_irq_ops = { + .map = s3c24xx_gpf_irq_map, + .xlate = irq_domain_xlate_twocell, +}; + +static int s3c24xx_gpg_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct s3c24xx_eint_domain_data *ddata = h->host_data; + struct samsung_pin_bank *bank = ddata->bank; + + if (!(bank->eint_mask & (1 << (bank->eint_offset + hw)))) + return -EINVAL; + + irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, handle_edge_irq); + irq_set_chip_data(virq, bank); + set_irq_flags(virq, IRQF_VALID); + return 0; +} + +static const struct irq_domain_ops s3c24xx_gpg_irq_ops = { + .map = s3c24xx_gpg_irq_map, + .xlate = irq_domain_xlate_twocell, +}; + +static const struct of_device_id s3c24xx_eint_irq_ids[] = { + { .compatible = "samsung,s3c2410-wakeup-eint", .data = (void *)1 }, + { .compatible = "samsung,s3c2412-wakeup-eint", .data = (void *)0 }, + { } +}; + +static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d) +{ + struct device *dev = d->dev; + const struct of_device_id *match; + struct device_node *eint_np = NULL; + struct device_node *np; + struct samsung_pin_bank *bank; + struct s3c24xx_eint_data *eint_data; + const struct irq_domain_ops *ops; + unsigned int i; + bool eint0_3_parent_only; + irq_flow_handler_t *handlers; + + for_each_child_of_node(dev->of_node, np) { + match = of_match_node(s3c24xx_eint_irq_ids, np); + if (match) { + eint_np = np; + eint0_3_parent_only = (bool)match->data; + break; + } + } + if (!eint_np) + return -ENODEV; + + eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL); + if (!eint_data) + return -ENOMEM; + + eint_data->drvdata = d; + + handlers = eint0_3_parent_only ? 
s3c2410_eint_handlers + : s3c2412_eint_handlers; + for (i = 0; i < NUM_EINT_IRQ; ++i) { + unsigned int irq; + + irq = irq_of_parse_and_map(eint_np, i); + if (!irq) { + dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i); + return -ENXIO; + } + + eint_data->parents[i] = irq; + irq_set_chained_handler(irq, handlers[i]); + irq_set_handler_data(irq, eint_data); + } + + bank = d->ctrl->pin_banks; + for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { + struct s3c24xx_eint_domain_data *ddata; + unsigned int mask; + unsigned int irq; + unsigned int pin; + + if (bank->eint_type != EINT_TYPE_WKUP) + continue; + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + ddata->bank = bank; + ddata->eint_data = eint_data; + ddata->eint0_3_parent_only = eint0_3_parent_only; + + ops = (bank->eint_offset == 0) ? &s3c24xx_gpf_irq_ops + : &s3c24xx_gpg_irq_ops; + + bank->irq_domain = irq_domain_add_linear(bank->of_node, + bank->nr_pins, ops, ddata); + if (!bank->irq_domain) { + dev_err(dev, "wkup irq domain add failed\n"); + return -ENXIO; + } + + irq = bank->eint_offset; + mask = bank->eint_mask; + for (pin = 0; mask; ++pin, mask >>= 1) { + if (irq > NUM_EINT) + break; + if (!(mask & 1)) + continue; + eint_data->domains[irq] = bank->irq_domain; + ++irq; + } + } + + return 0; +} + +static struct samsung_pin_bank s3c2412_pin_banks[] = { + PIN_BANK_A(23, 0x000, "gpa"), + PIN_BANK_2BIT(11, 0x010, "gpb"), + PIN_BANK_2BIT(16, 0x020, "gpc"), + PIN_BANK_2BIT(16, 0x030, "gpd"), + PIN_BANK_2BIT(16, 0x040, "gpe"), + PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff), + PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00), + PIN_BANK_2BIT(11, 0x070, "gph"), + PIN_BANK_2BIT(13, 0x080, "gpj"), +}; + +struct samsung_pin_ctrl s3c2412_pin_ctrl[] = { + { + .pin_banks = s3c2412_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), + .eint_wkup_init = s3c24xx_eint_init, + .label = "S3C2412-GPIO", + }, +}; + +static struct samsung_pin_bank s3c2416_pin_banks[] = { + PIN_BANK_A(27, 0x000, "gpa"), + PIN_BANK_2BIT(11, 0x010, "gpb"), + PIN_BANK_2BIT(16, 0x020, "gpc"), + PIN_BANK_2BIT(16, 0x030, "gpd"), + PIN_BANK_2BIT(16, 0x040, "gpe"), + PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff), + PIN_BANK_2BIT_EINTW(8, 0x060, "gpg", 8, 0xff00), + PIN_BANK_2BIT(15, 0x070, "gph"), + PIN_BANK_2BIT(16, 0x0e0, "gpk"), + PIN_BANK_2BIT(14, 0x0f0, "gpl"), + PIN_BANK_2BIT(2, 0x100, "gpm"), +}; + +struct samsung_pin_ctrl s3c2416_pin_ctrl[] = { + { + .pin_banks = s3c2416_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), + .eint_wkup_init = s3c24xx_eint_init, + .label = "S3C2416-GPIO", + }, +}; + +static struct samsung_pin_bank s3c2440_pin_banks[] = { + PIN_BANK_A(25, 0x000, "gpa"), + PIN_BANK_2BIT(11, 0x010, "gpb"), + PIN_BANK_2BIT(16, 0x020, "gpc"), + PIN_BANK_2BIT(16, 0x030, "gpd"), + PIN_BANK_2BIT(16, 0x040, "gpe"), + PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff), + PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00), + PIN_BANK_2BIT(11, 0x070, "gph"), + PIN_BANK_2BIT(13, 0x0d0, "gpj"), +}; + +struct samsung_pin_ctrl s3c2440_pin_ctrl[] = { + { + .pin_banks = s3c2440_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), + .eint_wkup_init = s3c24xx_eint_init, + .label = "S3C2440-GPIO", + }, +}; + +static struct samsung_pin_bank s3c2450_pin_banks[] = { + PIN_BANK_A(28, 0x000, "gpa"), + PIN_BANK_2BIT(11, 0x010, "gpb"), + PIN_BANK_2BIT(16, 0x020, "gpc"), + PIN_BANK_2BIT(16, 0x030, "gpd"), + PIN_BANK_2BIT(16, 0x040, "gpe"), + PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff), + PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 
0xffff00), + PIN_BANK_2BIT(15, 0x070, "gph"), + PIN_BANK_2BIT(16, 0x0d0, "gpj"), + PIN_BANK_2BIT(16, 0x0e0, "gpk"), + PIN_BANK_2BIT(15, 0x0f0, "gpl"), + PIN_BANK_2BIT(2, 0x100, "gpm"), +}; + +struct samsung_pin_ctrl s3c2450_pin_ctrl[] = { + { + .pin_banks = s3c2450_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), + .eint_wkup_init = s3c24xx_eint_init, + .label = "S3C2450-GPIO", + }, +}; diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c index 63ac22e8967..e67ff1b8042 100644 --- a/drivers/pinctrl/pinctrl-samsung.c +++ b/drivers/pinctrl/pinctrl-samsung.c @@ -1118,6 +1118,16 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = { { .compatible = "samsung,s3c64xx-pinctrl", .data = s3c64xx_pin_ctrl }, #endif +#ifdef CONFIG_PINCTRL_S3C24XX + { .compatible = "samsung,s3c2412-pinctrl", + .data = s3c2412_pin_ctrl }, + { .compatible = "samsung,s3c2416-pinctrl", + .data = s3c2416_pin_ctrl }, + { .compatible = "samsung,s3c2440-pinctrl", + .data = s3c2440_pin_ctrl }, + { .compatible = "samsung,s3c2450-pinctrl", + .data = s3c2450_pin_ctrl }, +#endif {}, }; MODULE_DEVICE_TABLE(of, samsung_pinctrl_dt_match); diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h index 26d3519240c..79fcc2076c0 100644 --- a/drivers/pinctrl/pinctrl-samsung.h +++ b/drivers/pinctrl/pinctrl-samsung.h @@ -255,5 +255,9 @@ extern struct samsung_pin_ctrl exynos4210_pin_ctrl[]; extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; extern struct samsung_pin_ctrl exynos5250_pin_ctrl[]; extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; +extern struct samsung_pin_ctrl s3c2412_pin_ctrl[]; +extern struct samsung_pin_ctrl s3c2416_pin_ctrl[]; +extern struct samsung_pin_ctrl s3c2440_pin_ctrl[]; +extern struct samsung_pin_ctrl s3c2450_pin_ctrl[]; #endif /* __PINCTRL_SAMSUNG_H */ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c index bbff5596e92..82bf6aba007 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c @@ -1488,6 +1488,66 @@ IRQC_PINS_MUX(326, 54); IRQC_PINS_MUX(327, 55); IRQC_PINS_MUX(328, 56); IRQC_PINS_MUX(329, 57); +/* - MMCIF0 ----------------------------------------------------------------- */ +static const unsigned int mmc0_data1_pins[] = { + /* D[0] */ + 164, +}; +static const unsigned int mmc0_data1_mux[] = { + MMCD0_0_MARK, +}; +static const unsigned int mmc0_data4_pins[] = { + /* D[0:3] */ + 164, 165, 166, 167, +}; +static const unsigned int mmc0_data4_mux[] = { + MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK, +}; +static const unsigned int mmc0_data8_pins[] = { + /* D[0:7] */ + 164, 165, 166, 167, 168, 169, 170, 171, +}; +static const unsigned int mmc0_data8_mux[] = { + MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK, + MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK, +}; +static const unsigned int mmc0_ctrl_pins[] = { + /* CMD, CLK */ + 172, 173, +}; +static const unsigned int mmc0_ctrl_mux[] = { + MMCCMD0_MARK, MMCCLK0_MARK, +}; +/* - MMCIF1 ----------------------------------------------------------------- */ +static const unsigned int mmc1_data1_pins[] = { + /* D[0] */ + 199, +}; +static const unsigned int mmc1_data1_mux[] = { + MMCD1_0_MARK, +}; +static const unsigned int mmc1_data4_pins[] = { + /* D[0:3] */ + 199, 198, 197, 196, +}; +static const unsigned int mmc1_data4_mux[] = { + MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK, +}; +static const unsigned int mmc1_data8_pins[] = { + /* D[0:7] */ + 199, 198, 197, 196, 195, 194, 193, 
192, +}; +static const unsigned int mmc1_data8_mux[] = { + MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK, + MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK, +}; +static const unsigned int mmc1_ctrl_pins[] = { + /* CMD, CLK */ + 200, 203, +}; +static const unsigned int mmc1_ctrl_mux[] = { + MMCCMD1_MARK, MMCCLK1_MARK, +}; /* - SCIFA0 ----------------------------------------------------------------- */ static const unsigned int scifa0_data_pins[] = { /* SCIFA0_RXD, SCIFA0_TXD */ @@ -1683,6 +1743,86 @@ static const unsigned int scifb3_ctrl_b_pins[] = { static const unsigned int scifb3_ctrl_b_mux[] = { SCIFB3_RTS_38_MARK, SCIFB3_CTS_39_MARK, }; +/* - SDHI0 ------------------------------------------------------------------ */ +static const unsigned int sdhi0_data1_pins[] = { + /* D0 */ + 302, +}; +static const unsigned int sdhi0_data1_mux[] = { + SDHID0_0_MARK, +}; +static const unsigned int sdhi0_data4_pins[] = { + /* D[0:3] */ + 302, 303, 304, 305, +}; +static const unsigned int sdhi0_data4_mux[] = { + SDHID0_0_MARK, SDHID0_1_MARK, SDHID0_2_MARK, SDHID0_3_MARK, +}; +static const unsigned int sdhi0_ctrl_pins[] = { + /* CLK, CMD */ + 308, 306, +}; +static const unsigned int sdhi0_ctrl_mux[] = { + SDHICLK0_MARK, SDHICMD0_MARK, +}; +static const unsigned int sdhi0_cd_pins[] = { + /* CD */ + 301, +}; +static const unsigned int sdhi0_cd_mux[] = { + SDHICD0_MARK, +}; +static const unsigned int sdhi0_wp_pins[] = { + /* WP */ + 307, +}; +static const unsigned int sdhi0_wp_mux[] = { + SDHIWP0_MARK, +}; +/* - SDHI1 ------------------------------------------------------------------ */ +static const unsigned int sdhi1_data1_pins[] = { + /* D0 */ + 289, +}; +static const unsigned int sdhi1_data1_mux[] = { + SDHID1_0_MARK, +}; +static const unsigned int sdhi1_data4_pins[] = { + /* D[0:3] */ + 289, 290, 291, 292, +}; +static const unsigned int sdhi1_data4_mux[] = { + SDHID1_0_MARK, SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK, +}; +static const unsigned int sdhi1_ctrl_pins[] = { + /* CLK, CMD */ + 293, 294, +}; +static const unsigned int sdhi1_ctrl_mux[] = { + SDHICLK1_MARK, SDHICMD1_MARK, +}; +/* - SDHI2 ------------------------------------------------------------------ */ +static const unsigned int sdhi2_data1_pins[] = { + /* D0 */ + 295, +}; +static const unsigned int sdhi2_data1_mux[] = { + SDHID2_0_MARK, +}; +static const unsigned int sdhi2_data4_pins[] = { + /* D[0:3] */ + 295, 296, 297, 298, +}; +static const unsigned int sdhi2_data4_mux[] = { + SDHID2_0_MARK, SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK, +}; +static const unsigned int sdhi2_ctrl_pins[] = { + /* CLK, CMD */ + 299, 300, +}; +static const unsigned int sdhi2_ctrl_mux[] = { + SDHICLK2_MARK, SDHICMD2_MARK, +}; static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(irqc_irq0), @@ -1743,6 +1883,14 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(irqc_irq55), SH_PFC_PIN_GROUP(irqc_irq56), SH_PFC_PIN_GROUP(irqc_irq57), + SH_PFC_PIN_GROUP(mmc0_data1), + SH_PFC_PIN_GROUP(mmc0_data4), + SH_PFC_PIN_GROUP(mmc0_data8), + SH_PFC_PIN_GROUP(mmc0_ctrl), + SH_PFC_PIN_GROUP(mmc1_data1), + SH_PFC_PIN_GROUP(mmc1_data4), + SH_PFC_PIN_GROUP(mmc1_data8), + SH_PFC_PIN_GROUP(mmc1_ctrl), SH_PFC_PIN_GROUP(scifa0_data), SH_PFC_PIN_GROUP(scifa0_clk), SH_PFC_PIN_GROUP(scifa0_ctrl), @@ -1770,6 +1918,17 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scifb3_data_b), SH_PFC_PIN_GROUP(scifb3_clk_b), SH_PFC_PIN_GROUP(scifb3_ctrl_b), + SH_PFC_PIN_GROUP(sdhi0_data1), + 
SH_PFC_PIN_GROUP(sdhi0_data4), + SH_PFC_PIN_GROUP(sdhi0_ctrl), + SH_PFC_PIN_GROUP(sdhi0_cd), + SH_PFC_PIN_GROUP(sdhi0_wp), + SH_PFC_PIN_GROUP(sdhi1_data1), + SH_PFC_PIN_GROUP(sdhi1_data4), + SH_PFC_PIN_GROUP(sdhi1_ctrl), + SH_PFC_PIN_GROUP(sdhi2_data1), + SH_PFC_PIN_GROUP(sdhi2_data4), + SH_PFC_PIN_GROUP(sdhi2_ctrl), }; static const char * const irqc_groups[] = { @@ -1833,6 +1992,20 @@ static const char * const irqc_groups[] = { "irqc_irq57", }; +static const char * const mmc0_groups[] = { + "mmc0_data1", + "mmc0_data4", + "mmc0_data8", + "mmc0_ctrl", +}; + +static const char * const mmc1_groups[] = { + "mmc1_data1", + "mmc1_data4", + "mmc1_data8", + "mmc1_ctrl", +}; + static const char * const scifa0_groups[] = { "scifa0_data", "scifa0_clk", @@ -1878,14 +2051,39 @@ static const char * const scifb3_groups[] = { "scifb3_ctrl_b", }; +static const char * const sdhi0_groups[] = { + "sdhi0_data1", + "sdhi0_data4", + "sdhi0_ctrl", + "sdhi0_cd", + "sdhi0_wp", +}; + +static const char * const sdhi1_groups[] = { + "sdhi1_data1", + "sdhi1_data4", + "sdhi1_ctrl", +}; + +static const char * const sdhi2_groups[] = { + "sdhi2_data1", + "sdhi2_data4", + "sdhi2_ctrl", +}; + static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(irqc), + SH_PFC_FUNCTION(mmc0), + SH_PFC_FUNCTION(mmc1), SH_PFC_FUNCTION(scifa0), SH_PFC_FUNCTION(scifa1), SH_PFC_FUNCTION(scifb0), SH_PFC_FUNCTION(scifb1), SH_PFC_FUNCTION(scifb2), SH_PFC_FUNCTION(scifb3), + SH_PFC_FUNCTION(sdhi0), + SH_PFC_FUNCTION(sdhi1), + SH_PFC_FUNCTION(sdhi2), }; #undef PORTCR diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c index 1dcbabcd7b3..f9039102bb4 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c @@ -1447,11 +1447,11 @@ MMC_PFC_PINS(mmc_ctrl, RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6)); MMC_PFC_CTRL(mmc_ctrl, MMC_CLK, MMC_CMD); MMC_PFC_PINS(mmc_data1, RCAR_GP_PIN(1, 7)); MMC_PFC_DAT1(mmc_data1, MMC_D0); -MMC_PFC_PINS(mmc_data4, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(2, 8), +MMC_PFC_PINS(mmc_data4, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 8), RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6)); MMC_PFC_DAT4(mmc_data4, MMC_D0, MMC_D1, MMC_D2, MMC_D3); -MMC_PFC_PINS(mmc_data8, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(2, 8), +MMC_PFC_PINS(mmc_data8, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 8), RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6), RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0), RCAR_GP_PIN(0, 30), RCAR_GP_PIN(0, 31)); diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c index 85d77a417c0..14f3ec267e1 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c @@ -1979,6 +1979,141 @@ static const unsigned int scif1_clk_e_pins[] = { static const unsigned int scif1_clk_e_mux[] = { SCK1_E_MARK, }; +/* - HSCIF0 ----------------------------------------------------------------- */ +static const unsigned int hscif0_data_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9), +}; +static const unsigned int hscif0_data_mux[] = { + HRX0_MARK, HTX0_MARK, +}; +static const unsigned int hscif0_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 7), +}; +static const unsigned int hscif0_clk_mux[] = { + HSCK0_MARK, +}; +static const unsigned int hscif0_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10), +}; +static const unsigned int hscif0_ctrl_mux[] = { + HRTS0_N_MARK, HCTS0_N_MARK, +}; +static const unsigned int hscif0_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 12), +}; +static const unsigned int 
hscif0_data_b_mux[] = { + HRX0_B_MARK, HTX0_B_MARK, +}; +static const unsigned int hscif0_ctrl_b_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(1, 29), RCAR_GP_PIN(1, 28), +}; +static const unsigned int hscif0_ctrl_b_mux[] = { + HRTS0_N_B_MARK, HCTS0_N_B_MARK, +}; +static const unsigned int hscif0_data_c_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 16), +}; +static const unsigned int hscif0_data_c_mux[] = { + HRX0_C_MARK, HTX0_C_MARK, +}; +static const unsigned int hscif0_ctrl_c_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 7), +}; +static const unsigned int hscif0_ctrl_c_mux[] = { + HRTS0_N_C_MARK, HCTS0_N_C_MARK, +}; +static const unsigned int hscif0_data_d_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21), +}; +static const unsigned int hscif0_data_d_mux[] = { + HRX0_D_MARK, HTX0_D_MARK, +}; +static const unsigned int hscif0_ctrl_d_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 22), +}; +static const unsigned int hscif0_ctrl_d_mux[] = { + HRTS0_N_D_MARK, HCTS0_N_D_MARK, +}; +static const unsigned int hscif0_data_e_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(2, 21), RCAR_GP_PIN(2, 22), +}; +static const unsigned int hscif0_data_e_mux[] = { + HRX0_E_MARK, HTX0_E_MARK, +}; +static const unsigned int hscif0_ctrl_e_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(2, 24), RCAR_GP_PIN(2, 23), +}; +static const unsigned int hscif0_ctrl_e_mux[] = { + HRTS0_N_E_MARK, HCTS0_N_E_MARK, +}; +static const unsigned int hscif0_data_f_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 25), +}; +static const unsigned int hscif0_data_f_mux[] = { + HRX0_F_MARK, HTX0_F_MARK, +}; +static const unsigned int hscif0_ctrl_f_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(2, 26), RCAR_GP_PIN(2, 24), +}; +static const unsigned int hscif0_ctrl_f_mux[] = { + HRTS0_N_F_MARK, HCTS0_N_F_MARK, +}; +/* - HSCIF1 ----------------------------------------------------------------- */ +static const unsigned int hscif1_data_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 29), +}; +static const unsigned int hscif1_data_mux[] = { + HRX1_MARK, HTX1_MARK, +}; +static const unsigned int hscif1_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(4, 27), +}; +static const unsigned int hscif1_clk_mux[] = { + HSCK1_MARK, +}; +static const unsigned int hscif1_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(4, 31), RCAR_GP_PIN(4, 30), +}; +static const unsigned int hscif1_ctrl_mux[] = { + HRTS1_N_MARK, HCTS1_N_MARK, +}; +static const unsigned int hscif1_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 18), +}; +static const unsigned int hscif1_data_b_mux[] = { + HRX1_B_MARK, HTX1_B_MARK, +}; +static const unsigned int hscif1_clk_b_pins[] = { + /* SCK */ + RCAR_GP_PIN(1, 28), +}; +static const unsigned int hscif1_clk_b_mux[] = { + HSCK1_B_MARK, +}; +static const unsigned int hscif1_ctrl_b_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13), +}; +static const unsigned int hscif1_ctrl_b_mux[] = { + HRTS1_N_B_MARK, HCTS1_N_B_MARK, +}; /* - SCIFA0 ----------------------------------------------------------------- */ static const unsigned int scifa0_data_pins[] = { /* RXD, TXD */ @@ -2371,8 +2506,7 @@ static const unsigned int tpu0_to3_pins[] = { static const unsigned int tpu0_to3_mux[] = { TPU0TO3_MARK, }; - -/* - MMCIF ------------------------------------------------------------------ */ +/* - MMCIF0 ----------------------------------------------------------------- */ static const unsigned int mmc0_data1_pins[] = { /* D[0] */ RCAR_GP_PIN(3, 
18), @@ -2406,7 +2540,7 @@ static const unsigned int mmc0_ctrl_pins[] = { static const unsigned int mmc0_ctrl_mux[] = { MMC0_CLK_MARK, MMC0_CMD_MARK, }; - +/* - MMCIF1 ----------------------------------------------------------------- */ static const unsigned int mmc1_data1_pins[] = { /* D[0] */ RCAR_GP_PIN(3, 26), @@ -2427,7 +2561,7 @@ static const unsigned int mmc1_data8_pins[] = { RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31), - RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 14), + RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), }; static const unsigned int mmc1_data8_mux[] = { MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK, @@ -2440,8 +2574,7 @@ static const unsigned int mmc1_ctrl_pins[] = { static const unsigned int mmc1_ctrl_mux[] = { MMC1_CLK_MARK, MMC1_CMD_MARK, }; - -/* - SDHI ------------------------------------------------------------------- */ +/* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* D0 */ RCAR_GP_PIN(3, 2), @@ -2477,7 +2610,7 @@ static const unsigned int sdhi0_wp_pins[] = { static const unsigned int sdhi0_wp_mux[] = { SD0_WP_MARK, }; - +/* - SDHI1 ------------------------------------------------------------------ */ static const unsigned int sdhi1_data1_pins[] = { /* D0 */ RCAR_GP_PIN(3, 10), @@ -2513,7 +2646,7 @@ static const unsigned int sdhi1_wp_pins[] = { static const unsigned int sdhi1_wp_mux[] = { SD1_WP_MARK, }; - +/* - SDHI2 ------------------------------------------------------------------ */ static const unsigned int sdhi2_data1_pins[] = { /* D0 */ RCAR_GP_PIN(3, 18), @@ -2549,7 +2682,7 @@ static const unsigned int sdhi2_wp_pins[] = { static const unsigned int sdhi2_wp_mux[] = { SD2_WP_MARK, }; - +/* - SDHI3 ------------------------------------------------------------------ */ static const unsigned int sdhi3_data1_pins[] = { /* D0 */ RCAR_GP_PIN(3, 26), @@ -2591,10 +2724,37 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(eth_magic), SH_PFC_PIN_GROUP(eth_mdio), SH_PFC_PIN_GROUP(eth_rmii), + SH_PFC_PIN_GROUP(hscif0_data), + SH_PFC_PIN_GROUP(hscif0_clk), + SH_PFC_PIN_GROUP(hscif0_ctrl), + SH_PFC_PIN_GROUP(hscif0_data_b), + SH_PFC_PIN_GROUP(hscif0_ctrl_b), + SH_PFC_PIN_GROUP(hscif0_data_c), + SH_PFC_PIN_GROUP(hscif0_ctrl_c), + SH_PFC_PIN_GROUP(hscif0_data_d), + SH_PFC_PIN_GROUP(hscif0_ctrl_d), + SH_PFC_PIN_GROUP(hscif0_data_e), + SH_PFC_PIN_GROUP(hscif0_ctrl_e), + SH_PFC_PIN_GROUP(hscif0_data_f), + SH_PFC_PIN_GROUP(hscif0_ctrl_f), + SH_PFC_PIN_GROUP(hscif1_data), + SH_PFC_PIN_GROUP(hscif1_clk), + SH_PFC_PIN_GROUP(hscif1_ctrl), + SH_PFC_PIN_GROUP(hscif1_data_b), + SH_PFC_PIN_GROUP(hscif1_clk_b), + SH_PFC_PIN_GROUP(hscif1_ctrl_b), SH_PFC_PIN_GROUP(intc_irq0), SH_PFC_PIN_GROUP(intc_irq1), SH_PFC_PIN_GROUP(intc_irq2), SH_PFC_PIN_GROUP(intc_irq3), + SH_PFC_PIN_GROUP(mmc0_data1), + SH_PFC_PIN_GROUP(mmc0_data4), + SH_PFC_PIN_GROUP(mmc0_data8), + SH_PFC_PIN_GROUP(mmc0_ctrl), + SH_PFC_PIN_GROUP(mmc1_data1), + SH_PFC_PIN_GROUP(mmc1_data4), + SH_PFC_PIN_GROUP(mmc1_data8), + SH_PFC_PIN_GROUP(mmc1_ctrl), SH_PFC_PIN_GROUP(scif0_data), SH_PFC_PIN_GROUP(scif0_clk), SH_PFC_PIN_GROUP(scif0_ctrl), @@ -2659,18 +2819,6 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scifb2_clk_b), SH_PFC_PIN_GROUP(scifb2_ctrl_b), SH_PFC_PIN_GROUP(scifb2_data_c), - SH_PFC_PIN_GROUP(tpu0_to0), - SH_PFC_PIN_GROUP(tpu0_to1), - SH_PFC_PIN_GROUP(tpu0_to2), - SH_PFC_PIN_GROUP(tpu0_to3), - 
SH_PFC_PIN_GROUP(mmc0_data1), - SH_PFC_PIN_GROUP(mmc0_data4), - SH_PFC_PIN_GROUP(mmc0_data8), - SH_PFC_PIN_GROUP(mmc0_ctrl), - SH_PFC_PIN_GROUP(mmc1_data1), - SH_PFC_PIN_GROUP(mmc1_data4), - SH_PFC_PIN_GROUP(mmc1_data8), - SH_PFC_PIN_GROUP(mmc1_ctrl), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), @@ -2691,6 +2839,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(sdhi3_ctrl), SH_PFC_PIN_GROUP(sdhi3_cd), SH_PFC_PIN_GROUP(sdhi3_wp), + SH_PFC_PIN_GROUP(tpu0_to0), + SH_PFC_PIN_GROUP(tpu0_to1), + SH_PFC_PIN_GROUP(tpu0_to2), + SH_PFC_PIN_GROUP(tpu0_to3), }; static const char * const eth_groups[] = { @@ -2726,6 +2878,31 @@ static const char * const scif1_groups[] = { "scif1_clk_e", }; +static const char * const hscif0_groups[] = { + "hscif0_data", + "hscif0_clk", + "hscif0_ctrl", + "hscif0_data_b", + "hscif0_ctrl_b", + "hscif0_data_c", + "hscif0_ctrl_c", + "hscif0_data_d", + "hscif0_ctrl_d", + "hscif0_data_e", + "hscif0_ctrl_e", + "hscif0_data_f", + "hscif0_ctrl_f", +}; + +static const char * const hscif1_groups[] = { + "hscif1_data", + "hscif1_clk", + "hscif1_ctrl", + "hscif1_data_b", + "hscif1_clk_b", + "hscif1_ctrl_b", +}; + static const char * const scifa0_groups[] = { "scifa0_data", "scifa0_clk", @@ -2850,7 +3027,11 @@ static const char * const sdhi3_groups[] = { static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(eth), + SH_PFC_FUNCTION(hscif0), + SH_PFC_FUNCTION(hscif1), SH_PFC_FUNCTION(intc), + SH_PFC_FUNCTION(mmc0), + SH_PFC_FUNCTION(mmc1), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), SH_PFC_FUNCTION(scifa0), @@ -2859,13 +3040,11 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(scifb0), SH_PFC_FUNCTION(scifb1), SH_PFC_FUNCTION(scifb2), - SH_PFC_FUNCTION(tpu0), - SH_PFC_FUNCTION(mmc0), - SH_PFC_FUNCTION(mmc1), SH_PFC_FUNCTION(sdhi0), SH_PFC_FUNCTION(sdhi1), SH_PFC_FUNCTION(sdhi2), SH_PFC_FUNCTION(sdhi3), + SH_PFC_FUNCTION(tpu0), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index d4d377c40ec..ce1743d0b67 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -14,8 +14,9 @@ config OMAP_REMOTEPROC depends on HAS_DMA depends on ARCH_OMAP4 || SOC_OMAP5 depends on OMAP_IOMMU - depends on OMAP_MBOX_FWK select REMOTEPROC + select MAILBOX + select OMAP2PLUS_MBOX select RPMSG help Say y here to support OMAP's remote processors (dual M3 diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index 0e396c155b3..51689721ea7 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c @@ -27,8 +27,8 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/remoteproc.h> +#include <linux/omap-mailbox.h> -#include <plat/mailbox.h> #include <linux/platform_data/remoteproc-omap.h> #include "omap_remoteproc.h" diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig index 60848f198b4..165b918b817 100644 --- a/drivers/staging/tidspbridge/Kconfig +++ b/drivers/staging/tidspbridge/Kconfig @@ -5,7 +5,8 @@ menuconfig TIDSPBRIDGE tristate "DSP Bridge driver" depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM - select OMAP_MBOX_FWK + select MAILBOX + select OMAP2PLUS_MBOX help DSP/BIOS Bridge is designed for platforms that contain a GPP and one or more attached DSPs. 
The GPP is considered the master or diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h index 7f3a1db3161..d1441db469f 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h +++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h @@ -41,7 +41,7 @@ #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/clk.h> -#include <plat/mailbox.h> +#include <linux/omap-mailbox.h> #include <linux/pagemap.h> #include <asm/cacheflush.h> #include <linux/dma-mapping.h> diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 01b8229fa86..62f6802f6e0 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -155,7 +155,7 @@ config USB_LPC32XX config USB_ATMEL_USBA tristate "Atmel USBA" - depends on AVR32 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 + depends on AVR32 || ARCH_AT91 help USBA is the integrated high-speed USB Device controller on the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel. diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index 5a5128a226f..1d9722203ca 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c @@ -22,15 +22,13 @@ #include <linux/usb/atmel_usba_udc.h> #include <linux/delay.h> #include <linux/platform_data/atmel.h> +#include <linux/of.h> +#include <linux/of_gpio.h> #include <asm/gpio.h> #include "atmel_usba_udc.h" - -static struct usba_udc the_udc; -static struct usba_ep *usba_ep; - #ifdef CONFIG_USB_GADGET_DEBUG_FS #include <linux/debugfs.h> #include <linux/uaccess.h> @@ -1014,16 +1012,13 @@ static void nop_release(struct device *dev) } -static struct usba_udc the_udc = { - .gadget = { - .ops = &usba_udc_ops, - .ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list), - .max_speed = USB_SPEED_HIGH, - .name = "atmel_usba_udc", - .dev = { - .init_name = "gadget", - .release = nop_release, - }, +struct usb_gadget usba_gadget_template = { + .ops = &usba_udc_ops, + .max_speed = USB_SPEED_HIGH, + .name = "atmel_usba_udc", + .dev = { + .init_name = "gadget", + .release = nop_release, }, }; @@ -1147,7 +1142,7 @@ static int do_test_mode(struct usba_udc *udc) * Test_SE0_NAK: Force high-speed mode and set up ep0 * for Bulk IN transfers */ - ep = &usba_ep[0]; + ep = &udc->usba_ep[0]; usba_writel(udc, TST, USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH)); usba_ep_writel(ep, CFG, @@ -1165,7 +1160,7 @@ static int do_test_mode(struct usba_udc *udc) break; case 0x0400: /* Test_Packet */ - ep = &usba_ep[0]; + ep = &udc->usba_ep[0]; usba_ep_writel(ep, CFG, USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64) | USBA_EPT_DIR_IN @@ -1668,7 +1663,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) for (i = 1; i < USBA_NR_ENDPOINTS; i++) if (dma_status & (1 << i)) - usba_dma_irq(udc, &usba_ep[i]); + usba_dma_irq(udc, &udc->usba_ep[i]); } ep_status = USBA_BFEXT(EPT_INT, status); @@ -1677,10 +1672,10 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) for (i = 0; i < USBA_NR_ENDPOINTS; i++) if (ep_status & (1 << i)) { - if (ep_is_control(&usba_ep[i])) - usba_control_irq(udc, &usba_ep[i]); + if (ep_is_control(&udc->usba_ep[i])) + usba_control_irq(udc, &udc->usba_ep[i]); else - usba_ep_irq(udc, &usba_ep[i]); + usba_ep_irq(udc, &udc->usba_ep[i]); } } @@ -1705,7 +1700,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) DBG(DBG_BUS, "%s bus reset detected\n", usb_speed_string(udc->gadget.speed)); - ep0 = &usba_ep[0]; + ep0 = &udc->usba_ep[0]; ep0->ep.desc = &usba_ep0_desc; ep0->state 
= WAIT_FOR_SETUP; usba_ep_writel(ep0, CFG, @@ -1835,17 +1830,158 @@ static int atmel_usba_stop(struct usb_gadget *gadget, return 0; } -static int __init usba_udc_probe(struct platform_device *pdev) +#ifdef CONFIG_OF +static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, + struct usba_udc *udc) +{ + u32 val; + const char *name; + enum of_gpio_flags flags; + struct device_node *np = pdev->dev.of_node; + struct device_node *pp; + int i, ret; + struct usba_ep *eps, *ep; + + udc->num_ep = 0; + + udc->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0, + &flags); + udc->vbus_pin_inverted = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0; + + pp = NULL; + while ((pp = of_get_next_child(np, pp))) + udc->num_ep++; + + eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * udc->num_ep, + GFP_KERNEL); + if (!eps) + return ERR_PTR(-ENOMEM); + + udc->gadget.ep0 = &eps[0].ep; + + INIT_LIST_HEAD(&eps[0].ep.ep_list); + + pp = NULL; + i = 0; + while ((pp = of_get_next_child(np, pp))) { + ep = &eps[i]; + + ret = of_property_read_u32(pp, "reg", &val); + if (ret) { + dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret); + goto err; + } + ep->index = val; + + ret = of_property_read_u32(pp, "atmel,fifo-size", &val); + if (ret) { + dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret); + goto err; + } + ep->fifo_size = val; + + ret = of_property_read_u32(pp, "atmel,nb-banks", &val); + if (ret) { + dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret); + goto err; + } + ep->nr_banks = val; + + ep->can_dma = of_property_read_bool(pp, "atmel,can-dma"); + ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc"); + + ret = of_property_read_string(pp, "name", &name); + ep->ep.name = name; + + ep->ep_regs = udc->regs + USBA_EPT_BASE(i); + ep->dma_regs = udc->regs + USBA_DMA_BASE(i); + ep->fifo = udc->fifo + USBA_FIFO_BASE(i); + ep->ep.ops = &usba_ep_ops; + ep->ep.maxpacket = ep->fifo_size; + ep->udc = udc; + INIT_LIST_HEAD(&ep->queue); + + if (i) + list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); + + i++; + } + + return eps; +err: + return ERR_PTR(ret); +} +#else +static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, + struct usba_udc *udc) +{ + return ERR_PTR(-ENOSYS); +} +#endif + +static struct usba_ep * usba_udc_pdata(struct platform_device *pdev, + struct usba_udc *udc) { struct usba_platform_data *pdata = pdev->dev.platform_data; + struct usba_ep *eps; + int i; + + if (!pdata) + return ERR_PTR(-ENXIO); + + eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * pdata->num_ep, + GFP_KERNEL); + if (!eps) + return ERR_PTR(-ENOMEM); + + udc->gadget.ep0 = &eps[0].ep; + + udc->vbus_pin = pdata->vbus_pin; + udc->vbus_pin_inverted = pdata->vbus_pin_inverted; + udc->num_ep = pdata->num_ep; + + INIT_LIST_HEAD(&eps[0].ep.ep_list); + + for (i = 0; i < pdata->num_ep; i++) { + struct usba_ep *ep = &eps[i]; + + ep->ep_regs = udc->regs + USBA_EPT_BASE(i); + ep->dma_regs = udc->regs + USBA_DMA_BASE(i); + ep->fifo = udc->fifo + USBA_FIFO_BASE(i); + ep->ep.ops = &usba_ep_ops; + ep->ep.name = pdata->ep[i].name; + ep->fifo_size = ep->ep.maxpacket = pdata->ep[i].fifo_size; + ep->udc = udc; + INIT_LIST_HEAD(&ep->queue); + ep->nr_banks = pdata->ep[i].nr_banks; + ep->index = pdata->ep[i].index; + ep->can_dma = pdata->ep[i].can_dma; + ep->can_isoc = pdata->ep[i].can_isoc; + + if (i) + list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); + } + + return eps; +} + +static int __init usba_udc_probe(struct platform_device *pdev) +{ struct resource *regs, *fifo; struct clk *pclk, *hclk; - 
struct usba_udc *udc = &the_udc; + struct usba_udc *udc; int irq, ret, i; + udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); + if (!udc) + return -ENOMEM; + + udc->gadget = usba_gadget_template; + INIT_LIST_HEAD(&udc->gadget.ep_list); + regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID); fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID); - if (!regs || !fifo || !pdata) + if (!regs || !fifo) return -ENXIO; irq = platform_get_irq(pdev, 0); @@ -1891,46 +2027,14 @@ static int __init usba_udc_probe(struct platform_device *pdev) usba_writel(udc, CTRL, USBA_DISABLE_MASK); clk_disable(pclk); - usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep, - GFP_KERNEL); - if (!usba_ep) - goto err_alloc_ep; - - the_udc.gadget.ep0 = &usba_ep[0].ep; - - INIT_LIST_HEAD(&usba_ep[0].ep.ep_list); - usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0); - usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0); - usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0); - usba_ep[0].ep.ops = &usba_ep_ops; - usba_ep[0].ep.name = pdata->ep[0].name; - usba_ep[0].ep.maxpacket = pdata->ep[0].fifo_size; - usba_ep[0].udc = &the_udc; - INIT_LIST_HEAD(&usba_ep[0].queue); - usba_ep[0].fifo_size = pdata->ep[0].fifo_size; - usba_ep[0].nr_banks = pdata->ep[0].nr_banks; - usba_ep[0].index = pdata->ep[0].index; - usba_ep[0].can_dma = pdata->ep[0].can_dma; - usba_ep[0].can_isoc = pdata->ep[0].can_isoc; - - for (i = 1; i < pdata->num_ep; i++) { - struct usba_ep *ep = &usba_ep[i]; - - ep->ep_regs = udc->regs + USBA_EPT_BASE(i); - ep->dma_regs = udc->regs + USBA_DMA_BASE(i); - ep->fifo = udc->fifo + USBA_FIFO_BASE(i); - ep->ep.ops = &usba_ep_ops; - ep->ep.name = pdata->ep[i].name; - ep->ep.maxpacket = pdata->ep[i].fifo_size; - ep->udc = &the_udc; - INIT_LIST_HEAD(&ep->queue); - ep->fifo_size = pdata->ep[i].fifo_size; - ep->nr_banks = pdata->ep[i].nr_banks; - ep->index = pdata->ep[i].index; - ep->can_dma = pdata->ep[i].can_dma; - ep->can_isoc = pdata->ep[i].can_isoc; + if (pdev->dev.of_node) + udc->usba_ep = atmel_udc_of_init(pdev, udc); + else + udc->usba_ep = usba_udc_pdata(pdev, udc); - list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); + if (IS_ERR(udc->usba_ep)) { + ret = PTR_ERR(udc->usba_ep); + goto err_alloc_ep; } ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc); @@ -1941,16 +2045,12 @@ static int __init usba_udc_probe(struct platform_device *pdev) } udc->irq = irq; - if (gpio_is_valid(pdata->vbus_pin)) { - if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) { - udc->vbus_pin = pdata->vbus_pin; - udc->vbus_pin_inverted = pdata->vbus_pin_inverted; - + if (gpio_is_valid(udc->vbus_pin)) { + if (!devm_gpio_request(&pdev->dev, udc->vbus_pin, "atmel_usba_udc")) { ret = request_irq(gpio_to_irq(udc->vbus_pin), usba_vbus_irq, 0, "atmel_usba_udc", udc); if (ret) { - gpio_free(udc->vbus_pin); udc->vbus_pin = -ENODEV; dev_warn(&udc->pdev->dev, "failed to request vbus irq; " @@ -1969,20 +2069,17 @@ static int __init usba_udc_probe(struct platform_device *pdev) goto err_add_udc; usba_init_debugfs(udc); - for (i = 1; i < pdata->num_ep; i++) - usba_ep_init_debugfs(udc, &usba_ep[i]); + for (i = 1; i < udc->num_ep; i++) + usba_ep_init_debugfs(udc, &udc->usba_ep[i]); return 0; err_add_udc: - if (gpio_is_valid(pdata->vbus_pin)) { + if (gpio_is_valid(udc->vbus_pin)) free_irq(gpio_to_irq(udc->vbus_pin), udc); - gpio_free(udc->vbus_pin); - } free_irq(irq, udc); err_request_irq: - kfree(usba_ep); err_alloc_ep: iounmap(udc->fifo); err_map_fifo: @@ -1999,23 +2096,20 @@ static int __exit 
usba_udc_remove(struct platform_device *pdev) { struct usba_udc *udc; int i; - struct usba_platform_data *pdata = pdev->dev.platform_data; udc = platform_get_drvdata(pdev); usb_del_gadget_udc(&udc->gadget); - for (i = 1; i < pdata->num_ep; i++) - usba_ep_cleanup_debugfs(&usba_ep[i]); + for (i = 1; i < udc->num_ep; i++) + usba_ep_cleanup_debugfs(&udc->usba_ep[i]); usba_cleanup_debugfs(udc); if (gpio_is_valid(udc->vbus_pin)) { free_irq(gpio_to_irq(udc->vbus_pin), udc); - gpio_free(udc->vbus_pin); } free_irq(udc->irq, udc); - kfree(usba_ep); iounmap(udc->fifo); iounmap(udc->regs); clk_put(udc->hclk); @@ -2024,11 +2118,21 @@ static int __exit usba_udc_remove(struct platform_device *pdev) return 0; } +#if defined(CONFIG_OF) +static const struct of_device_id atmel_udc_dt_ids[] = { + { .compatible = "atmel,at91sam9rl-udc" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids); +#endif + static struct platform_driver udc_driver = { .remove = __exit_p(usba_udc_remove), .driver = { .name = "atmel_usba_udc", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(atmel_udc_dt_ids), }, }; diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h index d65a61851d3..2922db50bef 100644 --- a/drivers/usb/gadget/atmel_usba_udc.h +++ b/drivers/usb/gadget/atmel_usba_udc.h @@ -317,8 +317,10 @@ struct usba_udc { int irq; int vbus_pin; int vbus_pin_inverted; + int num_ep; struct clk *pclk; struct clk *hclk; + struct usba_ep *usba_ep; u16 devstatus; diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index 028ff4d07dc..fce71b60593 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -25,11 +25,19 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/usb/musb-ux500.h> #include "musb_core.h" +static struct musb_hdrc_config ux500_musb_hdrc_config = { + .multipoint = true, + .dyn_fifo = true, + .num_eps = 16, + .ram_bits = 16, +}; + struct ux500_glue { struct device *dev; struct platform_device *musb; @@ -187,15 +195,58 @@ static const struct musb_platform_ops ux500_ops = { .set_vbus = ux500_musb_set_vbus, }; +static struct musb_hdrc_platform_data * +ux500_of_probe(struct platform_device *pdev, struct device_node *np) +{ + struct musb_hdrc_platform_data *pdata; + const char *mode; + int strlen; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + mode = of_get_property(np, "dr_mode", &strlen); + if (!mode) { + dev_err(&pdev->dev, "No 'dr_mode' property found\n"); + return NULL; + } + + if (strlen > 0) { + if (!strcmp(mode, "host")) + pdata->mode = MUSB_HOST; + if (!strcmp(mode, "otg")) + pdata->mode = MUSB_OTG; + if (!strcmp(mode, "peripheral")) + pdata->mode = MUSB_PERIPHERAL; + } + + return pdata; +} + static int ux500_probe(struct platform_device *pdev) { struct resource musb_resources[2]; struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; + struct device_node *np = pdev->dev.of_node; struct platform_device *musb; struct ux500_glue *glue; struct clk *clk; int ret = -ENOMEM; + if (!pdata) { + if (np) { + pdata = ux500_of_probe(pdev, np); + if (!pdata) + goto err0; + + pdev->dev.platform_data = pdata; + } else { + dev_err(&pdev->dev, "no pdata or device tree found\n"); + goto err0; + } + } + glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&pdev->dev, "failed to allocate glue context\n"); @@ -222,14 +273,16 @@ static int ux500_probe(struct platform_device *pdev) } 
musb->dev.parent = &pdev->dev; - musb->dev.dma_mask = pdev->dev.dma_mask; + musb->dev.dma_mask = &pdev->dev.coherent_dma_mask; musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; + musb->dev.of_node = pdev->dev.of_node; glue->dev = &pdev->dev; glue->musb = musb; glue->clk = clk; pdata->platform_ops = &ux500_ops; + pdata->config = &ux500_musb_hdrc_config; platform_set_drvdata(pdev, glue); @@ -334,12 +387,18 @@ static const struct dev_pm_ops ux500_pm_ops = { #define DEV_PM_OPS NULL #endif +static const struct of_device_id ux500_match[] = { + { .compatible = "stericsson,db8500-musb", }, + {} +}; + static struct platform_driver ux500_driver = { .probe = ux500_probe, .remove = ux500_remove, .driver = { .name = "musb-ux500", .pm = DEV_PM_OPS, + .of_match_table = ux500_match, }, }; diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index 63e7c8a6b12..bfb7a65d83c 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c @@ -34,6 +34,11 @@ #include <linux/platform_data/usb-musb-ux500.h> #include "musb_core.h" +static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11", "iep_4_12", + "iep_5_13", "iep_6_14", "iep_7_15", "iep_8" }; +static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11", "oep_4_12", + "oep_5_13", "oep_6_14", "oep_7_15", "oep_8" }; + struct ux500_dma_channel { struct dma_channel channel; struct ux500_dma_controller *controller; @@ -48,10 +53,8 @@ struct ux500_dma_channel { struct ux500_dma_controller { struct dma_controller controller; - struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_CHANNELS]; - struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_TX_CHANNELS]; - u32 num_rx_channels; - u32 num_tx_channels; + struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]; + struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]; void *private_data; dma_addr_t phy_base; }; @@ -143,19 +146,15 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, struct ux500_dma_channel *ux500_channel = NULL; struct musb *musb = controller->private_data; u8 ch_num = hw_ep->epnum - 1; - u32 max_ch; - /* Max 8 DMA channels (0 - 7). Each DMA channel can only be allocated + /* 8 DMA channels (0 - 7). Each DMA channel can only be allocated * to specified hw_ep. For example DMA channel 0 can only be allocated * to hw_ep 1 and 9. */ if (ch_num > 7) ch_num -= 8; - max_ch = is_tx ? controller->num_tx_channels : - controller->num_rx_channels; - - if (ch_num >= max_ch) + if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS) return NULL; ux500_channel = is_tx ? 
&(controller->tx_channel[ch_num]) : @@ -263,7 +262,7 @@ static int ux500_dma_controller_stop(struct dma_controller *c) struct dma_channel *channel; u8 ch_num; - for (ch_num = 0; ch_num < controller->num_rx_channels; ch_num++) { + for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) { channel = &controller->rx_channel[ch_num].channel; ux500_channel = channel->private_data; @@ -273,7 +272,7 @@ static int ux500_dma_controller_stop(struct dma_controller *c) dma_release_channel(ux500_channel->dma_chan); } - for (ch_num = 0; ch_num < controller->num_tx_channels; ch_num++) { + for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) { channel = &controller->tx_channel[ch_num].channel; ux500_channel = channel->private_data; @@ -294,34 +293,36 @@ static int ux500_dma_controller_start(struct dma_controller *c) struct musb *musb = controller->private_data; struct device *dev = musb->controller; struct musb_hdrc_platform_data *plat = dev->platform_data; - struct ux500_musb_board_data *data = plat->board_data; + struct ux500_musb_board_data *data; struct dma_channel *dma_channel = NULL; + char **chan_names; u32 ch_num; u8 dir; u8 is_tx = 0; void **param_array; struct ux500_dma_channel *channel_array; - u32 ch_count; dma_cap_mask_t mask; - if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) || - (data->num_tx_channels > UX500_MUSB_DMA_NUM_TX_CHANNELS)) + if (!plat) { + dev_err(musb->controller, "No platform data\n"); return -EINVAL; + } - controller->num_rx_channels = data->num_rx_channels; - controller->num_tx_channels = data->num_tx_channels; + data = plat->board_data; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); /* Prepare the loop for RX channels */ channel_array = controller->rx_channel; - ch_count = data->num_rx_channels; - param_array = data->dma_rx_param_array; + param_array = data ? data->dma_rx_param_array : NULL; + chan_names = (char **)iep_chan_names; for (dir = 0; dir < 2; dir++) { - for (ch_num = 0; ch_num < ch_count; ch_num++) { + for (ch_num = 0; + ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; + ch_num++) { ux500_channel = &channel_array[ch_num]; ux500_channel->controller = controller; ux500_channel->ch_num = ch_num; @@ -332,9 +333,15 @@ static int ux500_dma_controller_start(struct dma_controller *c) dma_channel->status = MUSB_DMA_STATUS_FREE; dma_channel->max_len = SZ_16M; - ux500_channel->dma_chan = dma_request_channel(mask, - data->dma_filter, - param_array[ch_num]); + ux500_channel->dma_chan = + dma_request_slave_channel(dev, chan_names[ch_num]); + + if (!ux500_channel->dma_chan) + ux500_channel->dma_chan = + dma_request_channel(mask, + data->dma_filter, + param_array[ch_num]); + if (!ux500_channel->dma_chan) { ERR("Dma pipe allocation error dir=%d ch=%d\n", dir, ch_num); @@ -349,8 +356,8 @@ static int ux500_dma_controller_start(struct dma_controller *c) /* Prepare the loop for TX channels */ channel_array = controller->tx_channel; - ch_count = data->num_tx_channels; - param_array = data->dma_tx_param_array; + param_array = data ? data->dma_tx_param_array : NULL; + chan_names = (char **)oep_chan_names; is_tx = 1; } |
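The r8a7790 hunks above all follow the same three-layer sh-pfc pattern: a <name>_pins[]/<name>_mux[] pair describing the physical pins and their function marks, an SH_PFC_PIN_GROUP() entry in pinmux_groups[], and a <name>_groups[] string list tied to an SH_PFC_FUNCTION() entry. A minimal sketch of that pattern for a hypothetical single group (the foo0 name, FOO0_* marks and pin positions are illustrative, not taken from any datasheet):

	/* in a pfc-*.c table file, next to the existing groups */
	static const unsigned int foo0_data_pins[] = {
		/* RX, TX */
		RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
	};
	static const unsigned int foo0_data_mux[] = {
		FOO0_RX_MARK, FOO0_TX_MARK,	/* hypothetical marks */
	};

	static const struct sh_pfc_pin_group pinmux_groups[] = {
		SH_PFC_PIN_GROUP(foo0_data),	/* expands to foo0_data_pins/_mux */
	};

	static const char * const foo0_groups[] = {
		"foo0_data",
	};

	static const struct sh_pfc_function pinmux_functions[] = {
		SH_PFC_FUNCTION(foo0),		/* expands to foo0_groups */
	};

Both tables are kept alphabetically sorted, which is why the existing mmc*, sdhi* and tpu0 entries move within the r8a7790 tables in the diff rather than being dropped.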
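In the atmel_usba_udc conversion, atmel_udc_of_init() walks the controller's child nodes twice: once to count the endpoints, then again to fill a devm-allocated array from the per-child properties. A condensed sketch of that idiom (function and variable names are illustrative, not the merged code):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int foo_count_and_parse_eps(struct platform_device *pdev)
	{
		struct device_node *np = pdev->dev.of_node;
		struct device_node *pp = NULL;
		int num = 0, ret;
		u32 val;

		/* first pass: count the endpoint child nodes */
		while ((pp = of_get_next_child(np, pp)))
			num++;

		/* (allocate 'num' per-endpoint structures here, e.g. with devm_kzalloc) */

		/* second pass: read the per-endpoint properties */
		pp = NULL;
		while ((pp = of_get_next_child(np, pp))) {
			ret = of_property_read_u32(pp, "reg", &val);
			if (ret) {
				of_node_put(pp);	/* drop the reference held on early exit */
				return ret;
			}
			/* ... likewise for "atmel,fifo-size", "atmel,nb-banks", ... */
		}

		return num;
	}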
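The same driver drops the file-scope the_udc and usba_ep statics in favour of state allocated per device in probe, which is what lets the endpoint array be sized from either platform data or the device tree. The general shape of that change, sketched with placeholder structure names:

	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct foo_ep {
		int index;		/* per-endpoint state lives here */
	};

	struct foo_udc {
		struct foo_ep *eps;
		int num_ep;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_udc *udc;

		/* devm allocations are released automatically when the device goes away */
		udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
		if (!udc)
			return -ENOMEM;

		udc->num_ep = 7;	/* would normally come from pdata or DT */
		udc->eps = devm_kzalloc(&pdev->dev,
					sizeof(*udc->eps) * udc->num_ep, GFP_KERNEL);
		if (!udc->eps)
			return -ENOMEM;

		platform_set_drvdata(pdev, udc);
		return 0;
	}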
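Both the Atmel UDC and the ux500 glue driver are made probeable from the device tree by attaching an of_device_id table to the platform driver. The wiring is the same in both cases; a self-contained sketch with a hypothetical compatible string and driver name:

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		return 0;
	}

	static const struct of_device_id foo_dt_ids[] = {
		{ .compatible = "vendor,foo-device" },	/* hypothetical */
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, foo_dt_ids);

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,
		.driver	= {
			.name		= "foo",
			.owner		= THIS_MODULE,
			/* of_match_ptr() compiles to NULL when CONFIG_OF is off */
			.of_match_table	= of_match_ptr(foo_dt_ids),
		},
	};
	module_platform_driver(foo_driver);

	MODULE_LICENSE("GPL");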
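ux500_of_probe() above maps the "dr_mode" string property onto the MUSB mode enum using of_get_property() and strcmp(). The same mapping can be written with of_property_read_string(); the sketch below is a hedged alternative for illustration, not the code that was merged:

	#include <linux/of.h>
	#include <linux/string.h>
	#include <linux/usb/musb.h>

	static int foo_read_dr_mode(struct device_node *np, enum musb_mode *mode)
	{
		const char *s;

		if (of_property_read_string(np, "dr_mode", &s))
			return -EINVAL;

		if (!strcmp(s, "host"))
			*mode = MUSB_HOST;
		else if (!strcmp(s, "otg"))
			*mode = MUSB_OTG;
		else if (!strcmp(s, "peripheral"))
			*mode = MUSB_PERIPHERAL;
		else
			return -EINVAL;

		return 0;
	}

The merged version uses three independent if statements rather than an else-if chain; since a given string can only match one of the three tests, the result is the same.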
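The ux500_dma change switches to named dmaengine channels ("iep_*"/"oep_*") requested through dma_request_slave_channel(), falling back to the legacy filter-function path only when no such mapping exists. A minimal sketch of that fallback, with the channel name and filter treated as placeholders supplied by the caller:

	#include <linux/device.h>
	#include <linux/dmaengine.h>

	static struct dma_chan *foo_request_chan(struct device *dev, const char *name,
						 dma_filter_fn filter, void *param)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		/* preferred path: channel named in the device tree */
		chan = dma_request_slave_channel(dev, name);
		if (chan)
			return chan;

		/* legacy fallback: board-file filter function and parameter */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, filter, param);
	}

This is also why the board_data pointer becomes optional in ux500_dma_controller_start(): on a device-tree boot there is no filter/param array to pass, so param_array may legitimately be NULL.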