Diffstat (limited to 'arch/arm/plat-omap')
37 files changed, 1453 insertions, 3910 deletions
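Most of this diff reworks the OMAP dual-mode timer (dmtimer) code into a platform driver with runtime PM, and converts the public calls declared in plat/dmtimer.h from void to int return values. The sketch below is not part of the patch; it only shows how a caller might use the reworked API with error checking. The function name example_start_periodic_tick is invented for illustration, and the header path follows this tree.

#include <linux/errno.h>
#include <plat/dmtimer.h>

/*
 * Request any free dmtimer, clock it from the 32 kHz source, enable the
 * overflow interrupt and start it in autoreload mode.  Every step checks
 * the int return codes introduced by this series.
 */
static int example_start_periodic_tick(unsigned int load)
{
	struct omap_dm_timer *gpt;
	int ret;

	gpt = omap_dm_timer_request();
	if (!gpt)
		return -EBUSY;

	ret = omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_32_KHZ);
	if (!ret)
		ret = omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
	if (!ret)
		ret = omap_dm_timer_set_load_start(gpt, 1, load);
	if (ret)
		omap_dm_timer_free(gpt);

	return ret;
}

omap_dm_timer_free() now returns an int as well, so a long-lived user can propagate its result when tearing the timer down.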
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 5b605a9eb09..aa59f4247dc 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -15,6 +15,7 @@ config ARCH_OMAP1 select CLKSRC_MMIO select GENERIC_IRQ_CHIP select HAVE_IDE + select NEED_MACH_MEMORY_H help "Systems based on omap7xx, omap15xx or omap16xx" @@ -133,18 +134,6 @@ config OMAP_MBOX_KFIFO_SIZE This can also be changed at runtime (via the mbox_kfifo_size module parameter). -config OMAP_IOMMU - tristate - -config OMAP_IOMMU_DEBUG - tristate "Export OMAP IOMMU internals in DebugFS" - depends on OMAP_IOMMU && DEBUG_FS - help - Select this to see extensive information about - the internal state of OMAP IOMMU in debugfs. - - Say N unless you know you need this. - config OMAP_IOMMU_IVA2 bool diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile index f0233e6abcd..985262242f2 100644 --- a/arch/arm/plat-omap/Makefile +++ b/arch/arm/plat-omap/Makefile @@ -18,8 +18,6 @@ obj-$(CONFIG_ARCH_OMAP3) += omap_device.o obj-$(CONFIG_ARCH_OMAP4) += omap_device.o obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o -obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o -obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o obj-$(CONFIG_CPU_FREQ) += cpu-omap.o obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c index 3ba4d11ca73..567e4b54f24 100644 --- a/arch/arm/plat-omap/clock.c +++ b/arch/arm/plat-omap/clock.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/errno.h> +#include <linux/export.h> #include <linux/err.h> #include <linux/string.h> #include <linux/clk.h> diff --git a/arch/arm/plat-omap/debug-devices.c b/arch/arm/plat-omap/debug-devices.c index 923c9621096..caa1f7b6cc2 100644 --- a/arch/arm/plat-omap/debug-devices.c +++ b/arch/arm/plat-omap/debug-devices.c @@ -8,7 +8,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - +#include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> @@ -18,7 +18,6 @@ #include <mach/hardware.h> #include <plat/board.h> -#include <mach/gpio.h> /* Many OMAP development platforms reuse the same "debug board"; these diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c index fc05b102260..61a1ec2a6af 100644 --- a/arch/arm/plat-omap/debug-leds.c +++ b/arch/arm/plat-omap/debug-leds.c @@ -7,7 +7,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - +#include <linux/gpio.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/leds.h> @@ -19,7 +19,6 @@ #include <asm/mach-types.h> #include <plat/fpga.h> -#include <mach/gpio.h> /* Many OMAP development platforms reuse the same "debug board"; these diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c index ea28f98d5d6..19719329a47 100644 --- a/arch/arm/plat-omap/devices.c +++ b/arch/arm/plat-omap/devices.c @@ -8,7 +8,7 @@ * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
*/ - +#include <linux/gpio.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -24,91 +24,9 @@ #include <plat/tc.h> #include <plat/board.h> #include <plat/mmc.h> -#include <mach/gpio.h> #include <plat/menelaus.h> -#include <plat/mcbsp.h> #include <plat/omap44xx.h> -/*-------------------------------------------------------------------------*/ - -#if defined(CONFIG_OMAP_MCBSP) || defined(CONFIG_OMAP_MCBSP_MODULE) - -static struct platform_device **omap_mcbsp_devices; - -void omap_mcbsp_register_board_cfg(struct resource *res, int res_count, - struct omap_mcbsp_platform_data *config, int size) -{ - int i; - - omap_mcbsp_devices = kzalloc(size * sizeof(struct platform_device *), - GFP_KERNEL); - if (!omap_mcbsp_devices) { - printk(KERN_ERR "Could not register McBSP devices\n"); - return; - } - - for (i = 0; i < size; i++) { - struct platform_device *new_mcbsp; - int ret; - - new_mcbsp = platform_device_alloc("omap-mcbsp", i + 1); - if (!new_mcbsp) - continue; - platform_device_add_resources(new_mcbsp, &res[i * res_count], - res_count); - new_mcbsp->dev.platform_data = &config[i]; - ret = platform_device_add(new_mcbsp); - if (ret) { - platform_device_put(new_mcbsp); - continue; - } - omap_mcbsp_devices[i] = new_mcbsp; - } -} - -#else -void omap_mcbsp_register_board_cfg(struct resource *res, int res_count, - struct omap_mcbsp_platform_data *config, int size) -{ } -#endif - -/*-------------------------------------------------------------------------*/ - -#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \ - defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE) - -static struct resource mcpdm_resources[] = { - { - .name = "mcpdm_mem", - .start = OMAP44XX_MCPDM_BASE, - .end = OMAP44XX_MCPDM_BASE + SZ_4K, - .flags = IORESOURCE_MEM, - }, - { - .name = "mcpdm_irq", - .start = OMAP44XX_IRQ_MCPDM, - .end = OMAP44XX_IRQ_MCPDM, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device omap_mcpdm_device = { - .name = "omap-mcpdm", - .id = -1, - .num_resources = ARRAY_SIZE(mcpdm_resources), - .resource = mcpdm_resources, -}; - -static void omap_init_mcpdm(void) -{ - (void) platform_device_register(&omap_mcpdm_device); -} -#else -static inline void omap_init_mcpdm(void) {} -#endif - -/*-------------------------------------------------------------------------*/ - #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \ defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE) @@ -291,7 +209,6 @@ static int __init omap_init_devices(void) * in alphabetical order so they're easier to sort through. */ omap_init_rng(); - omap_init_mcpdm(); omap_init_uwire(); return 0; } diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index 75a847dd776..af3b92be845 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -3,6 +3,12 @@ * * OMAP Dual-Mode Timers * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Tarun Kanti DebBarma <tarun.kanti@ti.com> + * Thara Gopinath <thara@ti.com> + * + * dmtimer adaptation to platform_driver. + * * Copyright (C) 2005 Nokia Corporation * OMAP2 support by Juha Yrjola * API improvements and OMAP2 clock framework support by Timo Teras @@ -29,168 +35,81 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/errno.h> -#include <linux/list.h> -#include <linux/clk.h> -#include <linux/delay.h> -#include <linux/io.h> #include <linux/module.h> -#include <mach/hardware.h> -#include <plat/dmtimer.h> -#include <mach/irqs.h> - -static int dm_timer_count; - -#ifdef CONFIG_ARCH_OMAP1 -static struct omap_dm_timer omap1_dm_timers[] = { - { .phys_base = 0xfffb1400, .irq = INT_1610_GPTIMER1 }, - { .phys_base = 0xfffb1c00, .irq = INT_1610_GPTIMER2 }, - { .phys_base = 0xfffb2400, .irq = INT_1610_GPTIMER3 }, - { .phys_base = 0xfffb2c00, .irq = INT_1610_GPTIMER4 }, - { .phys_base = 0xfffb3400, .irq = INT_1610_GPTIMER5 }, - { .phys_base = 0xfffb3c00, .irq = INT_1610_GPTIMER6 }, - { .phys_base = 0xfffb7400, .irq = INT_1610_GPTIMER7 }, - { .phys_base = 0xfffbd400, .irq = INT_1610_GPTIMER8 }, -}; - -static const int omap1_dm_timer_count = ARRAY_SIZE(omap1_dm_timers); - -#else -#define omap1_dm_timers NULL -#define omap1_dm_timer_count 0 -#endif /* CONFIG_ARCH_OMAP1 */ - -#ifdef CONFIG_ARCH_OMAP2 -static struct omap_dm_timer omap2_dm_timers[] = { - { .phys_base = 0x48028000, .irq = INT_24XX_GPTIMER1 }, - { .phys_base = 0x4802a000, .irq = INT_24XX_GPTIMER2 }, - { .phys_base = 0x48078000, .irq = INT_24XX_GPTIMER3 }, - { .phys_base = 0x4807a000, .irq = INT_24XX_GPTIMER4 }, - { .phys_base = 0x4807c000, .irq = INT_24XX_GPTIMER5 }, - { .phys_base = 0x4807e000, .irq = INT_24XX_GPTIMER6 }, - { .phys_base = 0x48080000, .irq = INT_24XX_GPTIMER7 }, - { .phys_base = 0x48082000, .irq = INT_24XX_GPTIMER8 }, - { .phys_base = 0x48084000, .irq = INT_24XX_GPTIMER9 }, - { .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 }, - { .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 }, - { .phys_base = 0x4808a000, .irq = INT_24XX_GPTIMER12 }, -}; - -static const char *omap2_dm_source_names[] __initdata = { - "sys_ck", - "func_32k_ck", - "alt_ck", - NULL -}; - -static struct clk *omap2_dm_source_clocks[3]; -static const int omap2_dm_timer_count = ARRAY_SIZE(omap2_dm_timers); - -#else -#define omap2_dm_timers NULL -#define omap2_dm_timer_count 0 -#define omap2_dm_source_names NULL -#define omap2_dm_source_clocks NULL -#endif /* CONFIG_ARCH_OMAP2 */ - -#ifdef CONFIG_ARCH_OMAP3 -static struct omap_dm_timer omap3_dm_timers[] = { - { .phys_base = 0x48318000, .irq = INT_24XX_GPTIMER1 }, - { .phys_base = 0x49032000, .irq = INT_24XX_GPTIMER2 }, - { .phys_base = 0x49034000, .irq = INT_24XX_GPTIMER3 }, - { .phys_base = 0x49036000, .irq = INT_24XX_GPTIMER4 }, - { .phys_base = 0x49038000, .irq = INT_24XX_GPTIMER5 }, - { .phys_base = 0x4903A000, .irq = INT_24XX_GPTIMER6 }, - { .phys_base = 0x4903C000, .irq = INT_24XX_GPTIMER7 }, - { .phys_base = 0x4903E000, .irq = INT_24XX_GPTIMER8 }, - { .phys_base = 0x49040000, .irq = INT_24XX_GPTIMER9 }, - { .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 }, - { .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 }, - { .phys_base = 0x48304000, .irq = INT_34XX_GPT12_IRQ }, -}; - -static const char *omap3_dm_source_names[] __initdata = { - "sys_ck", - "omap_32k_fck", - NULL -}; - -static struct clk *omap3_dm_source_clocks[2]; -static const int omap3_dm_timer_count = ARRAY_SIZE(omap3_dm_timers); - -#else -#define omap3_dm_timers NULL -#define omap3_dm_timer_count 0 -#define omap3_dm_source_names NULL -#define omap3_dm_source_clocks NULL -#endif /* CONFIG_ARCH_OMAP3 */ - -#ifdef CONFIG_ARCH_OMAP4 -static struct omap_dm_timer omap4_dm_timers[] = { - { .phys_base = 0x4a318000, .irq = OMAP44XX_IRQ_GPT1 }, - { .phys_base = 0x48032000, .irq = 
OMAP44XX_IRQ_GPT2 }, - { .phys_base = 0x48034000, .irq = OMAP44XX_IRQ_GPT3 }, - { .phys_base = 0x48036000, .irq = OMAP44XX_IRQ_GPT4 }, - { .phys_base = 0x40138000, .irq = OMAP44XX_IRQ_GPT5 }, - { .phys_base = 0x4013a000, .irq = OMAP44XX_IRQ_GPT6 }, - { .phys_base = 0x4013a000, .irq = OMAP44XX_IRQ_GPT7 }, - { .phys_base = 0x4013e000, .irq = OMAP44XX_IRQ_GPT8 }, - { .phys_base = 0x4803e000, .irq = OMAP44XX_IRQ_GPT9 }, - { .phys_base = 0x48086000, .irq = OMAP44XX_IRQ_GPT10 }, - { .phys_base = 0x48088000, .irq = OMAP44XX_IRQ_GPT11 }, - { .phys_base = 0x4a320000, .irq = OMAP44XX_IRQ_GPT12 }, -}; -static const char *omap4_dm_source_names[] __initdata = { - "sys_clkin_ck", - "sys_32k_ck", - NULL -}; -static struct clk *omap4_dm_source_clocks[2]; -static const int omap4_dm_timer_count = ARRAY_SIZE(omap4_dm_timers); - -#else -#define omap4_dm_timers NULL -#define omap4_dm_timer_count 0 -#define omap4_dm_source_names NULL -#define omap4_dm_source_clocks NULL -#endif /* CONFIG_ARCH_OMAP4 */ +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/pm_runtime.h> -static struct omap_dm_timer *dm_timers; -static const char **dm_source_names; -static struct clk **dm_source_clocks; +#include <plat/dmtimer.h> -static spinlock_t dm_timer_lock; +static LIST_HEAD(omap_timer_list); +static DEFINE_SPINLOCK(dm_timer_lock); -/* - * Reads timer registers in posted and non-posted mode. The posted mode bit - * is encoded in reg. Note that in posted mode write pending bit must be - * checked. Otherwise a read of a non completed write will produce an error. +/** + * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode + * @timer: timer pointer over which read operation to perform + * @reg: lowest byte holds the register offset + * + * The posted mode bit is encoded in reg. Note that in posted mode write + * pending bit must be checked. Otherwise a read of a non completed write + * will produce an error. */ static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg) { - return __omap_dm_timer_read(timer->io_base, reg, timer->posted); + WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET); + return __omap_dm_timer_read(timer, reg, timer->posted); } -/* - * Writes timer registers in posted and non-posted mode. The posted mode bit - * is encoded in reg. Note that in posted mode the write pending bit must be - * checked. Otherwise a write on a register which has a pending write will be - * lost. +/** + * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode + * @timer: timer pointer over which write operation is to perform + * @reg: lowest byte holds the register offset + * @value: data to write into the register + * + * The posted mode bit is encoded in reg. Note that in posted mode the write + * pending bit must be checked. Otherwise a write on a register which has a + * pending write will be lost. 
*/ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg, u32 value) { - __omap_dm_timer_write(timer->io_base, reg, value, timer->posted); + WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET); + __omap_dm_timer_write(timer, reg, value, timer->posted); +} + +static void omap_timer_restore_context(struct omap_dm_timer *timer) +{ + omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_OFFSET, + timer->context.tiocp_cfg); + if (timer->revision > 1) + __raw_writel(timer->context.tistat, timer->sys_stat); + + __raw_writel(timer->context.tisr, timer->irq_stat); + omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, + timer->context.twer); + omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, + timer->context.tcrr); + omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, + timer->context.tldr); + omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, + timer->context.tmar); + omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, + timer->context.tsicr); + __raw_writel(timer->context.tier, timer->irq_ena); + omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, + timer->context.tclr); } static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer) { int c; + if (!timer->sys_stat) + return; + c = 0; - while (!(omap_dm_timer_read_reg(timer, OMAP_TIMER_SYS_STAT_REG) & 1)) { + while (!(__raw_readl(timer->sys_stat) & 1)) { c++; if (c > 100000) { printk(KERN_ERR "Timer failed to reset\n"); @@ -201,53 +120,65 @@ static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer) static void omap_dm_timer_reset(struct omap_dm_timer *timer) { - int autoidle = 0, wakeup = 0; - - if (!cpu_class_is_omap2() || timer != &dm_timers[0]) { + omap_dm_timer_enable(timer); + if (timer->pdev->id != 1) { omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06); omap_dm_timer_wait_for_reset(timer); } - omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ); - /* Enable autoidle on OMAP2+ */ - if (cpu_class_is_omap2()) - autoidle = 1; - - /* - * Enable wake-up on OMAP2 CPUs. 
- */ - if (cpu_class_is_omap2()) - wakeup = 1; - - __omap_dm_timer_reset(timer->io_base, autoidle, wakeup); + __omap_dm_timer_reset(timer, 0, 0); + omap_dm_timer_disable(timer); timer->posted = 1; } -void omap_dm_timer_prepare(struct omap_dm_timer *timer) +int omap_dm_timer_prepare(struct omap_dm_timer *timer) { - omap_dm_timer_enable(timer); - omap_dm_timer_reset(timer); + struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data; + int ret; + + timer->fclk = clk_get(&timer->pdev->dev, "fck"); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) { + timer->fclk = NULL; + dev_err(&timer->pdev->dev, ": No fclk handle.\n"); + return -EINVAL; + } + + if (pdata->needs_manual_reset) + omap_dm_timer_reset(timer); + + ret = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ); + + timer->posted = 1; + return ret; } struct omap_dm_timer *omap_dm_timer_request(void) { - struct omap_dm_timer *timer = NULL; + struct omap_dm_timer *timer = NULL, *t; unsigned long flags; - int i; + int ret = 0; spin_lock_irqsave(&dm_timer_lock, flags); - for (i = 0; i < dm_timer_count; i++) { - if (dm_timers[i].reserved) + list_for_each_entry(t, &omap_timer_list, node) { + if (t->reserved) continue; - timer = &dm_timers[i]; + timer = t; timer->reserved = 1; break; } + + if (timer) { + ret = omap_dm_timer_prepare(timer); + if (ret) { + timer->reserved = 0; + timer = NULL; + } + } spin_unlock_irqrestore(&dm_timer_lock, flags); - if (timer != NULL) - omap_dm_timer_prepare(timer); + if (!timer) + pr_debug("%s: timer request failed!\n", __func__); return timer; } @@ -255,74 +186,65 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_request); struct omap_dm_timer *omap_dm_timer_request_specific(int id) { - struct omap_dm_timer *timer; + struct omap_dm_timer *timer = NULL, *t; unsigned long flags; + int ret = 0; spin_lock_irqsave(&dm_timer_lock, flags); - if (id <= 0 || id > dm_timer_count || dm_timers[id-1].reserved) { - spin_unlock_irqrestore(&dm_timer_lock, flags); - printk("BUG: warning at %s:%d/%s(): unable to get timer %d\n", - __FILE__, __LINE__, __func__, id); - dump_stack(); - return NULL; + list_for_each_entry(t, &omap_timer_list, node) { + if (t->pdev->id == id && !t->reserved) { + timer = t; + timer->reserved = 1; + break; + } } - timer = &dm_timers[id-1]; - timer->reserved = 1; + if (timer) { + ret = omap_dm_timer_prepare(timer); + if (ret) { + timer->reserved = 0; + timer = NULL; + } + } spin_unlock_irqrestore(&dm_timer_lock, flags); - omap_dm_timer_prepare(timer); + if (!timer) + pr_debug("%s: timer%d request failed!\n", __func__, id); return timer; } EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific); -void omap_dm_timer_free(struct omap_dm_timer *timer) +int omap_dm_timer_free(struct omap_dm_timer *timer) { - omap_dm_timer_enable(timer); - omap_dm_timer_reset(timer); - omap_dm_timer_disable(timer); + if (unlikely(!timer)) + return -EINVAL; + + clk_put(timer->fclk); WARN_ON(!timer->reserved); timer->reserved = 0; + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_free); void omap_dm_timer_enable(struct omap_dm_timer *timer) { - if (timer->enabled) - return; - -#ifdef CONFIG_ARCH_OMAP2PLUS - if (cpu_class_is_omap2()) { - clk_enable(timer->fclk); - clk_enable(timer->iclk); - } -#endif - - timer->enabled = 1; + pm_runtime_get_sync(&timer->pdev->dev); } EXPORT_SYMBOL_GPL(omap_dm_timer_enable); void omap_dm_timer_disable(struct omap_dm_timer *timer) { - if (!timer->enabled) - return; - -#ifdef CONFIG_ARCH_OMAP2PLUS - if (cpu_class_is_omap2()) { - clk_disable(timer->iclk); - clk_disable(timer->fclk); - } -#endif - - 
timer->enabled = 0; + pm_runtime_put(&timer->pdev->dev); } EXPORT_SYMBOL_GPL(omap_dm_timer_disable); int omap_dm_timer_get_irq(struct omap_dm_timer *timer) { - return timer->irq; + if (timer) + return timer->irq; + return -EINVAL; } EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq); @@ -334,24 +256,29 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq); */ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask) { - int i; + int i = 0; + struct omap_dm_timer *timer = NULL; + unsigned long flags; /* If ARMXOR cannot be idled this function call is unnecessary */ if (!(inputmask & (1 << 1))) return inputmask; /* If any active timer is using ARMXOR return modified mask */ - for (i = 0; i < dm_timer_count; i++) { + spin_lock_irqsave(&dm_timer_lock, flags); + list_for_each_entry(timer, &omap_timer_list, node) { u32 l; - l = omap_dm_timer_read_reg(&dm_timers[i], OMAP_TIMER_CTRL_REG); + l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); if (l & OMAP_TIMER_CTRL_ST) { if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0) inputmask &= ~(1 << 1); else inputmask &= ~(1 << 2); } + i++; } + spin_unlock_irqrestore(&dm_timer_lock, flags); return inputmask; } @@ -361,7 +288,9 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask); struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer) { - return timer->fclk; + if (timer) + return timer->fclk; + return NULL; } EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk); @@ -375,70 +304,91 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask); #endif -void omap_dm_timer_trigger(struct omap_dm_timer *timer) +int omap_dm_timer_trigger(struct omap_dm_timer *timer) { + if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { + pr_err("%s: timer not available or enabled.\n", __func__); + return -EINVAL; + } + omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0); + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_trigger); -void omap_dm_timer_start(struct omap_dm_timer *timer) +int omap_dm_timer_start(struct omap_dm_timer *timer) { u32 l; + if (unlikely(!timer)) + return -EINVAL; + + omap_dm_timer_enable(timer); + + if (timer->loses_context) { + u32 ctx_loss_cnt_after = + timer->get_context_loss_count(&timer->pdev->dev); + if (ctx_loss_cnt_after != timer->ctx_loss_count) + omap_timer_restore_context(timer); + } + l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); if (!(l & OMAP_TIMER_CTRL_ST)) { l |= OMAP_TIMER_CTRL_ST; omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); } + + /* Save the context */ + timer->context.tclr = l; + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_start); -void omap_dm_timer_stop(struct omap_dm_timer *timer) +int omap_dm_timer_stop(struct omap_dm_timer *timer) { unsigned long rate = 0; + struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data; -#ifdef CONFIG_ARCH_OMAP2PLUS - rate = clk_get_rate(timer->fclk); -#endif + if (unlikely(!timer)) + return -EINVAL; + + if (!pdata->needs_manual_reset) + rate = clk_get_rate(timer->fclk); + + __omap_dm_timer_stop(timer, timer->posted, rate); - __omap_dm_timer_stop(timer->io_base, timer->posted, rate); + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_stop); -#ifdef CONFIG_ARCH_OMAP1 - int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source) { - int n = (timer - dm_timers) << 1; - u32 l; + int ret; + struct dmtimer_platform_data *pdata; - l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n); - l |= source << n; - omap_writel(l, MOD_CONF_CTRL_1); - - return 0; -} -EXPORT_SYMBOL_GPL(omap_dm_timer_set_source); + if (unlikely(!timer)) + return -EINVAL; -#else + pdata = 
timer->pdev->dev.platform_data; -int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source) -{ if (source < 0 || source >= 3) return -EINVAL; - return __omap_dm_timer_set_source(timer->fclk, - dm_source_clocks[source]); + ret = pdata->set_timer_src(timer->pdev, source); + + return ret; } EXPORT_SYMBOL_GPL(omap_dm_timer_set_source); -#endif - -void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, +int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, unsigned int load) { u32 l; + if (unlikely(!timer)) + return -EINVAL; + + omap_dm_timer_enable(timer); l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); if (autoreload) l |= OMAP_TIMER_CTRL_AR; @@ -448,15 +398,32 @@ void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load); omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0); + /* Save the context */ + timer->context.tclr = l; + timer->context.tldr = load; + omap_dm_timer_disable(timer); + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_set_load); /* Optimized set_load which removes costly spin wait in timer_start */ -void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, +int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, unsigned int load) { u32 l; + if (unlikely(!timer)) + return -EINVAL; + + omap_dm_timer_enable(timer); + + if (timer->loses_context) { + u32 ctx_loss_cnt_after = + timer->get_context_loss_count(&timer->pdev->dev); + if (ctx_loss_cnt_after != timer->ctx_loss_count) + omap_timer_restore_context(timer); + } + l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); if (autoreload) { l |= OMAP_TIMER_CTRL_AR; @@ -466,15 +433,25 @@ void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, } l |= OMAP_TIMER_CTRL_ST; - __omap_dm_timer_load_start(timer->io_base, l, load, timer->posted); + __omap_dm_timer_load_start(timer, l, load, timer->posted); + + /* Save the context */ + timer->context.tclr = l; + timer->context.tldr = load; + timer->context.tcrr = load; + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start); -void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, +int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match) { u32 l; + if (unlikely(!timer)) + return -EINVAL; + + omap_dm_timer_enable(timer); l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); if (enable) l |= OMAP_TIMER_CTRL_CE; @@ -482,14 +459,24 @@ void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, l &= ~OMAP_TIMER_CTRL_CE; omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match); + + /* Save the context */ + timer->context.tclr = l; + timer->context.tmar = match; + omap_dm_timer_disable(timer); + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_set_match); -void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, +int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, int toggle, int trigger) { u32 l; + if (unlikely(!timer)) + return -EINVAL; + + omap_dm_timer_enable(timer); l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM | OMAP_TIMER_CTRL_PT | (0x03 << 10)); @@ -499,13 +486,22 @@ void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, l |= OMAP_TIMER_CTRL_PT; l |= trigger << 10; omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + + /* Save the context */ + timer->context.tclr = l; + 
omap_dm_timer_disable(timer); + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm); -void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler) +int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler) { u32 l; + if (unlikely(!timer)) + return -EINVAL; + + omap_dm_timer_enable(timer); l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2)); if (prescaler >= 0x00 && prescaler <= 0x07) { @@ -513,13 +509,28 @@ void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler) l |= prescaler << 2; } omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + + /* Save the context */ + timer->context.tclr = l; + omap_dm_timer_disable(timer); + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler); -void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, +int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value) { - __omap_dm_timer_int_enable(timer->io_base, value); + if (unlikely(!timer)) + return -EINVAL; + + omap_dm_timer_enable(timer); + __omap_dm_timer_int_enable(timer, value); + + /* Save the context */ + timer->context.tier = value; + timer->context.twer = value; + omap_dm_timer_disable(timer); + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable); @@ -527,40 +538,61 @@ unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer) { unsigned int l; - l = omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG); + if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { + pr_err("%s: timer not available or enabled.\n", __func__); + return 0; + } + + l = __raw_readl(timer->irq_stat); return l; } EXPORT_SYMBOL_GPL(omap_dm_timer_read_status); -void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value) +int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value) { - __omap_dm_timer_write_status(timer->io_base, value); + if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) + return -EINVAL; + + __omap_dm_timer_write_status(timer, value); + /* Save the context */ + timer->context.tisr = value; + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_write_status); unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer) { - return __omap_dm_timer_read_counter(timer->io_base, timer->posted); + if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { + pr_err("%s: timer not iavailable or enabled.\n", __func__); + return 0; + } + + return __omap_dm_timer_read_counter(timer, timer->posted); } EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter); -void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value) +int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value) { + if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { + pr_err("%s: timer not available or enabled.\n", __func__); + return -EINVAL; + } + omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value); + + /* Save the context */ + timer->context.tcrr = value; + return 0; } EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter); int omap_dm_timers_active(void) { - int i; - - for (i = 0; i < dm_timer_count; i++) { - struct omap_dm_timer *timer; - - timer = &dm_timers[i]; + struct omap_dm_timer *timer; - if (!timer->enabled) + list_for_each_entry(timer, &omap_timer_list, node) { + if (!timer->reserved) continue; if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) & @@ -572,69 +604,147 @@ int omap_dm_timers_active(void) } EXPORT_SYMBOL_GPL(omap_dm_timers_active); -static int __init 
omap_dm_timer_init(void) +/** + * omap_dm_timer_probe - probe function called for every registered device + * @pdev: pointer to current timer platform device + * + * Called by driver framework at the end of device registration for all + * timer devices. + */ +static int __devinit omap_dm_timer_probe(struct platform_device *pdev) { + int ret; + unsigned long flags; struct omap_dm_timer *timer; - int i, map_size = SZ_8K; /* Module 4KB + L4 4KB except on omap1 */ + struct resource *mem, *irq, *ioarea; + struct dmtimer_platform_data *pdata = pdev->dev.platform_data; - if (!(cpu_is_omap16xx() || cpu_class_is_omap2())) + if (!pdata) { + dev_err(&pdev->dev, "%s: no platform data.\n", __func__); return -ENODEV; + } - spin_lock_init(&dm_timer_lock); - - if (cpu_class_is_omap1()) { - dm_timers = omap1_dm_timers; - dm_timer_count = omap1_dm_timer_count; - map_size = SZ_2K; - } else if (cpu_is_omap24xx()) { - dm_timers = omap2_dm_timers; - dm_timer_count = omap2_dm_timer_count; - dm_source_names = omap2_dm_source_names; - dm_source_clocks = omap2_dm_source_clocks; - } else if (cpu_is_omap34xx()) { - dm_timers = omap3_dm_timers; - dm_timer_count = omap3_dm_timer_count; - dm_source_names = omap3_dm_source_names; - dm_source_clocks = omap3_dm_source_clocks; - } else if (cpu_is_omap44xx()) { - dm_timers = omap4_dm_timers; - dm_timer_count = omap4_dm_timer_count; - dm_source_names = omap4_dm_source_names; - dm_source_clocks = omap4_dm_source_clocks; + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (unlikely(!irq)) { + dev_err(&pdev->dev, "%s: no IRQ resource.\n", __func__); + return -ENODEV; } - if (cpu_class_is_omap2()) - for (i = 0; dm_source_names[i] != NULL; i++) - dm_source_clocks[i] = clk_get(NULL, dm_source_names[i]); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!mem)) { + dev_err(&pdev->dev, "%s: no memory resource.\n", __func__); + return -ENODEV; + } - if (cpu_is_omap243x()) - dm_timers[0].phys_base = 0x49018000; + ioarea = request_mem_region(mem->start, resource_size(mem), + pdev->name); + if (!ioarea) { + dev_err(&pdev->dev, "%s: region already claimed.\n", __func__); + return -EBUSY; + } - for (i = 0; i < dm_timer_count; i++) { - timer = &dm_timers[i]; + timer = kzalloc(sizeof(struct omap_dm_timer), GFP_KERNEL); + if (!timer) { + dev_err(&pdev->dev, "%s: no memory for omap_dm_timer.\n", + __func__); + ret = -ENOMEM; + goto err_free_ioregion; + } - /* Static mapping, never released */ - timer->io_base = ioremap(timer->phys_base, map_size); - BUG_ON(!timer->io_base); + timer->io_base = ioremap(mem->start, resource_size(mem)); + if (!timer->io_base) { + dev_err(&pdev->dev, "%s: ioremap failed.\n", __func__); + ret = -ENOMEM; + goto err_free_mem; + } -#ifdef CONFIG_ARCH_OMAP2PLUS - if (cpu_class_is_omap2()) { - char clk_name[16]; - sprintf(clk_name, "gpt%d_ick", i + 1); - timer->iclk = clk_get(NULL, clk_name); - sprintf(clk_name, "gpt%d_fck", i + 1); - timer->fclk = clk_get(NULL, clk_name); - } + timer->id = pdev->id; + timer->irq = irq->start; + timer->reserved = pdata->reserved; + timer->pdev = pdev; + timer->loses_context = pdata->loses_context; + timer->get_context_loss_count = pdata->get_context_loss_count; + + /* Skip pm_runtime_enable for OMAP1 */ + if (!pdata->needs_manual_reset) { + pm_runtime_enable(&pdev->dev); + pm_runtime_irq_safe(&pdev->dev); + } - /* One or two timers may be set up early for sys_timer */ - if (sys_timer_reserved & (1 << i)) { - timer->reserved = 1; - timer->posted = 1; - } -#endif + if (!timer->reserved) { + 
pm_runtime_get_sync(&pdev->dev); + __omap_dm_timer_init_regs(timer); + pm_runtime_put(&pdev->dev); } + /* add the timer element to the list */ + spin_lock_irqsave(&dm_timer_lock, flags); + list_add_tail(&timer->node, &omap_timer_list); + spin_unlock_irqrestore(&dm_timer_lock, flags); + + dev_dbg(&pdev->dev, "Device Probed.\n"); + return 0; + +err_free_mem: + kfree(timer); + +err_free_ioregion: + release_mem_region(mem->start, resource_size(mem)); + + return ret; } -arch_initcall(omap_dm_timer_init); +/** + * omap_dm_timer_remove - cleanup a registered timer device + * @pdev: pointer to current timer platform device + * + * Called by driver framework whenever a timer device is unregistered. + * In addition to freeing platform resources it also deletes the timer + * entry from the local list. + */ +static int __devexit omap_dm_timer_remove(struct platform_device *pdev) +{ + struct omap_dm_timer *timer; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dm_timer_lock, flags); + list_for_each_entry(timer, &omap_timer_list, node) + if (timer->pdev->id == pdev->id) { + list_del(&timer->node); + kfree(timer); + ret = 0; + break; + } + spin_unlock_irqrestore(&dm_timer_lock, flags); + + return ret; +} + +static struct platform_driver omap_dm_timer_driver = { + .probe = omap_dm_timer_probe, + .remove = __devexit_p(omap_dm_timer_remove), + .driver = { + .name = "omap_timer", + }, +}; + +static int __init omap_dm_timer_driver_init(void) +{ + return platform_driver_register(&omap_dm_timer_driver); +} + +static void __exit omap_dm_timer_driver_exit(void) +{ + platform_driver_unregister(&omap_dm_timer_driver); +} + +early_platform_init("earlytimer", &omap_dm_timer_driver); +module_init(omap_dm_timer_driver_init); +module_exit(omap_dm_timer_driver_exit); + +MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Texas Instruments Inc"); diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c index 3341ca4703e..db071bc71c4 100644 --- a/arch/arm/plat-omap/i2c.c +++ b/arch/arm/plat-omap/i2c.c @@ -108,6 +108,22 @@ static inline int omap1_i2c_add_bus(int bus_id) res[1].start = INT_I2C; pdata = &i2c_pdata[bus_id - 1]; + /* all OMAP1 have IP version 1 register set */ + pdata->rev = OMAP_I2C_IP_VERSION_1; + + /* all OMAP1 I2C are implemented like this */ + pdata->flags = OMAP_I2C_FLAG_NO_FIFO | + OMAP_I2C_FLAG_SIMPLE_CLOCK | + OMAP_I2C_FLAG_16BIT_DATA_REG | + OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK; + + /* how the cpu bus is wired up differs for 7xx only */ + + if (cpu_is_omap7xx()) + pdata->flags |= OMAP_I2C_FLAG_BUS_SHIFT_1; + else + pdata->flags |= OMAP_I2C_FLAG_BUS_SHIFT_2; + return platform_device_register(pdev); } @@ -123,21 +139,14 @@ static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t) omap_pm_set_max_mpu_wakeup_lat(dev, t); } -static struct omap_device_pm_latency omap_i2c_latency[] = { - [0] = { - .deactivate_func = omap_device_idle_hwmods, - .activate_func = omap_device_enable_hwmods, - .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, - }, -}; - static inline int omap2_i2c_add_bus(int bus_id) { int l; struct omap_hwmod *oh; - struct omap_device *od; + struct platform_device *pdev; char oh_name[MAX_OMAP_I2C_HWMOD_NAME_LEN]; struct omap_i2c_bus_platform_data *pdata; + struct omap_i2c_dev_attr *dev_attr; omap2_i2c_mux_pins(bus_id); @@ -152,6 +161,16 @@ static inline int omap2_i2c_add_bus(int bus_id) pdata = &i2c_pdata[bus_id - 1]; /* + * pass the hwmod class's CPU-specific knowledge of I2C IP 
revision in + * use, and functionality implementation flags, up to the OMAP I2C + * driver via platform data + */ + pdata->rev = oh->class->rev; + + dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr; + pdata->flags = dev_attr->flags; + + /* * When waiting for completion of a i2c transfer, we need to * set a wake up latency constraint for the MPU. This is to * ensure quick enough wakeup from idle, when transfer @@ -160,12 +179,12 @@ static inline int omap2_i2c_add_bus(int bus_id) */ if (cpu_is_omap34xx()) pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat; - od = omap_device_build(name, bus_id, oh, pdata, + pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(struct omap_i2c_bus_platform_data), - omap_i2c_latency, ARRAY_SIZE(omap_i2c_latency), 0); - WARN(IS_ERR(od), "Could not build omap_device for %s\n", name); + NULL, 0, 0); + WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name); - return PTR_ERR(od); + return PTR_RET(pdev); } #else static inline int omap2_i2c_add_bus(int bus_id) diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h index df4b9683f17..197ca03c3f7 100644 --- a/arch/arm/plat-omap/include/plat/clock.h +++ b/arch/arm/plat-omap/include/plat/clock.h @@ -80,8 +80,6 @@ struct clkops { * * @div is the divisor that should be applied to the parent clock's rate * to produce the current clock's rate. - * - * XXX @flags probably should be replaced with an struct omap_chip. */ struct clksel_rate { u32 val; diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h index 4564cc697d7..c50df4814f6 100644 --- a/arch/arm/plat-omap/include/plat/common.h +++ b/arch/arm/plat-omap/include/plat/common.h @@ -45,6 +45,18 @@ extern unsigned long long notrace omap_32k_sched_clock(void); extern void omap_reserve(void); +void omap2420_init_early(void); +void omap2430_init_early(void); +void omap3430_init_early(void); +void omap35xx_init_early(void); +void omap3630_init_early(void); +void omap3_init_early(void); /* Do not use this one */ +void am35xx_init_early(void); +void ti816x_init_early(void); +void omap4430_init_early(void); + +void omap_sram_init(void); + /* * IO bases for various OMAP processors * Except the tap base, rest all the io bases @@ -53,13 +65,13 @@ extern void omap_reserve(void); struct omap_globals { u32 class; /* OMAP class to detect */ void __iomem *tap; /* Control module ID code */ - unsigned long sdrc; /* SDRAM Controller */ - unsigned long sms; /* SDRAM Memory Scheduler */ - unsigned long ctrl; /* System Control Module */ - unsigned long ctrl_pad; /* PAD Control Module */ - unsigned long prm; /* Power and Reset Management */ - unsigned long cm; /* Clock Management */ - unsigned long cm2; + void __iomem *sdrc; /* SDRAM Controller */ + void __iomem *sms; /* SDRAM Memory Scheduler */ + void __iomem *ctrl; /* System Control Module */ + void __iomem *ctrl_pad; /* PAD Control Module */ + void __iomem *prm; /* Power and Reset Management */ + void __iomem *cm; /* Clock Management */ + void __iomem *cm2; }; void omap2_set_globals_242x(void); @@ -74,7 +86,11 @@ void omap2_set_globals_sdrc(struct omap_globals *); void omap2_set_globals_control(struct omap_globals *); void omap2_set_globals_prcm(struct omap_globals *); +void omap242x_map_io(void); +void omap243x_map_io(void); void omap3_map_io(void); +void omap4_map_io(void); + /** * omap_test_timeout - busy-loop, testing a condition diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h index 
67b3d75884c..408a12f7920 100644 --- a/arch/arm/plat-omap/include/plat/cpu.h +++ b/arch/arm/plat-omap/include/plat/cpu.h @@ -44,13 +44,6 @@ int omap_type(void); -struct omap_chip_id { - u16 oc; - u8 type; -}; - -#define OMAP_CHIP_INIT(x) { .oc = x } - /* * omap_rev bits: * CPU id bits (0730, 1510, 1710, 2422...) [31:16] @@ -60,19 +53,6 @@ struct omap_chip_id { unsigned int omap_rev(void); /* - * Define CPU revision bits - * - * Verbose meaning of the revision bits may be different for a silicon - * family. This difference can be handled separately. - */ -#define OMAP_REVBITS_00 0x00 -#define OMAP_REVBITS_01 0x01 -#define OMAP_REVBITS_02 0x02 -#define OMAP_REVBITS_03 0x03 -#define OMAP_REVBITS_04 0x04 -#define OMAP_REVBITS_05 0x05 - -/* * Get the CPU revision for OMAP devices */ #define GET_OMAP_REVISION() ((omap_rev() >> 8) & 0xff) @@ -262,7 +242,7 @@ IS_OMAP_TYPE(2422, 0x2422) IS_OMAP_TYPE(2423, 0x2423) IS_OMAP_TYPE(2430, 0x2430) IS_OMAP_TYPE(3430, 0x3430) -IS_OMAP_TYPE(3505, 0x3505) +IS_OMAP_TYPE(3505, 0x3517) IS_OMAP_TYPE(3517, 0x3517) #define cpu_is_omap310() 0 @@ -354,8 +334,9 @@ IS_OMAP_TYPE(3517, 0x3517) (!omap3_has_sgx()) && \ (omap3_has_iva())) # define cpu_is_omap3530() (cpu_is_omap3430()) -# define cpu_is_omap3505() is_omap3505() # define cpu_is_omap3517() is_omap3517() +# define cpu_is_omap3505() (cpu_is_omap3517() && \ + !omap3_has_sgx()) # undef cpu_is_omap3630 # define cpu_is_omap3630() is_omap363x() # define cpu_is_ti816x() is_ti816x() @@ -379,35 +360,31 @@ IS_OMAP_TYPE(3517, 0x3517) /* Various silicon revisions for omap2 */ #define OMAP242X_CLASS 0x24200024 #define OMAP2420_REV_ES1_0 OMAP242X_CLASS -#define OMAP2420_REV_ES2_0 (OMAP242X_CLASS | (OMAP_REVBITS_01 << 8)) +#define OMAP2420_REV_ES2_0 (OMAP242X_CLASS | (0x1 << 8)) #define OMAP243X_CLASS 0x24300024 #define OMAP2430_REV_ES1_0 OMAP243X_CLASS #define OMAP343X_CLASS 0x34300034 #define OMAP3430_REV_ES1_0 OMAP343X_CLASS -#define OMAP3430_REV_ES2_0 (OMAP343X_CLASS | (OMAP_REVBITS_01 << 8)) -#define OMAP3430_REV_ES2_1 (OMAP343X_CLASS | (OMAP_REVBITS_02 << 8)) -#define OMAP3430_REV_ES3_0 (OMAP343X_CLASS | (OMAP_REVBITS_03 << 8)) -#define OMAP3430_REV_ES3_1 (OMAP343X_CLASS | (OMAP_REVBITS_04 << 8)) -#define OMAP3430_REV_ES3_1_2 (OMAP343X_CLASS | (OMAP_REVBITS_05 << 8)) +#define OMAP3430_REV_ES2_0 (OMAP343X_CLASS | (0x1 << 8)) +#define OMAP3430_REV_ES2_1 (OMAP343X_CLASS | (0x2 << 8)) +#define OMAP3430_REV_ES3_0 (OMAP343X_CLASS | (0x3 << 8)) +#define OMAP3430_REV_ES3_1 (OMAP343X_CLASS | (0x4 << 8)) +#define OMAP3430_REV_ES3_1_2 (OMAP343X_CLASS | (0x5 << 8)) #define OMAP363X_CLASS 0x36300034 #define OMAP3630_REV_ES1_0 OMAP363X_CLASS -#define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (OMAP_REVBITS_01 << 8)) -#define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (OMAP_REVBITS_02 << 8)) +#define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (0x1 << 8)) +#define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (0x2 << 8)) -#define OMAP35XX_CLASS 0x35000034 -#define OMAP3503_REV(v) (OMAP35XX_CLASS | (0x3503 << 16) | (v << 8)) -#define OMAP3515_REV(v) (OMAP35XX_CLASS | (0x3515 << 16) | (v << 8)) -#define OMAP3525_REV(v) (OMAP35XX_CLASS | (0x3525 << 16) | (v << 8)) -#define OMAP3530_REV(v) (OMAP35XX_CLASS | (0x3530 << 16) | (v << 8)) -#define OMAP3505_REV(v) (OMAP35XX_CLASS | (0x3505 << 16) | (v << 8)) -#define OMAP3517_REV(v) (OMAP35XX_CLASS | (0x3517 << 16) | (v << 8)) +#define OMAP3517_CLASS 0x35170034 +#define OMAP3517_REV_ES1_0 OMAP3517_CLASS +#define OMAP3517_REV_ES1_1 (OMAP3517_CLASS | (0x1 << 8)) #define TI816X_CLASS 0x81600034 #define TI8168_REV_ES1_0 
TI816X_CLASS -#define TI8168_REV_ES1_1 (TI816X_CLASS | (OMAP_REVBITS_01 << 8)) +#define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8)) #define OMAP443X_CLASS 0x44300044 #define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8)) @@ -418,65 +395,17 @@ IS_OMAP_TYPE(3517, 0x3517) #define OMAP446X_CLASS 0x44600044 #define OMAP4460_REV_ES1_0 (OMAP446X_CLASS | (0x10 << 8)) -/* - * omap_chip bits - * - * CHIP_IS_OMAP{2420,2430,3430} indicate that a particular structure is - * valid on all chips of that type. CHIP_IS_OMAP3430ES{1,2} indicates - * something that is only valid on that particular ES revision. - * - * These bits may be ORed together to indicate structures that are - * available on multiple chip types. - * - * To test whether a particular structure matches the current OMAP chip type, - * use omap_chip_is(). - * - */ -#define CHIP_IS_OMAP2420 (1 << 0) -#define CHIP_IS_OMAP2430 (1 << 1) -#define CHIP_IS_OMAP3430 (1 << 2) -#define CHIP_IS_OMAP3430ES1 (1 << 3) -#define CHIP_IS_OMAP3430ES2 (1 << 4) -#define CHIP_IS_OMAP3430ES3_0 (1 << 5) -#define CHIP_IS_OMAP3430ES3_1 (1 << 6) -#define CHIP_IS_OMAP3630ES1 (1 << 7) -#define CHIP_IS_OMAP4430ES1 (1 << 8) -#define CHIP_IS_OMAP3630ES1_1 (1 << 9) -#define CHIP_IS_OMAP3630ES1_2 (1 << 10) -#define CHIP_IS_OMAP4430ES2 (1 << 11) -#define CHIP_IS_OMAP4430ES2_1 (1 << 12) -#define CHIP_IS_OMAP4430ES2_2 (1 << 13) -#define CHIP_IS_TI816X (1 << 14) -#define CHIP_IS_OMAP4460ES1_0 (1 << 15) - -#define CHIP_IS_OMAP24XX (CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430) - -#define CHIP_IS_OMAP4430 (CHIP_IS_OMAP4430ES1 | \ - CHIP_IS_OMAP4430ES2 | \ - CHIP_IS_OMAP4430ES2_1 | \ - CHIP_IS_OMAP4430ES2_2 | \ - CHIP_IS_OMAP4460ES1_0) - -/* - * "GE" here represents "greater than or equal to" in terms of ES - * levels. So CHIP_GE_OMAP3430ES2 is intended to match all OMAP3430 - * chips at ES2 and beyond, but not, for example, any OMAP lines after - * OMAP3. - */ -#define CHIP_GE_OMAP3430ES2 (CHIP_IS_OMAP3430ES2 | \ - CHIP_IS_OMAP3430ES3_0 | \ - CHIP_GE_OMAP3430ES3_1) -#define CHIP_GE_OMAP3430ES3_1 (CHIP_IS_OMAP3430ES3_1 | \ - CHIP_IS_OMAP3630ES1 | \ - CHIP_GE_OMAP3630ES1_1) -#define CHIP_GE_OMAP3630ES1_1 (CHIP_IS_OMAP3630ES1_1 | \ - CHIP_IS_OMAP3630ES1_2) - -int omap_chip_is(struct omap_chip_id oci); void omap2_check_revision(void); /* * Runtime detection of OMAP3 features + * + * OMAP3_HAS_IO_CHAIN_CTRL: Some later members of the OMAP3 chip + * family have OS-level control over the I/O chain clock. This is + * to avoid a window during which wakeups could potentially be lost + * during powerdomain transitions. If this bit is set, it + * indicates that the chip does support OS-level control of this + * feature. 
*/ extern u32 omap_features; @@ -488,9 +417,10 @@ extern u32 omap_features; #define OMAP3_HAS_192MHZ_CLK BIT(5) #define OMAP3_HAS_IO_WAKEUP BIT(6) #define OMAP3_HAS_SDRC BIT(7) -#define OMAP4_HAS_MPU_1GHZ BIT(8) -#define OMAP4_HAS_MPU_1_2GHZ BIT(9) -#define OMAP4_HAS_MPU_1_5GHZ BIT(10) +#define OMAP3_HAS_IO_CHAIN_CTRL BIT(8) +#define OMAP4_HAS_MPU_1GHZ BIT(9) +#define OMAP4_HAS_MPU_1_2GHZ BIT(10) +#define OMAP4_HAS_MPU_1_5GHZ BIT(11) #define OMAP3_HAS_FEATURE(feat,flag) \ @@ -507,12 +437,11 @@ OMAP3_HAS_FEATURE(isp, ISP) OMAP3_HAS_FEATURE(192mhz_clk, 192MHZ_CLK) OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP) OMAP3_HAS_FEATURE(sdrc, SDRC) +OMAP3_HAS_FEATURE(io_chain_ctrl, IO_CHAIN_CTRL) /* * Runtime detection of OMAP4 features */ -extern u32 omap_features; - #define OMAP4_HAS_FEATURE(feat, flag) \ static inline unsigned int omap4_has_ ##feat(void) \ { \ diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h index eb5d16c60cd..9418f00b6c3 100644 --- a/arch/arm/plat-omap/include/plat/dmtimer.h +++ b/arch/arm/plat-omap/include/plat/dmtimer.h @@ -1,5 +1,5 @@ /* - * arch/arm/plat-omap/include/mach/dmtimer.h + * arch/arm/plat-omap/include/plat/dmtimer.h * * OMAP Dual-Mode Timers * @@ -35,6 +35,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/platform_device.h> #ifndef __ASM_ARCH_DMTIMER_H #define __ASM_ARCH_DMTIMER_H @@ -59,12 +60,56 @@ * in OMAP4 can be distinguished. */ #define OMAP_TIMER_IP_VERSION_1 0x1 + +/* timer capabilities used in hwmod database */ +#define OMAP_TIMER_SECURE 0x80000000 +#define OMAP_TIMER_ALWON 0x40000000 +#define OMAP_TIMER_HAS_PWM 0x20000000 + +struct omap_timer_capability_dev_attr { + u32 timer_capability; +}; + struct omap_dm_timer; struct clk; +struct timer_regs { + u32 tidr; + u32 tiocp_cfg; + u32 tistat; + u32 tisr; + u32 tier; + u32 twer; + u32 tclr; + u32 tcrr; + u32 tldr; + u32 ttrg; + u32 twps; + u32 tmar; + u32 tcar1; + u32 tsicr; + u32 tcar2; + u32 tpir; + u32 tnir; + u32 tcvr; + u32 tocr; + u32 towr; +}; + +struct dmtimer_platform_data { + int (*set_timer_src)(struct platform_device *pdev, int source); + int timer_ip_version; + u32 needs_manual_reset:1; + bool reserved; + + bool loses_context; + + int (*get_context_loss_count)(struct device *dev); +}; + struct omap_dm_timer *omap_dm_timer_request(void); struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id); -void omap_dm_timer_free(struct omap_dm_timer *timer); +int omap_dm_timer_free(struct omap_dm_timer *timer); void omap_dm_timer_enable(struct omap_dm_timer *timer); void omap_dm_timer_disable(struct omap_dm_timer *timer); @@ -73,23 +118,23 @@ int omap_dm_timer_get_irq(struct omap_dm_timer *timer); u32 omap_dm_timer_modify_idlect_mask(u32 inputmask); struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer); -void omap_dm_timer_trigger(struct omap_dm_timer *timer); -void omap_dm_timer_start(struct omap_dm_timer *timer); -void omap_dm_timer_stop(struct omap_dm_timer *timer); +int omap_dm_timer_trigger(struct omap_dm_timer *timer); +int omap_dm_timer_start(struct omap_dm_timer *timer); +int omap_dm_timer_stop(struct omap_dm_timer *timer); int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source); -void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, unsigned int value); -void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, unsigned int value); -void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match); -void 
omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, int toggle, int trigger); -void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler); +int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, unsigned int value); +int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, unsigned int value); +int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match); +int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, int toggle, int trigger); +int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler); -void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value); +int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value); unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer); -void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value); +int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value); unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer); -void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value); +int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value); int omap_dm_timers_active(void); @@ -98,12 +143,30 @@ int omap_dm_timers_active(void); * used by dmtimer.c and sys_timer related code. */ -/* register offsets */ -#define _OMAP_TIMER_ID_OFFSET 0x00 -#define _OMAP_TIMER_OCP_CFG_OFFSET 0x10 -#define _OMAP_TIMER_SYS_STAT_OFFSET 0x14 -#define _OMAP_TIMER_STAT_OFFSET 0x18 -#define _OMAP_TIMER_INT_EN_OFFSET 0x1c +/* + * The interrupt registers are different between v1 and v2 ip. + * These registers are offsets from timer->iobase. + */ +#define OMAP_TIMER_ID_OFFSET 0x00 +#define OMAP_TIMER_OCP_CFG_OFFSET 0x10 + +#define OMAP_TIMER_V1_SYS_STAT_OFFSET 0x14 +#define OMAP_TIMER_V1_STAT_OFFSET 0x18 +#define OMAP_TIMER_V1_INT_EN_OFFSET 0x1c + +#define OMAP_TIMER_V2_IRQSTATUS_RAW 0x24 +#define OMAP_TIMER_V2_IRQSTATUS 0x28 +#define OMAP_TIMER_V2_IRQENABLE_SET 0x2c +#define OMAP_TIMER_V2_IRQENABLE_CLR 0x30 + +/* + * The functional registers have a different base on v1 and v2 ip. + * These registers are offsets from timer->func_base. The func_base + * is samae as io_base for v1 and io_base + 0x14 for v2 ip. 
+ * + */ +#define OMAP_TIMER_V2_FUNC_OFFSET 0x14 + #define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20 #define _OMAP_TIMER_CTRL_OFFSET 0x24 #define OMAP_TIMER_CTRL_GPOCFG (1 << 14) @@ -147,21 +210,6 @@ int omap_dm_timers_active(void); /* register offsets with the write pending bit encoded */ #define WPSHIFT 16 -#define OMAP_TIMER_ID_REG (_OMAP_TIMER_ID_OFFSET \ - | (WP_NONE << WPSHIFT)) - -#define OMAP_TIMER_OCP_CFG_REG (_OMAP_TIMER_OCP_CFG_OFFSET \ - | (WP_NONE << WPSHIFT)) - -#define OMAP_TIMER_SYS_STAT_REG (_OMAP_TIMER_SYS_STAT_OFFSET \ - | (WP_NONE << WPSHIFT)) - -#define OMAP_TIMER_STAT_REG (_OMAP_TIMER_STAT_OFFSET \ - | (WP_NONE << WPSHIFT)) - -#define OMAP_TIMER_INT_EN_REG (_OMAP_TIMER_INT_EN_OFFSET \ - | (WP_NONE << WPSHIFT)) - #define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \ | (WP_NONE << WPSHIFT)) @@ -209,49 +257,88 @@ int omap_dm_timers_active(void); struct omap_dm_timer { unsigned long phys_base; + int id; int irq; -#ifdef CONFIG_ARCH_OMAP2PLUS struct clk *iclk, *fclk; -#endif - void __iomem *io_base; + + void __iomem *io_base; + void __iomem *sys_stat; /* TISTAT timer status */ + void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */ + void __iomem *irq_ena; /* irq enable */ + void __iomem *irq_dis; /* irq disable, only on v2 ip */ + void __iomem *pend; /* write pending */ + void __iomem *func_base; /* function register base */ + unsigned long rate; unsigned reserved:1; - unsigned enabled:1; unsigned posted:1; + struct timer_regs context; + bool loses_context; + int ctx_loss_count; + int revision; + struct platform_device *pdev; + struct list_head node; + + int (*get_context_loss_count)(struct device *dev); }; -extern u32 sys_timer_reserved; -void omap_dm_timer_prepare(struct omap_dm_timer *timer); +int omap_dm_timer_prepare(struct omap_dm_timer *timer); -static inline u32 __omap_dm_timer_read(void __iomem *base, u32 reg, +static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, int posted) { if (posted) - while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff)) - & (reg >> WPSHIFT)) + while (__raw_readl(timer->pend) & (reg >> WPSHIFT)) cpu_relax(); - return __raw_readl(base + (reg & 0xff)); + return __raw_readl(timer->func_base + (reg & 0xff)); } -static inline void __omap_dm_timer_write(void __iomem *base, u32 reg, u32 val, - int posted) +static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, + u32 reg, u32 val, int posted) { if (posted) - while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff)) - & (reg >> WPSHIFT)) + while (__raw_readl(timer->pend) & (reg >> WPSHIFT)) cpu_relax(); - __raw_writel(val, base + (reg & 0xff)); + __raw_writel(val, timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) +{ + u32 tidr; + + /* Assume v1 ip if bits [31:16] are zero */ + tidr = __raw_readl(timer->io_base); + if (!(tidr >> 16)) { + timer->revision = 1; + timer->sys_stat = timer->io_base + + OMAP_TIMER_V1_SYS_STAT_OFFSET; + timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; + timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->irq_dis = 0; + timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; + timer->func_base = timer->io_base; + } else { + timer->revision = 2; + timer->sys_stat = 0; + timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; + timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; + timer->pend = timer->io_base + + 
_OMAP_TIMER_WRITE_PEND_OFFSET + + OMAP_TIMER_V2_FUNC_OFFSET; + timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; + } } /* Assumes the source clock has been set by caller */ -static inline void __omap_dm_timer_reset(void __iomem *base, int autoidle, - int wakeup) +static inline void __omap_dm_timer_reset(struct omap_dm_timer *timer, + int autoidle, int wakeup) { u32 l; - l = __omap_dm_timer_read(base, OMAP_TIMER_OCP_CFG_REG, 0); + l = __raw_readl(timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET); l |= 0x02 << 3; /* Set to smart-idle mode */ l |= 0x2 << 8; /* Set clock activity to perserve f-clock on idle */ @@ -261,10 +348,10 @@ static inline void __omap_dm_timer_reset(void __iomem *base, int autoidle, if (wakeup) l |= 1 << 2; - __omap_dm_timer_write(base, OMAP_TIMER_OCP_CFG_REG, l, 0); + __raw_writel(l, timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET); /* Match hardware reset default of posted mode */ - __omap_dm_timer_write(base, OMAP_TIMER_IF_CTRL_REG, + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED, 0); } @@ -286,18 +373,18 @@ static inline int __omap_dm_timer_set_source(struct clk *timer_fck, return ret; } -static inline void __omap_dm_timer_stop(void __iomem *base, int posted, - unsigned long rate) +static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, + int posted, unsigned long rate) { u32 l; - l = __omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted); + l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); if (l & OMAP_TIMER_CTRL_ST) { l &= ~0x1; - __omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, l, posted); + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); #ifdef CONFIG_ARCH_OMAP2PLUS /* Readback to make sure write has completed */ - __omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted); + __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); /* * Wait for functional clock period x 3.5 to make sure that * timer is stopped @@ -307,34 +394,34 @@ static inline void __omap_dm_timer_stop(void __iomem *base, int posted, } /* Ack possibly pending interrupt */ - __omap_dm_timer_write(base, OMAP_TIMER_STAT_REG, - OMAP_TIMER_INT_OVERFLOW, 0); + __raw_writel(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); } -static inline void __omap_dm_timer_load_start(void __iomem *base, u32 ctrl, - unsigned int load, int posted) +static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer, + u32 ctrl, unsigned int load, + int posted) { - __omap_dm_timer_write(base, OMAP_TIMER_COUNTER_REG, load, posted); - __omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, ctrl, posted); + __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted); + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted); } -static inline void __omap_dm_timer_int_enable(void __iomem *base, +static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, unsigned int value) { - __omap_dm_timer_write(base, OMAP_TIMER_INT_EN_REG, value, 0); - __omap_dm_timer_write(base, OMAP_TIMER_WAKEUP_EN_REG, value, 0); + __raw_writel(value, timer->irq_ena); + __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); } -static inline unsigned int __omap_dm_timer_read_counter(void __iomem *base, - int posted) +static inline unsigned int +__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) { - return __omap_dm_timer_read(base, OMAP_TIMER_COUNTER_REG, posted); + return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); } -static inline void __omap_dm_timer_write_status(void __iomem *base, +static inline void 
__omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value) { - __omap_dm_timer_write(base, OMAP_TIMER_STAT_REG, value, 0); + __raw_writel(value, timer->irq_stat); } #endif /* __ASM_ARCH_DMTIMER_H */ diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h index 91e8de3db08..9e86ee0aed0 100644 --- a/arch/arm/plat-omap/include/plat/gpio.h +++ b/arch/arm/plat-omap/include/plat/gpio.h @@ -222,26 +222,6 @@ extern void omap_gpio_restore_context(void); #include <linux/errno.h> #include <asm-generic/gpio.h> -static inline int gpio_get_value(unsigned gpio) -{ - return __gpio_get_value(gpio); -} - -static inline void gpio_set_value(unsigned gpio, int value) -{ - __gpio_set_value(gpio, value); -} - -static inline int gpio_cansleep(unsigned gpio) -{ - return __gpio_cansleep(gpio); -} - -static inline int gpio_to_irq(unsigned gpio) -{ - return __gpio_to_irq(gpio); -} - static inline int irq_to_gpio(unsigned irq) { int tmp; diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h index d72ec85c97e..7f2969eadb8 100644 --- a/arch/arm/plat-omap/include/plat/io.h +++ b/arch/arm/plat-omap/include/plat/io.h @@ -228,13 +228,13 @@ #define OMAP44XX_EMIF2_PHYS OMAP44XX_EMIF2_BASE /* 0x4d000000 --> 0xfd200000 */ -#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF2_PHYS + OMAP4_L3_PER_IO_OFFSET) #define OMAP44XX_EMIF2_SIZE SZ_1M +#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF1_VIRT + OMAP44XX_EMIF1_SIZE) #define OMAP44XX_DMM_PHYS OMAP44XX_DMM_BASE /* 0x4e000000 --> 0xfd300000 */ -#define OMAP44XX_DMM_VIRT (OMAP44XX_DMM_PHYS + OMAP4_L3_PER_IO_OFFSET) #define OMAP44XX_DMM_SIZE SZ_1M +#define OMAP44XX_DMM_VIRT (OMAP44XX_EMIF2_VIRT + OMAP44XX_EMIF2_SIZE) /* * ---------------------------------------------------------------------------- * Omap specific register access @@ -247,6 +247,8 @@ * NOTE: Please use ioremap + __raw_read/write where possible instead of these */ +void omap_ioremap_init(void); + extern u8 omap_readb(u32 pa); extern u16 omap_readw(u32 pa); extern u32 omap_readl(u32 pa); @@ -256,8 +258,31 @@ extern void omap_writel(u32 v, u32 pa); struct omap_sdrc_params; -extern void omap1_map_common_io(void); -extern void omap1_init_common_hw(void); +#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) +void omap7xx_map_io(void); +#else +static inline void omap_map_io(void) +{ +} +#endif + +#ifdef CONFIG_ARCH_OMAP15XX +void omap15xx_map_io(void); +#else +static inline void omap15xx_map_io(void) +{ +} +#endif + +#ifdef CONFIG_ARCH_OMAP16XX +void omap16xx_map_io(void); +#else +static inline void omap16xx_map_io(void) +{ +} +#endif + +void omap1_init_early(void); #ifdef CONFIG_SOC_OMAP2420 extern void omap242x_map_common_io(void); @@ -300,7 +325,7 @@ static inline void omap44xx_map_common_io(void) #endif extern void omap2_init_common_infrastructure(void); -extern void omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0, +extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, struct omap_sdrc_params *sdrc_cs1); #define __arch_ioremap omap_ioremap @@ -309,6 +334,8 @@ extern void omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0, void __iomem *omap_ioremap(unsigned long phys, size_t size, unsigned int type); void omap_iounmap(volatile void __iomem *addr); +extern void __init omap_init_consistent_dma_size(void); + #endif #endif diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h index 174f1b9c8c0..a1d79ee1925 100644 --- a/arch/arm/plat-omap/include/plat/iommu.h +++ 
b/arch/arm/plat-omap/include/plat/iommu.h @@ -25,16 +25,17 @@ struct iotlb_entry { }; }; -struct iommu { +struct omap_iommu { const char *name; struct module *owner; struct clk *clk; void __iomem *regbase; struct device *dev; void *isr_priv; + struct iommu_domain *domain; unsigned int refcount; - struct mutex iommu_lock; /* global for this whole object */ + spinlock_t iommu_lock; /* global for this whole object */ /* * We don't change iopgd for a situation like pgd for a task, @@ -48,8 +49,6 @@ struct iommu { struct list_head mmap; struct mutex mmap_lock; /* protect mmap */ - int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv); - void *ctx; /* iommu context: registres saved area */ u32 da_start; u32 da_end; @@ -81,25 +80,27 @@ struct iotlb_lock { struct iommu_functions { unsigned long version; - int (*enable)(struct iommu *obj); - void (*disable)(struct iommu *obj); - void (*set_twl)(struct iommu *obj, bool on); - u32 (*fault_isr)(struct iommu *obj, u32 *ra); + int (*enable)(struct omap_iommu *obj); + void (*disable)(struct omap_iommu *obj); + void (*set_twl)(struct omap_iommu *obj, bool on); + u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra); - void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr); - void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr); + void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr); + void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr); - struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e); + struct cr_regs *(*alloc_cr)(struct omap_iommu *obj, + struct iotlb_entry *e); int (*cr_valid)(struct cr_regs *cr); u32 (*cr_to_virt)(struct cr_regs *cr); void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e); - ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf); + ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr, + char *buf); u32 (*get_pte_attr)(struct iotlb_entry *e); - void (*save_ctx)(struct iommu *obj); - void (*restore_ctx)(struct iommu *obj); - ssize_t (*dump_ctx)(struct iommu *obj, char *buf, ssize_t len); + void (*save_ctx)(struct omap_iommu *obj); + void (*restore_ctx)(struct omap_iommu *obj); + ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len); }; struct iommu_platform_data { @@ -150,40 +151,31 @@ struct iommu_platform_data { /* * global functions */ -extern u32 iommu_arch_version(void); - -extern void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); -extern u32 iotlb_cr_to_virt(struct cr_regs *cr); - -extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e); -extern void iommu_set_twl(struct iommu *obj, bool on); -extern void flush_iotlb_page(struct iommu *obj, u32 da); -extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end); -extern void flush_iotlb_all(struct iommu *obj); - -extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e); -extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, - u32 **ppte); -extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova); - -extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end); -extern struct iommu *iommu_get(const char *name); -extern void iommu_put(struct iommu *obj); -extern int iommu_set_isr(const char *name, - int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, +extern u32 omap_iommu_arch_version(void); + +extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); + +extern int +omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); + +extern int 
omap_iommu_set_isr(const char *name, + int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs, void *priv), void *isr_priv); -extern void iommu_save_ctx(struct iommu *obj); -extern void iommu_restore_ctx(struct iommu *obj); +extern void omap_iommu_save_ctx(struct omap_iommu *obj); +extern void omap_iommu_restore_ctx(struct omap_iommu *obj); -extern int install_iommu_arch(const struct iommu_functions *ops); -extern void uninstall_iommu_arch(const struct iommu_functions *ops); +extern int omap_install_iommu_arch(const struct iommu_functions *ops); +extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops); -extern int foreach_iommu_device(void *data, +extern int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)); -extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len); -extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len); +extern ssize_t +omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); +extern size_t +omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); +struct device *omap_find_iommu_device(const char *name); #endif /* __MACH_IOMMU_H */ diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h index 10ad05f410e..d4116b595e4 100644 --- a/arch/arm/plat-omap/include/plat/iommu2.h +++ b/arch/arm/plat-omap/include/plat/iommu2.h @@ -83,12 +83,12 @@ /* * register accessors */ -static inline u32 iommu_read_reg(struct iommu *obj, size_t offs) +static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs) { return __raw_readl(obj->regbase + offs); } -static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs) +static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs) { __raw_writel(val, obj->regbase + offs); } diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/include/plat/iopgtable.h index c3e93bb0911..66a813977d5 100644 --- a/arch/arm/plat-omap/iopgtable.h +++ b/arch/arm/plat-omap/include/plat/iopgtable.h @@ -56,6 +56,19 @@ #define IOPAGE_MASK IOPTE_MASK +/** + * omap_iommu_translate() - va to pa translation + * @d: omap iommu descriptor + * @va: virtual address + * @mask: omap iommu descriptor mask + * + * va to pa translation + */ +static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask) +{ + return (d & mask) | (va & (~mask)); +} + /* * some descriptor attributes. 
*/ @@ -64,10 +77,15 @@ #define IOPGD_SUPER (1 << 18 | 2 << 0) #define iopgd_is_table(x) (((x) & 3) == IOPGD_TABLE) +#define iopgd_is_section(x) (((x) & (1 << 18 | 3)) == IOPGD_SECTION) +#define iopgd_is_super(x) (((x) & (1 << 18 | 3)) == IOPGD_SUPER) #define IOPTE_SMALL (2 << 0) #define IOPTE_LARGE (1 << 0) +#define iopte_is_small(x) (((x) & 2) == IOPTE_SMALL) +#define iopte_is_large(x) (((x) & 3) == IOPTE_LARGE) + /* to find an entry in a page-table-directory */ #define iopgd_index(da) (((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1)) #define iopgd_offset(obj, da) ((obj)->iopgd + iopgd_index(da)) @@ -97,6 +115,6 @@ static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, } #define to_iommu(dev) \ - (struct iommu *)platform_get_drvdata(to_platform_device(dev)) + (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)) #endif /* __PLAT_OMAP_IOMMU_H */ diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h index e992b9655fb..6af1a91c0f3 100644 --- a/arch/arm/plat-omap/include/plat/iovmm.h +++ b/arch/arm/plat-omap/include/plat/iovmm.h @@ -13,8 +13,10 @@ #ifndef __IOMMU_MMAP_H #define __IOMMU_MMAP_H +#include <linux/iommu.h> + struct iovm_struct { - struct iommu *iommu; /* iommu object which this belongs to */ + struct omap_iommu *iommu; /* iommu object which this belongs to */ u32 da_start; /* area definition */ u32 da_end; u32 flags; /* IOVMF_: see below */ @@ -70,20 +72,18 @@ struct iovm_struct { #define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) -extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da); -extern u32 iommu_vmap(struct iommu *obj, u32 da, +extern struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da); +extern u32 +omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, const struct sg_table *sgt, u32 flags); -extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da); -extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, - u32 flags); -extern void iommu_vfree(struct iommu *obj, const u32 da); -extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, - u32 flags); -extern void iommu_kunmap(struct iommu *obj, u32 da); -extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, - u32 flags); -extern void iommu_kfree(struct iommu *obj, u32 da); - -extern void *da_to_va(struct iommu *obj, u32 da); +extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain, + struct omap_iommu *obj, u32 da); +extern u32 +omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, + u32 da, size_t bytes, u32 flags); +extern void +omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj, + const u32 da); +extern void *omap_da_to_va(struct omap_iommu *obj, u32 da); #endif /* __IOMMU_MMAP_H */ diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h index 9882c657b2d..8fa74e2c9d6 100644 --- a/arch/arm/plat-omap/include/plat/mcbsp.h +++ b/arch/arm/plat-omap/include/plat/mcbsp.h @@ -25,9 +25,7 @@ #define __ASM_ARCH_OMAP_MCBSP_H #include <linux/spinlock.h> - -#include <mach/hardware.h> -#include <plat/clock.h> +#include <linux/clk.h> /* macro for building platform_device for McBSP ports */ #define OMAP_MCBSP_PLATFORM_DEVICE(port_nr) \ @@ -40,104 +38,60 @@ static struct platform_device omap_mcbsp##port_nr = { \ #define MCBSP_CONFIG_TYPE3 0x3 #define MCBSP_CONFIG_TYPE4 0x4 -#define OMAP7XX_MCBSP1_BASE 0xfffb1000 -#define OMAP7XX_MCBSP2_BASE 0xfffb1800 - -#define OMAP1510_MCBSP1_BASE 
0xe1011800 -#define OMAP1510_MCBSP2_BASE 0xfffb1000 -#define OMAP1510_MCBSP3_BASE 0xe1017000 - -#define OMAP1610_MCBSP1_BASE 0xe1011800 -#define OMAP1610_MCBSP2_BASE 0xfffb1000 -#define OMAP1610_MCBSP3_BASE 0xe1017000 - -#ifdef CONFIG_ARCH_OMAP1 - -#define OMAP_MCBSP_REG_DRR2 0x00 -#define OMAP_MCBSP_REG_DRR1 0x02 -#define OMAP_MCBSP_REG_DXR2 0x04 -#define OMAP_MCBSP_REG_DXR1 0x06 -#define OMAP_MCBSP_REG_DRR 0x02 -#define OMAP_MCBSP_REG_DXR 0x06 -#define OMAP_MCBSP_REG_SPCR2 0x08 -#define OMAP_MCBSP_REG_SPCR1 0x0a -#define OMAP_MCBSP_REG_RCR2 0x0c -#define OMAP_MCBSP_REG_RCR1 0x0e -#define OMAP_MCBSP_REG_XCR2 0x10 -#define OMAP_MCBSP_REG_XCR1 0x12 -#define OMAP_MCBSP_REG_SRGR2 0x14 -#define OMAP_MCBSP_REG_SRGR1 0x16 -#define OMAP_MCBSP_REG_MCR2 0x18 -#define OMAP_MCBSP_REG_MCR1 0x1a -#define OMAP_MCBSP_REG_RCERA 0x1c -#define OMAP_MCBSP_REG_RCERB 0x1e -#define OMAP_MCBSP_REG_XCERA 0x20 -#define OMAP_MCBSP_REG_XCERB 0x22 -#define OMAP_MCBSP_REG_PCR0 0x24 -#define OMAP_MCBSP_REG_RCERC 0x26 -#define OMAP_MCBSP_REG_RCERD 0x28 -#define OMAP_MCBSP_REG_XCERC 0x2A -#define OMAP_MCBSP_REG_XCERD 0x2C -#define OMAP_MCBSP_REG_RCERE 0x2E -#define OMAP_MCBSP_REG_RCERF 0x30 -#define OMAP_MCBSP_REG_XCERE 0x32 -#define OMAP_MCBSP_REG_XCERF 0x34 -#define OMAP_MCBSP_REG_RCERG 0x36 -#define OMAP_MCBSP_REG_RCERH 0x38 -#define OMAP_MCBSP_REG_XCERG 0x3A -#define OMAP_MCBSP_REG_XCERH 0x3C - -/* Dummy defines, these are not available on omap1 */ -#define OMAP_MCBSP_REG_XCCR 0x00 -#define OMAP_MCBSP_REG_RCCR 0x00 - -#else - -#define OMAP_MCBSP_REG_DRR2 0x00 -#define OMAP_MCBSP_REG_DRR1 0x04 -#define OMAP_MCBSP_REG_DXR2 0x08 -#define OMAP_MCBSP_REG_DXR1 0x0C -#define OMAP_MCBSP_REG_DRR 0x00 -#define OMAP_MCBSP_REG_DXR 0x08 -#define OMAP_MCBSP_REG_SPCR2 0x10 -#define OMAP_MCBSP_REG_SPCR1 0x14 -#define OMAP_MCBSP_REG_RCR2 0x18 -#define OMAP_MCBSP_REG_RCR1 0x1C -#define OMAP_MCBSP_REG_XCR2 0x20 -#define OMAP_MCBSP_REG_XCR1 0x24 -#define OMAP_MCBSP_REG_SRGR2 0x28 -#define OMAP_MCBSP_REG_SRGR1 0x2C -#define OMAP_MCBSP_REG_MCR2 0x30 -#define OMAP_MCBSP_REG_MCR1 0x34 -#define OMAP_MCBSP_REG_RCERA 0x38 -#define OMAP_MCBSP_REG_RCERB 0x3C -#define OMAP_MCBSP_REG_XCERA 0x40 -#define OMAP_MCBSP_REG_XCERB 0x44 -#define OMAP_MCBSP_REG_PCR0 0x48 -#define OMAP_MCBSP_REG_RCERC 0x4C -#define OMAP_MCBSP_REG_RCERD 0x50 -#define OMAP_MCBSP_REG_XCERC 0x54 -#define OMAP_MCBSP_REG_XCERD 0x58 -#define OMAP_MCBSP_REG_RCERE 0x5C -#define OMAP_MCBSP_REG_RCERF 0x60 -#define OMAP_MCBSP_REG_XCERE 0x64 -#define OMAP_MCBSP_REG_XCERF 0x68 -#define OMAP_MCBSP_REG_RCERG 0x6C -#define OMAP_MCBSP_REG_RCERH 0x70 -#define OMAP_MCBSP_REG_XCERG 0x74 -#define OMAP_MCBSP_REG_XCERH 0x78 -#define OMAP_MCBSP_REG_SYSCON 0x8C -#define OMAP_MCBSP_REG_THRSH2 0x90 -#define OMAP_MCBSP_REG_THRSH1 0x94 -#define OMAP_MCBSP_REG_IRQST 0xA0 -#define OMAP_MCBSP_REG_IRQEN 0xA4 -#define OMAP_MCBSP_REG_WAKEUPEN 0xA8 -#define OMAP_MCBSP_REG_XCCR 0xAC -#define OMAP_MCBSP_REG_RCCR 0xB0 -#define OMAP_MCBSP_REG_XBUFFSTAT 0xB4 -#define OMAP_MCBSP_REG_RBUFFSTAT 0xB8 -#define OMAP_MCBSP_REG_SSELCR 0xBC +/* McBSP register numbers. 
Register address offset = num * reg_step */ +enum { + /* Common registers */ + OMAP_MCBSP_REG_SPCR2 = 4, + OMAP_MCBSP_REG_SPCR1, + OMAP_MCBSP_REG_RCR2, + OMAP_MCBSP_REG_RCR1, + OMAP_MCBSP_REG_XCR2, + OMAP_MCBSP_REG_XCR1, + OMAP_MCBSP_REG_SRGR2, + OMAP_MCBSP_REG_SRGR1, + OMAP_MCBSP_REG_MCR2, + OMAP_MCBSP_REG_MCR1, + OMAP_MCBSP_REG_RCERA, + OMAP_MCBSP_REG_RCERB, + OMAP_MCBSP_REG_XCERA, + OMAP_MCBSP_REG_XCERB, + OMAP_MCBSP_REG_PCR0, + OMAP_MCBSP_REG_RCERC, + OMAP_MCBSP_REG_RCERD, + OMAP_MCBSP_REG_XCERC, + OMAP_MCBSP_REG_XCERD, + OMAP_MCBSP_REG_RCERE, + OMAP_MCBSP_REG_RCERF, + OMAP_MCBSP_REG_XCERE, + OMAP_MCBSP_REG_XCERF, + OMAP_MCBSP_REG_RCERG, + OMAP_MCBSP_REG_RCERH, + OMAP_MCBSP_REG_XCERG, + OMAP_MCBSP_REG_XCERH, + + /* OMAP1-OMAP2420 registers */ + OMAP_MCBSP_REG_DRR2 = 0, + OMAP_MCBSP_REG_DRR1, + OMAP_MCBSP_REG_DXR2, + OMAP_MCBSP_REG_DXR1, + + /* OMAP2430 and onwards */ + OMAP_MCBSP_REG_DRR = 0, + OMAP_MCBSP_REG_DXR = 2, + OMAP_MCBSP_REG_SYSCON = 35, + OMAP_MCBSP_REG_THRSH2, + OMAP_MCBSP_REG_THRSH1, + OMAP_MCBSP_REG_IRQST = 40, + OMAP_MCBSP_REG_IRQEN, + OMAP_MCBSP_REG_WAKEUPEN, + OMAP_MCBSP_REG_XCCR, + OMAP_MCBSP_REG_RCCR, + OMAP_MCBSP_REG_XBUFFSTAT, + OMAP_MCBSP_REG_RBUFFSTAT, + OMAP_MCBSP_REG_SSELCR, +}; +/* OMAP3 sidetone control registers */ #define OMAP_ST_REG_REV 0x00 #define OMAP_ST_REG_SYSCONFIG 0x10 #define OMAP_ST_REG_IRQSTATUS 0x18 @@ -146,8 +100,6 @@ static struct platform_device omap_mcbsp##port_nr = { \ #define OMAP_ST_REG_SFIRCR 0x28 #define OMAP_ST_REG_SSELCR 0x2C -#endif - /************************** McBSP SPCR1 bit definitions ***********************/ #define RRST 0x0001 #define RRDY 0x0002 @@ -344,20 +296,20 @@ typedef enum { struct omap_mcbsp_ops { void (*request)(unsigned int); void (*free)(unsigned int); - int (*set_clks_src)(u8, u8); }; struct omap_mcbsp_platform_data { - unsigned long phys_base; - u8 dma_rx_sync, dma_tx_sync; - u16 rx_irq, tx_irq; struct omap_mcbsp_ops *ops; -#ifdef CONFIG_ARCH_OMAP3 - /* Sidetone block for McBSP 2 and 3 */ - unsigned long phys_base_st; -#endif u16 buffer_size; - unsigned int mcbsp_config_type; + u8 reg_size; + u8 reg_step; + + /* McBSP platform and instance specific features */ + bool has_wakeup; /* Wakeup capability */ + bool has_ccr; /* Transceiver has configuration control registers */ + int (*enable_st_clock)(unsigned int, bool); + int (*set_clk_src)(struct device *dev, struct clk *clk, const char *src); + int (*mux_signal)(struct device *dev, const char *signal, const char *src); }; struct omap_mcbsp_st_data { @@ -389,14 +341,12 @@ struct omap_mcbsp { spinlock_t lock; struct omap_mcbsp_platform_data *pdata; struct clk *fclk; -#ifdef CONFIG_ARCH_OMAP3 struct omap_mcbsp_st_data *st_data; int dma_op_mode; u16 max_tx_thres; u16 max_rx_thres; -#endif void *reg_cache; - unsigned int mcbsp_config_type; + int reg_cache_size; }; /** @@ -408,16 +358,10 @@ struct omap_mcbsp_dev_attr { }; extern struct omap_mcbsp **mcbsp_ptr; -extern int omap_mcbsp_count, omap_mcbsp_cache_size; - -#define omap_mcbsp_check_valid_id(id) (id < omap_mcbsp_count) -#define id_to_mcbsp_ptr(id) mcbsp_ptr[id]; +extern int omap_mcbsp_count; int omap_mcbsp_init(void); -void omap_mcbsp_register_board_cfg(struct resource *res, int res_count, - struct omap_mcbsp_platform_data *config, int size); void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config); -#ifdef CONFIG_ARCH_OMAP3 void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold); void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold); u16 
omap_mcbsp_get_max_tx_threshold(unsigned int id); @@ -426,18 +370,6 @@ u16 omap_mcbsp_get_fifo_size(unsigned int id); u16 omap_mcbsp_get_tx_delay(unsigned int id); u16 omap_mcbsp_get_rx_delay(unsigned int id); int omap_mcbsp_get_dma_op_mode(unsigned int id); -#else -static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold) -{ } -static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold) -{ } -static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; } -static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; } -static inline u16 omap_mcbsp_get_fifo_size(unsigned int id) { return 0; } -static inline u16 omap_mcbsp_get_tx_delay(unsigned int id) { return 0; } -static inline u16 omap_mcbsp_get_rx_delay(unsigned int id) { return 0; } -static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; } -#endif int omap_mcbsp_request(unsigned int id); void omap_mcbsp_free(unsigned int id); void omap_mcbsp_start(unsigned int id, int tx, int rx); @@ -453,21 +385,11 @@ void omap2_mcbsp1_mux_fsr_src(u8 mux); int omap_mcbsp_dma_ch_params(unsigned int id, unsigned int stream); int omap_mcbsp_dma_reg_params(unsigned int id, unsigned int stream); -#ifdef CONFIG_ARCH_OMAP3 /* Sidetone specific API */ int omap_st_set_chgain(unsigned int id, int channel, s16 chgain); int omap_st_get_chgain(unsigned int id, int channel, s16 *chgain); int omap_st_enable(unsigned int id); int omap_st_disable(unsigned int id); int omap_st_is_enabled(unsigned int id); -#else -static inline int omap_st_set_chgain(unsigned int id, int channel, - s16 chgain) { return 0; } -static inline int omap_st_get_chgain(unsigned int id, int channel, - s16 *chgain) { return 0; } -static inline int omap_st_enable(unsigned int id) { return 0; } -static inline int omap_st_disable(unsigned int id) { return 0; } -static inline int omap_st_is_enabled(unsigned int id) { return 0; } -#endif #endif diff --git a/arch/arm/plat-omap/include/plat/memory.h b/arch/arm/plat-omap/include/plat/memory.h deleted file mode 100644 index e6720aa2d55..00000000000 --- a/arch/arm/plat-omap/include/plat/memory.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * arch/arm/plat-omap/include/mach/memory.h - * - * Memory map for OMAP-1510 and 1610 - * - * Copyright (C) 2000 RidgeRun, Inc. - * Author: Greg Lonnon <glonnon@ridgerun.com> - * - * This file was derived from arch/arm/mach-intergrator/include/mach/memory.h - * Copyright (C) 1999 ARM Limited - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef __ASM_ARCH_MEMORY_H -#define __ASM_ARCH_MEMORY_H - -/* - * Physical DRAM offset. - */ -#if defined(CONFIG_ARCH_OMAP1) -#define PLAT_PHYS_OFFSET UL(0x10000000) -#else -#define PLAT_PHYS_OFFSET UL(0x80000000) -#endif - -/* - * Bus address is physical address, except for OMAP-1510 Local Bus. - * OMAP-1510 bus address is translated into a Local Bus address if the - * OMAP bus type is lbus. We do the address translation based on the - * device overriding the defaults used in the dma-mapping API. - * Note that the is_lbus_device() test is not very efficient on 1510 - * because of the strncmp(). - */ -#ifdef CONFIG_ARCH_OMAP15XX - -/* - * OMAP-1510 Local Bus address offset - */ -#define OMAP1510_LB_OFFSET UL(0x30000000) - -#define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET) -#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET) -#define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0)) - -#define __arch_pfn_to_dma(dev, pfn) \ - ({ dma_addr_t __dma = __pfn_to_phys(pfn); \ - if (is_lbus_device(dev)) \ - __dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \ - __dma; }) - -#define __arch_dma_to_pfn(dev, addr) \ - ({ dma_addr_t __dma = addr; \ - if (is_lbus_device(dev)) \ - __dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \ - __phys_to_pfn(__dma); \ - }) - -#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \ - lbus_to_virt(addr) : \ - __phys_to_virt(addr)); }) - -#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \ - (dma_addr_t) (is_lbus_device(dev) ? \ - virt_to_lbus(__addr) : \ - __virt_to_phys(__addr)); }) - -#endif /* CONFIG_ARCH_OMAP15XX */ - -/* Override the ARM default */ -#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE - -#if (CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE == 0) -#undef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE -#define CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE 2 -#endif - -#define CONSISTENT_DMA_SIZE \ - (((CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE + 1) & ~1) * 1024 * 1024) - -#endif - -#endif - diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h index c7b874186c2..94cf70afb23 100644 --- a/arch/arm/plat-omap/include/plat/mmc.h +++ b/arch/arm/plat-omap/include/plat/mmc.h @@ -31,7 +31,24 @@ #define OMAP_MMC_MAX_SLOTS 2 -#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(1) +/* + * struct omap_mmc_dev_attr.flags possibilities + * + * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can + * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag + * should be set if this is the case. See for example Section 22.5.3 + * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia + * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). + * + * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers + * don't work correctly on some MMC controller instances on some + * OMAP3 SoCs; this flag should be set if this is the case. See + * for example Advisory 2.1.1.128 "MMC: Multiple Block Read + * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ + * Revision F (October 2010) (SPRZ278F). 
+ */ +#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) +#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) struct omap_mmc_dev_attr { u8 flags; diff --git a/arch/arm/plat-omap/include/plat/omap-alsa.h b/arch/arm/plat-omap/include/plat/omap-alsa.h deleted file mode 100644 index b53055b390d..00000000000 --- a/arch/arm/plat-omap/include/plat/omap-alsa.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * arch/arm/plat-omap/include/mach/omap-alsa.h - * - * Alsa Driver for AIC23 and TSC2101 codecs on OMAP platform boards. - * - * Copyright (C) 2006 Mika Laitio <lamikr@cc.jyu.fi> - * - * Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus Brazil - * Written by Daniel Petrini, David Cohen, Anderson Briglia - * {daniel.petrini, david.cohen, anderson.briglia}@indt.org.br - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. - * - * History - * ------- - * - * 2005/07/25 INdT-10LE Kernel Team - Alsa driver for omap osk, - * original version based in sa1100 driver - * and omap oss driver. - */ - -#ifndef __OMAP_ALSA_H -#define __OMAP_ALSA_H - -#include <plat/dma.h> -#include <sound/core.h> -#include <sound/pcm.h> -#include <plat/mcbsp.h> -#include <linux/platform_device.h> - -#define DMA_BUF_SIZE (1024 * 8) - -/* - * Buffer management for alsa and dma - */ -struct audio_stream { - char *id; /* identification string */ - int stream_id; /* numeric identification */ - int dma_dev; /* dma number of that device */ - int *lch; /* Chain of channels this stream is linked to */ - char started; /* to store if the chain was started or not */ - int dma_q_head; /* DMA Channel Q Head */ - int dma_q_tail; /* DMA Channel Q Tail */ - char dma_q_count; /* DMA Channel Q Count */ - int active:1; /* we are using this stream for transfer now */ - int period; /* current transfer period */ - int periods; /* current count of periods registerd in the DMA engine */ - spinlock_t dma_lock; /* for locking in DMA operations */ - struct snd_pcm_substream *stream; /* the pcm stream */ - unsigned linked:1; /* dma channels linked */ - int offset; /* store start position of the last period in the alsa buffer */ - int (*hw_start)(void); /* interface to start HW interface, e.g. McBSP */ - int (*hw_stop)(void); /* interface to stop HW interface, e.g. 
McBSP */ -}; - -/* - * Alsa card structure for aic23 - */ -struct snd_card_omap_codec { - struct snd_card *card; - struct snd_pcm *pcm; - long samplerate; - struct audio_stream s[2]; /* playback & capture */ -}; - -/* Codec specific information and function pointers. - * Codec (omap-alsa-aic23.c and omap-alsa-tsc2101.c) - * are responsible for defining the function pointers. - */ -struct omap_alsa_codec_config { - char *name; - struct omap_mcbsp_reg_cfg *mcbsp_regs_alsa; - struct snd_pcm_hw_constraint_list *hw_constraints_rates; - struct snd_pcm_hardware *snd_omap_alsa_playback; - struct snd_pcm_hardware *snd_omap_alsa_capture; - void (*codec_configure_dev)(void); - void (*codec_set_samplerate)(long); - void (*codec_clock_setup)(void); - int (*codec_clock_on)(void); - int (*codec_clock_off)(void); - int (*get_default_samplerate)(void); -}; - -/*********** Mixer function prototypes *************************/ -int snd_omap_mixer(struct snd_card_omap_codec *); -void snd_omap_init_mixer(void); - -#ifdef CONFIG_PM -void snd_omap_suspend_mixer(void); -void snd_omap_resume_mixer(void); -#endif - -int snd_omap_alsa_post_probe(struct platform_device *pdev, struct omap_alsa_codec_config *config); -int snd_omap_alsa_remove(struct platform_device *pdev); -#ifdef CONFIG_PM -int snd_omap_alsa_suspend(struct platform_device *pdev, pm_message_t state); -int snd_omap_alsa_resume(struct platform_device *pdev); -#else -#define snd_omap_alsa_suspend NULL -#define snd_omap_alsa_resume NULL -#endif - -void callback_omap_alsa_sound_dma(void *); - -#endif diff --git a/arch/arm/plat-omap/include/plat/omap-pm.h b/arch/arm/plat-omap/include/plat/omap-pm.h index 0840df813f4..67faa7b8fe9 100644 --- a/arch/arm/plat-omap/include/plat/omap-pm.h +++ b/arch/arm/plat-omap/include/plat/omap-pm.h @@ -342,9 +342,9 @@ unsigned long omap_pm_cpu_get_freq(void); * driver must restore device context. If the number of context losses * exceeds the maximum positive integer, the function will wrap to 0 and * continue counting. Returns the number of context losses for this device, - * or zero upon error. + * or negative value upon error. 
*/ -u32 omap_pm_get_dev_context_loss_count(struct device *dev); +int omap_pm_get_dev_context_loss_count(struct device *dev); void omap_pm_enable_off_mode(void); void omap_pm_disable_off_mode(void); diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h index ee405b36df4..51423d2727a 100644 --- a/arch/arm/plat-omap/include/plat/omap_device.h +++ b/arch/arm/plat-omap/include/plat/omap_device.h @@ -68,7 +68,7 @@ extern struct device omap_device_parent; * */ struct omap_device { - struct platform_device pdev; + struct platform_device *pdev; struct omap_hwmod **hwmods; struct omap_device_pm_latency *pm_lats; u32 dev_wakeup_lat; @@ -88,31 +88,26 @@ int omap_device_shutdown(struct platform_device *pdev); /* Core code interface */ -int omap_device_count_resources(struct omap_device *od); -int omap_device_fill_resources(struct omap_device *od, struct resource *res); - -struct omap_device *omap_device_build(const char *pdev_name, int pdev_id, +struct platform_device *omap_device_build(const char *pdev_name, int pdev_id, struct omap_hwmod *oh, void *pdata, int pdata_len, struct omap_device_pm_latency *pm_lats, int pm_lats_cnt, int is_early_device); -struct omap_device *omap_device_build_ss(const char *pdev_name, int pdev_id, +struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id, struct omap_hwmod **oh, int oh_cnt, void *pdata, int pdata_len, struct omap_device_pm_latency *pm_lats, int pm_lats_cnt, int is_early_device); -int omap_device_register(struct omap_device *od); -int omap_early_device_register(struct omap_device *od); - void __iomem *omap_device_get_rt_va(struct omap_device *od); +struct device *omap_device_get_by_hwmod_name(const char *oh_name); /* OMAP PM interface */ int omap_device_align_pm_lat(struct platform_device *pdev, u32 new_wakeup_lat_limit); struct powerdomain *omap_device_get_pwrdm(struct omap_device *od); -u32 omap_device_get_context_loss_count(struct platform_device *pdev); +int omap_device_get_context_loss_count(struct platform_device *pdev); /* Other */ @@ -122,11 +117,6 @@ int omap_device_enable_hwmods(struct omap_device *od); int omap_device_disable_clocks(struct omap_device *od); int omap_device_enable_clocks(struct omap_device *od); -static inline void omap_device_disable_idle_on_suspend(struct omap_device *od) -{ - od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND; -} - /* * Entries should be kept in latency order ascending * @@ -157,6 +147,17 @@ struct omap_device_pm_latency { #define OMAP_DEVICE_LATENCY_AUTO_ADJUST BIT(1) /* Get omap_device pointer from platform_device pointer */ -#define to_omap_device(x) container_of((x), struct omap_device, pdev) +static inline struct omap_device *to_omap_device(struct platform_device *pdev) +{ + return pdev ? 
pdev->archdata.od : NULL; +} + +static inline +void omap_device_disable_idle_on_suspend(struct platform_device *pdev) +{ + struct omap_device *od = to_omap_device(pdev); + + od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND; +} #endif diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h index 0e329ca88a7..8b372ede17c 100644 --- a/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h @@ -496,7 +496,6 @@ struct omap_hwmod_class { * @_state: internal-use hwmod state * @_postsetup_state: internal-use state to leave the hwmod in after _setup() * @flags: hwmod flags (documented below) - * @omap_chip: OMAP chips this hwmod is present on * @_lock: spinlock serializing operations on this hwmod * @node: list node for hwmod list (internal use) * @@ -526,7 +525,6 @@ struct omap_hwmod { char *clkdm_name; struct clockdomain *clkdm; char *vdd_name; - struct voltagedomain *voltdm; struct omap_hwmod_ocp_if **masters; /* connect to *_IA */ struct omap_hwmod_ocp_if **slaves; /* connect to *_TA */ void *dev_attr; @@ -545,7 +543,6 @@ struct omap_hwmod { u8 _int_flags; u8 _state; u8 _postsetup_state; - const struct omap_chip_id omap_chip; }; int omap_hwmod_register(struct omap_hwmod **ohs); @@ -603,7 +600,7 @@ int omap_hwmod_for_each_by_class(const char *classname, void *user); int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state); -u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh); +int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh); int omap_hwmod_no_setup_reset(struct omap_hwmod *oh); diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h index de3b10c1812..1ab9fd6abe6 100644 --- a/arch/arm/plat-omap/include/plat/serial.h +++ b/arch/arm/plat-omap/include/plat/serial.h @@ -16,8 +16,8 @@ #include <linux/init.h> /* - * Memory entry used for the DEBUG_LL UART configuration. See also - * uncompress.h and debug-macro.S. + * Memory entry used for the DEBUG_LL UART configuration, relative to + * start of RAM. See also uncompress.h and debug-macro.S. * * Note that using a memory location for storing the UART configuration * has at least two limitations: @@ -27,7 +27,7 @@ * 2. We assume printascii is called at least once before paging_init, * and addruart has a chance to read OMAP_UART_INFO */ -#define OMAP_UART_INFO (PLAT_PHYS_OFFSET + 0x3ffc) +#define OMAP_UART_INFO_OFS 0x3ffc /* OMAP1 serial ports */ #define OMAP1_UART1_BASE 0xfffb0000 diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h index a067484cc4a..2f472e989ec 100644 --- a/arch/arm/plat-omap/include/plat/uncompress.h +++ b/arch/arm/plat-omap/include/plat/uncompress.h @@ -36,7 +36,13 @@ int uart_shift; */ static void set_omap_uart_info(unsigned char port) { - *(volatile u32 *)OMAP_UART_INFO = port; + /* + * Get address of some.bss variable and round it down + * a la CONFIG_AUTO_ZRELADDR. + */ + u32 ram_start = (u32)&uart_shift & 0xf8000000; + u32 *uart_info = (u32 *)(ram_start + OMAP_UART_INFO_OFS); + *uart_info = port; } static void putc(int c) diff --git a/arch/arm/plat-omap/include/plat/voltage.h b/arch/arm/plat-omap/include/plat/voltage.h new file mode 100644 index 00000000000..0a6a482ec01 --- /dev/null +++ b/arch/arm/plat-omap/include/plat/voltage.h @@ -0,0 +1,20 @@ +/* + * OMAP Voltage Management Routines + * + * Copyright (C) 2011, Texas Instruments, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ARCH_ARM_OMAP_VOLTAGE_H +#define __ARCH_ARM_OMAP_VOLTAGE_H + +struct voltagedomain; + +struct voltagedomain *voltdm_lookup(const char *name); +int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt); +unsigned long voltdm_get_voltage(struct voltagedomain *voltdm); + +#endif diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c index f1ecfa9fc61..333871f5999 100644 --- a/arch/arm/plat-omap/io.c +++ b/arch/arm/plat-omap/io.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/io.h> #include <linux/mm.h> +#include <linux/dma-mapping.h> #include <plat/omap7xx.h> #include <plat/omap1510.h> @@ -23,11 +24,16 @@ #define BETWEEN(p,st,sz) ((p) >= (st) && (p) < ((st) + (sz))) #define XLATE(p,pst,vst) ((void __iomem *)((p) - (pst) + (vst))) +static int initialized; + /* * Intercept ioremap() requests for addresses in our fixed mapping regions. */ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type) { + + WARN(!initialized, "Do not use ioremap before init_early\n"); + #ifdef CONFIG_ARCH_OMAP1 if (cpu_class_is_omap1()) { if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE)) @@ -139,3 +145,15 @@ void omap_iounmap(volatile void __iomem *addr) __iounmap(addr); } EXPORT_SYMBOL(omap_iounmap); + +void __init omap_init_consistent_dma_size(void) +{ +#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE + init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20); +#endif +} + +void __init omap_ioremap_init(void) +{ + initialized++; +} diff --git a/arch/arm/plat-omap/iommu-debug.c b/arch/arm/plat-omap/iommu-debug.c deleted file mode 100644 index f07cf2f08e0..00000000000 --- a/arch/arm/plat-omap/iommu-debug.c +++ /dev/null @@ -1,418 +0,0 @@ -/* - * omap iommu: debugfs interface - * - * Copyright (C) 2008-2009 Nokia Corporation - * - * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/err.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/platform_device.h> -#include <linux/debugfs.h> - -#include <plat/iommu.h> -#include <plat/iovmm.h> - -#include "iopgtable.h" - -#define MAXCOLUMN 100 /* for short messages */ - -static DEFINE_MUTEX(iommu_debug_lock); - -static struct dentry *iommu_debug_root; - -static ssize_t debug_read_ver(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - u32 ver = iommu_arch_version(); - char buf[MAXCOLUMN], *p = buf; - - p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); - - return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); -} - -static ssize_t debug_read_regs(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct iommu *obj = file->private_data; - char *p, *buf; - ssize_t bytes; - - buf = kmalloc(count, GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - mutex_lock(&iommu_debug_lock); - - bytes = iommu_dump_ctx(obj, p, count); - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); - - mutex_unlock(&iommu_debug_lock); - kfree(buf); - - return bytes; -} - -static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct iommu *obj = file->private_data; - char *p, *buf; - ssize_t bytes, rest; - - buf = kmalloc(count, GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - mutex_lock(&iommu_debug_lock); - - p += sprintf(p, "%8s %8s\n", "cam:", "ram:"); - p += sprintf(p, "-----------------------------------------\n"); - rest = count - (p - buf); - p += dump_tlb_entries(obj, p, rest); - - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); - - mutex_unlock(&iommu_debug_lock); - kfree(buf); - - return bytes; -} - -static ssize_t debug_write_pagetable(struct file *file, - const char __user *userbuf, size_t count, loff_t *ppos) -{ - struct iotlb_entry e; - struct cr_regs cr; - int err; - struct iommu *obj = file->private_data; - char buf[MAXCOLUMN], *p = buf; - - count = min(count, sizeof(buf)); - - mutex_lock(&iommu_debug_lock); - if (copy_from_user(p, userbuf, count)) { - mutex_unlock(&iommu_debug_lock); - return -EFAULT; - } - - sscanf(p, "%x %x", &cr.cam, &cr.ram); - if (!cr.cam || !cr.ram) { - mutex_unlock(&iommu_debug_lock); - return -EINVAL; - } - - iotlb_cr_to_e(&cr, &e); - err = iopgtable_store_entry(obj, &e); - if (err) - dev_err(obj->dev, "%s: fail to store cr\n", __func__); - - mutex_unlock(&iommu_debug_lock); - return count; -} - -#define dump_ioptable_entry_one(lv, da, val) \ - ({ \ - int __err = 0; \ - ssize_t bytes; \ - const int maxcol = 22; \ - const char *str = "%d: %08x %08x\n"; \ - bytes = snprintf(p, maxcol, str, lv, da, val); \ - p += bytes; \ - len -= bytes; \ - if (len < maxcol) \ - __err = -ENOMEM; \ - __err; \ - }) - -static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len) -{ - int i; - u32 *iopgd; - char *p = buf; - - spin_lock(&obj->page_table_lock); - - iopgd = iopgd_offset(obj, 0); - for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { - int j, err; - u32 *iopte; - u32 da; - - if (!*iopgd) - continue; - - if (!(*iopgd & IOPGD_TABLE)) { - da = i << IOPGD_SHIFT; - - err = dump_ioptable_entry_one(1, da, *iopgd); - if (err) - goto out; - continue; - } - - iopte = iopte_offset(iopgd, 0); - - for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { - if (!*iopte) - continue; - - da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); - err = dump_ioptable_entry_one(2, da, 
*iopgd); - if (err) - goto out; - } - } -out: - spin_unlock(&obj->page_table_lock); - - return p - buf; -} - -static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct iommu *obj = file->private_data; - char *p, *buf; - size_t bytes; - - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); - p += sprintf(p, "-----------------------------------------\n"); - - mutex_lock(&iommu_debug_lock); - - bytes = PAGE_SIZE - (p - buf); - p += dump_ioptable(obj, p, bytes); - - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); - - mutex_unlock(&iommu_debug_lock); - free_page((unsigned long)buf); - - return bytes; -} - -static ssize_t debug_read_mmap(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct iommu *obj = file->private_data; - char *p, *buf; - struct iovm_struct *tmp; - int uninitialized_var(i); - ssize_t bytes; - - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n", - "No", "start", "end", "size", "flags"); - p += sprintf(p, "-------------------------------------------------\n"); - - mutex_lock(&iommu_debug_lock); - - list_for_each_entry(tmp, &obj->mmap, list) { - size_t len; - const char *str = "%3d %08x-%08x %6x %8x\n"; - const int maxcol = 39; - - len = tmp->da_end - tmp->da_start; - p += snprintf(p, maxcol, str, - i, tmp->da_start, tmp->da_end, len, tmp->flags); - - if (PAGE_SIZE - (p - buf) < maxcol) - break; - i++; - } - - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); - - mutex_unlock(&iommu_debug_lock); - free_page((unsigned long)buf); - - return bytes; -} - -static ssize_t debug_read_mem(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct iommu *obj = file->private_data; - char *p, *buf; - struct iovm_struct *area; - ssize_t bytes; - - count = min_t(ssize_t, count, PAGE_SIZE); - - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - mutex_lock(&iommu_debug_lock); - - area = find_iovm_area(obj, (u32)ppos); - if (IS_ERR(area)) { - bytes = -EINVAL; - goto err_out; - } - memcpy(p, area->va, count); - p += count; - - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); -err_out: - mutex_unlock(&iommu_debug_lock); - free_page((unsigned long)buf); - - return bytes; -} - -static ssize_t debug_write_mem(struct file *file, const char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct iommu *obj = file->private_data; - struct iovm_struct *area; - char *p, *buf; - - count = min_t(size_t, count, PAGE_SIZE); - - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - mutex_lock(&iommu_debug_lock); - - if (copy_from_user(p, userbuf, count)) { - count = -EFAULT; - goto err_out; - } - - area = find_iovm_area(obj, (u32)ppos); - if (IS_ERR(area)) { - count = -EINVAL; - goto err_out; - } - memcpy(area->va, p, count); -err_out: - mutex_unlock(&iommu_debug_lock); - free_page((unsigned long)buf); - - return count; -} - -static int debug_open_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -#define DEBUG_FOPS(name) \ - static const struct file_operations debug_##name##_fops = { \ - .open = debug_open_generic, \ - .read = debug_read_##name, \ - .write = debug_write_##name, \ - .llseek = generic_file_llseek, \ - }; - -#define 
DEBUG_FOPS_RO(name) \ - static const struct file_operations debug_##name##_fops = { \ - .open = debug_open_generic, \ - .read = debug_read_##name, \ - .llseek = generic_file_llseek, \ - }; - -DEBUG_FOPS_RO(ver); -DEBUG_FOPS_RO(regs); -DEBUG_FOPS_RO(tlb); -DEBUG_FOPS(pagetable); -DEBUG_FOPS_RO(mmap); -DEBUG_FOPS(mem); - -#define __DEBUG_ADD_FILE(attr, mode) \ - { \ - struct dentry *dent; \ - dent = debugfs_create_file(#attr, mode, parent, \ - obj, &debug_##attr##_fops); \ - if (!dent) \ - return -ENOMEM; \ - } - -#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600) -#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400) - -static int iommu_debug_register(struct device *dev, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - struct iommu *obj = platform_get_drvdata(pdev); - struct dentry *d, *parent; - - if (!obj || !obj->dev) - return -EINVAL; - - d = debugfs_create_dir(obj->name, iommu_debug_root); - if (!d) - return -ENOMEM; - parent = d; - - d = debugfs_create_u8("nr_tlb_entries", 400, parent, - (u8 *)&obj->nr_tlb_entries); - if (!d) - return -ENOMEM; - - DEBUG_ADD_FILE_RO(ver); - DEBUG_ADD_FILE_RO(regs); - DEBUG_ADD_FILE_RO(tlb); - DEBUG_ADD_FILE(pagetable); - DEBUG_ADD_FILE_RO(mmap); - DEBUG_ADD_FILE(mem); - - return 0; -} - -static int __init iommu_debug_init(void) -{ - struct dentry *d; - int err; - - d = debugfs_create_dir("iommu", NULL); - if (!d) - return -ENOMEM; - iommu_debug_root = d; - - err = foreach_iommu_device(d, iommu_debug_register); - if (err) - goto err_out; - return 0; - -err_out: - debugfs_remove_recursive(iommu_debug_root); - return err; -} -module_init(iommu_debug_init) - -static void __exit iommu_debugfs_exit(void) -{ - debugfs_remove_recursive(iommu_debug_root); -} -module_exit(iommu_debugfs_exit) - -MODULE_DESCRIPTION("omap iommu: debugfs interface"); -MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c deleted file mode 100644 index 34fc31ee908..00000000000 --- a/arch/arm/plat-omap/iommu.c +++ /dev/null @@ -1,1102 +0,0 @@ -/* - * omap iommu: tlb and pagetable primitives - * - * Copyright (C) 2008-2010 Nokia Corporation - * - * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, - * Paul Mundt and Toshihiro Kobayashi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/err.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/clk.h> -#include <linux/platform_device.h> - -#include <asm/cacheflush.h> - -#include <plat/iommu.h> - -#include "iopgtable.h" - -#define for_each_iotlb_cr(obj, n, __i, cr) \ - for (__i = 0; \ - (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ - __i++) - -/* accommodate the difference between omap1 and omap2/3 */ -static const struct iommu_functions *arch_iommu; - -static struct platform_driver omap_iommu_driver; -static struct kmem_cache *iopte_cachep; - -/** - * install_iommu_arch - Install archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * There are several kind of iommu algorithm(tlb, pagetable) among - * omap series. This interface installs such an iommu algorighm. 
- **/ -int install_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu) - return -EBUSY; - - arch_iommu = ops; - return 0; -} -EXPORT_SYMBOL_GPL(install_iommu_arch); - -/** - * uninstall_iommu_arch - Uninstall archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * This interface uninstalls the iommu algorighm installed previously. - **/ -void uninstall_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu != ops) - pr_err("%s: not your arch\n", __func__); - - arch_iommu = NULL; -} -EXPORT_SYMBOL_GPL(uninstall_iommu_arch); - -/** - * iommu_save_ctx - Save registers for pm off-mode support - * @obj: target iommu - **/ -void iommu_save_ctx(struct iommu *obj) -{ - arch_iommu->save_ctx(obj); -} -EXPORT_SYMBOL_GPL(iommu_save_ctx); - -/** - * iommu_restore_ctx - Restore registers for pm off-mode support - * @obj: target iommu - **/ -void iommu_restore_ctx(struct iommu *obj) -{ - arch_iommu->restore_ctx(obj); -} -EXPORT_SYMBOL_GPL(iommu_restore_ctx); - -/** - * iommu_arch_version - Return running iommu arch version - **/ -u32 iommu_arch_version(void) -{ - return arch_iommu->version; -} -EXPORT_SYMBOL_GPL(iommu_arch_version); - -static int iommu_enable(struct iommu *obj) -{ - int err; - - if (!obj) - return -EINVAL; - - if (!arch_iommu) - return -ENODEV; - - clk_enable(obj->clk); - - err = arch_iommu->enable(obj); - - clk_disable(obj->clk); - return err; -} - -static void iommu_disable(struct iommu *obj) -{ - if (!obj) - return; - - clk_enable(obj->clk); - - arch_iommu->disable(obj); - - clk_disable(obj->clk); -} - -/* - * TLB operations - */ -void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) -{ - BUG_ON(!cr || !e); - - arch_iommu->cr_to_e(cr, e); -} -EXPORT_SYMBOL_GPL(iotlb_cr_to_e); - -static inline int iotlb_cr_valid(struct cr_regs *cr) -{ - if (!cr) - return -EINVAL; - - return arch_iommu->cr_valid(cr); -} - -static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj, - struct iotlb_entry *e) -{ - if (!e) - return NULL; - - return arch_iommu->alloc_cr(obj, e); -} - -u32 iotlb_cr_to_virt(struct cr_regs *cr) -{ - return arch_iommu->cr_to_virt(cr); -} -EXPORT_SYMBOL_GPL(iotlb_cr_to_virt); - -static u32 get_iopte_attr(struct iotlb_entry *e) -{ - return arch_iommu->get_pte_attr(e); -} - -static u32 iommu_report_fault(struct iommu *obj, u32 *da) -{ - return arch_iommu->fault_isr(obj, da); -} - -static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) -{ - u32 val; - - val = iommu_read_reg(obj, MMU_LOCK); - - l->base = MMU_LOCK_BASE(val); - l->vict = MMU_LOCK_VICT(val); - -} - -static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) -{ - u32 val; - - val = (l->base << MMU_LOCK_BASE_SHIFT); - val |= (l->vict << MMU_LOCK_VICT_SHIFT); - - iommu_write_reg(obj, val, MMU_LOCK); -} - -static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr) -{ - arch_iommu->tlb_read_cr(obj, cr); -} - -static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr) -{ - arch_iommu->tlb_load_cr(obj, cr); - - iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); - iommu_write_reg(obj, 1, MMU_LD_TLB); -} - -/** - * iotlb_dump_cr - Dump an iommu tlb entry into buf - * @obj: target iommu - * @cr: contents of cam and ram register - * @buf: output buffer - **/ -static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, - char *buf) -{ - BUG_ON(!cr || !buf); - - return arch_iommu->dump_cr(obj, cr, buf); -} - -/* only used in iotlb iteration for-loop */ -static struct cr_regs __iotlb_read_cr(struct iommu 
*obj, int n) -{ - struct cr_regs cr; - struct iotlb_lock l; - - iotlb_lock_get(obj, &l); - l.vict = n; - iotlb_lock_set(obj, &l); - iotlb_read_cr(obj, &cr); - - return cr; -} - -/** - * load_iotlb_entry - Set an iommu tlb entry - * @obj: target iommu - * @e: an iommu tlb entry info - **/ -int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) -{ - int err = 0; - struct iotlb_lock l; - struct cr_regs *cr; - - if (!obj || !obj->nr_tlb_entries || !e) - return -EINVAL; - - clk_enable(obj->clk); - - iotlb_lock_get(obj, &l); - if (l.base == obj->nr_tlb_entries) { - dev_warn(obj->dev, "%s: preserve entries full\n", __func__); - err = -EBUSY; - goto out; - } - if (!e->prsvd) { - int i; - struct cr_regs tmp; - - for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp) - if (!iotlb_cr_valid(&tmp)) - break; - - if (i == obj->nr_tlb_entries) { - dev_dbg(obj->dev, "%s: full: no entry\n", __func__); - err = -EBUSY; - goto out; - } - - iotlb_lock_get(obj, &l); - } else { - l.vict = l.base; - iotlb_lock_set(obj, &l); - } - - cr = iotlb_alloc_cr(obj, e); - if (IS_ERR(cr)) { - clk_disable(obj->clk); - return PTR_ERR(cr); - } - - iotlb_load_cr(obj, cr); - kfree(cr); - - if (e->prsvd) - l.base++; - /* increment victim for next tlb load */ - if (++l.vict == obj->nr_tlb_entries) - l.vict = l.base; - iotlb_lock_set(obj, &l); -out: - clk_disable(obj->clk); - return err; -} -EXPORT_SYMBOL_GPL(load_iotlb_entry); - -/** - * flush_iotlb_page - Clear an iommu tlb entry - * @obj: target iommu - * @da: iommu device virtual address - * - * Clear an iommu tlb entry which includes 'da' address. - **/ -void flush_iotlb_page(struct iommu *obj, u32 da) -{ - int i; - struct cr_regs cr; - - clk_enable(obj->clk); - - for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { - u32 start; - size_t bytes; - - if (!iotlb_cr_valid(&cr)) - continue; - - start = iotlb_cr_to_virt(&cr); - bytes = iopgsz_to_bytes(cr.cam & 3); - - if ((start <= da) && (da < start + bytes)) { - dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", - __func__, start, da, bytes); - iotlb_load_cr(obj, &cr); - iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); - } - } - clk_disable(obj->clk); - - if (i == obj->nr_tlb_entries) - dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); -} -EXPORT_SYMBOL_GPL(flush_iotlb_page); - -/** - * flush_iotlb_range - Clear an iommu tlb entries - * @obj: target iommu - * @start: iommu device virtual address(start) - * @end: iommu device virtual address(end) - * - * Clear an iommu tlb entry which includes 'da' address. - **/ -void flush_iotlb_range(struct iommu *obj, u32 start, u32 end) -{ - u32 da = start; - - while (da < end) { - flush_iotlb_page(obj, da); - /* FIXME: Optimize for multiple page size */ - da += IOPTE_SIZE; - } -} -EXPORT_SYMBOL_GPL(flush_iotlb_range); - -/** - * flush_iotlb_all - Clear all iommu tlb entries - * @obj: target iommu - **/ -void flush_iotlb_all(struct iommu *obj) -{ - struct iotlb_lock l; - - clk_enable(obj->clk); - - l.base = 0; - l.vict = 0; - iotlb_lock_set(obj, &l); - - iommu_write_reg(obj, 1, MMU_GFLUSH); - - clk_disable(obj->clk); -} -EXPORT_SYMBOL_GPL(flush_iotlb_all); - -/** - * iommu_set_twl - enable/disable table walking logic - * @obj: target iommu - * @on: enable/disable - * - * Function used to enable/disable TWL. If one wants to work - * exclusively with locked TLB entries and receive notifications - * for TLB miss then call this function to disable TWL. 
- */ -void iommu_set_twl(struct iommu *obj, bool on) -{ - clk_enable(obj->clk); - arch_iommu->set_twl(obj, on); - clk_disable(obj->clk); -} -EXPORT_SYMBOL_GPL(iommu_set_twl); - -#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) - -ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes) -{ - if (!obj || !buf) - return -EINVAL; - - clk_enable(obj->clk); - - bytes = arch_iommu->dump_ctx(obj, buf, bytes); - - clk_disable(obj->clk); - - return bytes; -} -EXPORT_SYMBOL_GPL(iommu_dump_ctx); - -static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num) -{ - int i; - struct iotlb_lock saved; - struct cr_regs tmp; - struct cr_regs *p = crs; - - clk_enable(obj->clk); - iotlb_lock_get(obj, &saved); - - for_each_iotlb_cr(obj, num, i, tmp) { - if (!iotlb_cr_valid(&tmp)) - continue; - *p++ = tmp; - } - - iotlb_lock_set(obj, &saved); - clk_disable(obj->clk); - - return p - crs; -} - -/** - * dump_tlb_entries - dump cr arrays to given buffer - * @obj: target iommu - * @buf: output buffer - **/ -size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes) -{ - int i, num; - struct cr_regs *cr; - char *p = buf; - - num = bytes / sizeof(*cr); - num = min(obj->nr_tlb_entries, num); - - cr = kcalloc(num, sizeof(*cr), GFP_KERNEL); - if (!cr) - return 0; - - num = __dump_tlb_entries(obj, cr, num); - for (i = 0; i < num; i++) - p += iotlb_dump_cr(obj, cr + i, p); - kfree(cr); - - return p - buf; -} -EXPORT_SYMBOL_GPL(dump_tlb_entries); - -int foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) -{ - return driver_for_each_device(&omap_iommu_driver.driver, - NULL, data, fn); -} -EXPORT_SYMBOL_GPL(foreach_iommu_device); - -#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ - -/* - * H/W pagetable operations - */ -static void flush_iopgd_range(u32 *first, u32 *last) -{ - /* FIXME: L2 cache should be taken care of if it exists */ - do { - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" - : : "r" (first)); - first += L1_CACHE_BYTES / sizeof(*first); - } while (first <= last); -} - -static void flush_iopte_range(u32 *first, u32 *last) -{ - /* FIXME: L2 cache should be taken care of if it exists */ - do { - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" - : : "r" (first)); - first += L1_CACHE_BYTES / sizeof(*first); - } while (first <= last); -} - -static void iopte_free(u32 *iopte) -{ - /* Note: freed iopte's must be clean ready for re-use */ - kmem_cache_free(iopte_cachep, iopte); -} - -static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da) -{ - u32 *iopte; - - /* a table has already existed */ - if (*iopgd) - goto pte_ready; - - /* - * do the allocation outside the page table lock - */ - spin_unlock(&obj->page_table_lock); - iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL); - spin_lock(&obj->page_table_lock); - - if (!*iopgd) { - if (!iopte) - return ERR_PTR(-ENOMEM); - - *iopgd = virt_to_phys(iopte) | IOPGD_TABLE; - flush_iopgd_range(iopgd, iopgd); - - dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); - } else { - /* We raced, free the reduniovant table */ - iopte_free(iopte); - } - -pte_ready: - iopte = iopte_offset(iopgd, da); - - dev_vdbg(obj->dev, - "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", - __func__, da, iopgd, *iopgd, iopte, *iopte); - - return iopte; -} - -static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot) -{ - u32 *iopgd = iopgd_offset(obj, da); - - if ((da | pa) & ~IOSECTION_MASK) { - dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", - __func__, da, pa, IOSECTION_SIZE); - return -EINVAL; - } - - *iopgd = 
(pa & IOSECTION_MASK) | prot | IOPGD_SECTION; - flush_iopgd_range(iopgd, iopgd); - return 0; -} - -static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot) -{ - u32 *iopgd = iopgd_offset(obj, da); - int i; - - if ((da | pa) & ~IOSUPER_MASK) { - dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", - __func__, da, pa, IOSUPER_SIZE); - return -EINVAL; - } - - for (i = 0; i < 16; i++) - *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; - flush_iopgd_range(iopgd, iopgd + 15); - return 0; -} - -static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot) -{ - u32 *iopgd = iopgd_offset(obj, da); - u32 *iopte = iopte_alloc(obj, iopgd, da); - - if (IS_ERR(iopte)) - return PTR_ERR(iopte); - - *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; - flush_iopte_range(iopte, iopte); - - dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", - __func__, da, pa, iopte, *iopte); - - return 0; -} - -static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot) -{ - u32 *iopgd = iopgd_offset(obj, da); - u32 *iopte = iopte_alloc(obj, iopgd, da); - int i; - - if ((da | pa) & ~IOLARGE_MASK) { - dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", - __func__, da, pa, IOLARGE_SIZE); - return -EINVAL; - } - - if (IS_ERR(iopte)) - return PTR_ERR(iopte); - - for (i = 0; i < 16; i++) - *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; - flush_iopte_range(iopte, iopte + 15); - return 0; -} - -static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e) -{ - int (*fn)(struct iommu *, u32, u32, u32); - u32 prot; - int err; - - if (!obj || !e) - return -EINVAL; - - switch (e->pgsz) { - case MMU_CAM_PGSZ_16M: - fn = iopgd_alloc_super; - break; - case MMU_CAM_PGSZ_1M: - fn = iopgd_alloc_section; - break; - case MMU_CAM_PGSZ_64K: - fn = iopte_alloc_large; - break; - case MMU_CAM_PGSZ_4K: - fn = iopte_alloc_page; - break; - default: - fn = NULL; - BUG(); - break; - } - - prot = get_iopte_attr(e); - - spin_lock(&obj->page_table_lock); - err = fn(obj, e->da, e->pa, prot); - spin_unlock(&obj->page_table_lock); - - return err; -} - -/** - * iopgtable_store_entry - Make an iommu pte entry - * @obj: target iommu - * @e: an iommu tlb entry info - **/ -int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e) -{ - int err; - - flush_iotlb_page(obj, e->da); - err = iopgtable_store_entry_core(obj, e); -#ifdef PREFETCH_IOTLB - if (!err) - load_iotlb_entry(obj, e); -#endif - return err; -} -EXPORT_SYMBOL_GPL(iopgtable_store_entry); - -/** - * iopgtable_lookup_entry - Lookup an iommu pte entry - * @obj: target iommu - * @da: iommu device virtual address - * @ppgd: iommu pgd entry pointer to be returned - * @ppte: iommu pte entry pointer to be returned - **/ -void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte) -{ - u32 *iopgd, *iopte = NULL; - - iopgd = iopgd_offset(obj, da); - if (!*iopgd) - goto out; - - if (iopgd_is_table(*iopgd)) - iopte = iopte_offset(iopgd, da); -out: - *ppgd = iopgd; - *ppte = iopte; -} -EXPORT_SYMBOL_GPL(iopgtable_lookup_entry); - -static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da) -{ - size_t bytes; - u32 *iopgd = iopgd_offset(obj, da); - int nent = 1; - - if (!*iopgd) - return 0; - - if (iopgd_is_table(*iopgd)) { - int i; - u32 *iopte = iopte_offset(iopgd, da); - - bytes = IOPTE_SIZE; - if (*iopte & IOPTE_LARGE) { - nent *= 16; - /* rewind to the 1st entry */ - iopte = iopte_offset(iopgd, (da & IOLARGE_MASK)); - } - bytes *= nent; - memset(iopte, 0, nent * 
sizeof(*iopte)); - flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); - - /* - * do table walk to check if this table is necessary or not - */ - iopte = iopte_offset(iopgd, 0); - for (i = 0; i < PTRS_PER_IOPTE; i++) - if (iopte[i]) - goto out; - - iopte_free(iopte); - nent = 1; /* for the next L1 entry */ - } else { - bytes = IOPGD_SIZE; - if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) { - nent *= 16; - /* rewind to the 1st entry */ - iopgd = iopgd_offset(obj, (da & IOSUPER_MASK)); - } - bytes *= nent; - } - memset(iopgd, 0, nent * sizeof(*iopgd)); - flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); -out: - return bytes; -} - -/** - * iopgtable_clear_entry - Remove an iommu pte entry - * @obj: target iommu - * @da: iommu device virtual address - **/ -size_t iopgtable_clear_entry(struct iommu *obj, u32 da) -{ - size_t bytes; - - spin_lock(&obj->page_table_lock); - - bytes = iopgtable_clear_entry_core(obj, da); - flush_iotlb_page(obj, da); - - spin_unlock(&obj->page_table_lock); - - return bytes; -} -EXPORT_SYMBOL_GPL(iopgtable_clear_entry); - -static void iopgtable_clear_entry_all(struct iommu *obj) -{ - int i; - - spin_lock(&obj->page_table_lock); - - for (i = 0; i < PTRS_PER_IOPGD; i++) { - u32 da; - u32 *iopgd; - - da = i << IOPGD_SHIFT; - iopgd = iopgd_offset(obj, da); - - if (!*iopgd) - continue; - - if (iopgd_is_table(*iopgd)) - iopte_free(iopte_offset(iopgd, 0)); - - *iopgd = 0; - flush_iopgd_range(iopgd, iopgd); - } - - flush_iotlb_all(obj); - - spin_unlock(&obj->page_table_lock); -} - -/* - * Device IOMMU generic operations - */ -static irqreturn_t iommu_fault_handler(int irq, void *data) -{ - u32 da, errs; - u32 *iopgd, *iopte; - struct iommu *obj = data; - - if (!obj->refcount) - return IRQ_NONE; - - clk_enable(obj->clk); - errs = iommu_report_fault(obj, &da); - clk_disable(obj->clk); - if (errs == 0) - return IRQ_HANDLED; - - /* Fault callback or TLB/PTE Dynamic loading */ - if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) - return IRQ_HANDLED; - - iommu_disable(obj); - - iopgd = iopgd_offset(obj, da); - - if (!iopgd_is_table(*iopgd)) { - dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " - "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); - return IRQ_NONE; - } - - iopte = iopte_offset(iopgd, da); - - dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " - "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, - iopte, *iopte); - - return IRQ_NONE; -} - -static int device_match_by_alias(struct device *dev, void *data) -{ - struct iommu *obj = to_iommu(dev); - const char *name = data; - - pr_debug("%s: %s %s\n", __func__, obj->name, name); - - return strcmp(obj->name, name) == 0; -} - -/** - * iommu_set_da_range - Set a valid device address range - * @obj: target iommu - * @start Start of valid range - * @end End of valid range - **/ -int iommu_set_da_range(struct iommu *obj, u32 start, u32 end) -{ - - if (!obj) - return -EFAULT; - - if (end < start || !PAGE_ALIGN(start | end)) - return -EINVAL; - - obj->da_start = start; - obj->da_end = end; - - return 0; -} -EXPORT_SYMBOL_GPL(iommu_set_da_range); - -/** - * iommu_get - Get iommu handler - * @name: target iommu name - **/ -struct iommu *iommu_get(const char *name) -{ - int err = -ENOMEM; - struct device *dev; - struct iommu *obj; - - dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, - device_match_by_alias); - if (!dev) - return ERR_PTR(-ENODEV); - - obj = to_iommu(dev); - - mutex_lock(&obj->iommu_lock); - - if (obj->refcount++ == 0) { - err = 
iommu_enable(obj); - if (err) - goto err_enable; - flush_iotlb_all(obj); - } - - if (!try_module_get(obj->owner)) - goto err_module; - - mutex_unlock(&obj->iommu_lock); - - dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); - return obj; - -err_module: - if (obj->refcount == 1) - iommu_disable(obj); -err_enable: - obj->refcount--; - mutex_unlock(&obj->iommu_lock); - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(iommu_get); - -/** - * iommu_put - Put back iommu handler - * @obj: target iommu - **/ -void iommu_put(struct iommu *obj) -{ - if (!obj || IS_ERR(obj)) - return; - - mutex_lock(&obj->iommu_lock); - - if (--obj->refcount == 0) - iommu_disable(obj); - - module_put(obj->owner); - - mutex_unlock(&obj->iommu_lock); - - dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); -} -EXPORT_SYMBOL_GPL(iommu_put); - -int iommu_set_isr(const char *name, - int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, - void *priv), - void *isr_priv) -{ - struct device *dev; - struct iommu *obj; - - dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, - device_match_by_alias); - if (!dev) - return -ENODEV; - - obj = to_iommu(dev); - mutex_lock(&obj->iommu_lock); - if (obj->refcount != 0) { - mutex_unlock(&obj->iommu_lock); - return -EBUSY; - } - obj->isr = isr; - obj->isr_priv = isr_priv; - mutex_unlock(&obj->iommu_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(iommu_set_isr); - -/* - * OMAP Device MMU(IOMMU) detection - */ -static int __devinit omap_iommu_probe(struct platform_device *pdev) -{ - int err = -ENODEV; - void *p; - int irq; - struct iommu *obj; - struct resource *res; - struct iommu_platform_data *pdata = pdev->dev.platform_data; - - if (pdev->num_resources != 2) - return -EINVAL; - - obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); - if (!obj) - return -ENOMEM; - - obj->clk = clk_get(&pdev->dev, pdata->clk_name); - if (IS_ERR(obj->clk)) - goto err_clk; - - obj->nr_tlb_entries = pdata->nr_tlb_entries; - obj->name = pdata->name; - obj->dev = &pdev->dev; - obj->ctx = (void *)obj + sizeof(*obj); - obj->da_start = pdata->da_start; - obj->da_end = pdata->da_end; - - mutex_init(&obj->iommu_lock); - mutex_init(&obj->mmap_lock); - spin_lock_init(&obj->page_table_lock); - INIT_LIST_HEAD(&obj->mmap); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - err = -ENODEV; - goto err_mem; - } - - res = request_mem_region(res->start, resource_size(res), - dev_name(&pdev->dev)); - if (!res) { - err = -EIO; - goto err_mem; - } - - obj->regbase = ioremap(res->start, resource_size(res)); - if (!obj->regbase) { - err = -ENOMEM; - goto err_ioremap; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - err = -ENODEV; - goto err_irq; - } - err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, - dev_name(&pdev->dev), obj); - if (err < 0) - goto err_irq; - platform_set_drvdata(pdev, obj); - - p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE)); - if (!p) { - err = -ENOMEM; - goto err_pgd; - } - memset(p, 0, IOPGD_TABLE_SIZE); - clean_dcache_area(p, IOPGD_TABLE_SIZE); - obj->iopgd = p; - - BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE)); - - dev_info(&pdev->dev, "%s registered\n", obj->name); - return 0; - -err_pgd: - free_irq(irq, obj); -err_irq: - iounmap(obj->regbase); -err_ioremap: - release_mem_region(res->start, resource_size(res)); -err_mem: - clk_put(obj->clk); -err_clk: - kfree(obj); - return err; -} - -static int __devexit omap_iommu_remove(struct platform_device *pdev) -{ - int irq; - struct resource *res; - struct iommu *obj = 
platform_get_drvdata(pdev); - - platform_set_drvdata(pdev, NULL); - - iopgtable_clear_entry_all(obj); - free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE)); - - irq = platform_get_irq(pdev, 0); - free_irq(irq, obj); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - iounmap(obj->regbase); - - clk_put(obj->clk); - dev_info(&pdev->dev, "%s removed\n", obj->name); - kfree(obj); - return 0; -} - -static struct platform_driver omap_iommu_driver = { - .probe = omap_iommu_probe, - .remove = __devexit_p(omap_iommu_remove), - .driver = { - .name = "omap-iommu", - }, -}; - -static void iopte_cachep_ctor(void *iopte) -{ - clean_dcache_area(iopte, IOPTE_TABLE_SIZE); -} - -static int __init omap_iommu_init(void) -{ - struct kmem_cache *p; - const unsigned long flags = SLAB_HWCACHE_ALIGN; - size_t align = 1 << 10; /* L2 pagetable alignement */ - - p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, - iopte_cachep_ctor); - if (!p) - return -ENOMEM; - iopte_cachep = p; - - return platform_driver_register(&omap_iommu_driver); -} -module_init(omap_iommu_init); - -static void __exit omap_iommu_exit(void) -{ - kmem_cache_destroy(iopte_cachep); - - platform_driver_unregister(&omap_iommu_driver); -} -module_exit(omap_iommu_exit); - -MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives"); -MODULE_ALIAS("platform:omap-iommu"); -MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); -MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c deleted file mode 100644 index 79e7fedb860..00000000000 --- a/arch/arm/plat-omap/iovmm.c +++ /dev/null @@ -1,904 +0,0 @@ -/* - * omap iommu: simple virtual address space management - * - * Copyright (C) 2008-2009 Nokia Corporation - * - * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/device.h> -#include <linux/scatterlist.h> - -#include <asm/cacheflush.h> -#include <asm/mach/map.h> - -#include <plat/iommu.h> -#include <plat/iovmm.h> - -#include "iopgtable.h" - -/* - * A device driver needs to create address mappings between: - * - * - iommu/device address - * - physical address - * - mpu virtual address - * - * There are 4 possible patterns for them: - * - * |iova/ mapping iommu_ page - * | da pa va (d)-(p)-(v) function type - * --------------------------------------------------------------------------- - * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s - * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s - * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s - * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n* - * - * - * 'iova': device iommu virtual address - * 'da': alias of 'iova' - * 'pa': physical address - * 'va': mpu virtual address - * - * 'c': contiguous memory area - * 'd': discontiguous memory area - * 'a': anonymous memory allocation - * '()': optional feature - * - * 'n': a normal page(4KB) size is used. - * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used. - * - * '*': not yet, but feasible. 
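
For context, a minimal sketch of how a client driver used the iovmm interface removed here, following pattern 4 of the table above (anonymous, discontiguous memory, 1-n-1 mapping). The iommu name "isp", the wrapper function name and the 1 MB size are illustrative placeholders and error handling is abbreviated; this shows the old API only, it is not code belonging to the patch.

static int example_map_anon_buffer(void)
{
	struct iommu *obj;
	u32 da;

	obj = iommu_get("isp");		/* "isp" is a placeholder name */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/*
	 * Allocate 1 MB anonymously and map it into the device address
	 * space; da is chosen by the allocator since IOVMF_DA_FIXED is
	 * not set.
	 */
	da = iommu_vmalloc(obj, 0, SZ_1M, 0);
	if (IS_ERR_VALUE(da)) {
		iommu_put(obj);
		return (int)da;
	}

	/* da_to_va(obj, da) would give the matching MPU virtual address */

	iommu_vfree(obj, da);
	iommu_put(obj);
	return 0;
}
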
- */ - -static struct kmem_cache *iovm_area_cachep; - -/* return total bytes of sg buffers */ -static size_t sgtable_len(const struct sg_table *sgt) -{ - unsigned int i, total = 0; - struct scatterlist *sg; - - if (!sgt) - return 0; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - size_t bytes; - - bytes = sg->length; - - if (!iopgsz_ok(bytes)) { - pr_err("%s: sg[%d] not iommu pagesize(%x)\n", - __func__, i, bytes); - return 0; - } - - total += bytes; - } - - return total; -} -#define sgtable_ok(x) (!!sgtable_len(x)) - -static unsigned max_alignment(u32 addr) -{ - int i; - unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; - for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) - ; - return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0; -} - -/* - * calculate the optimal number sg elements from total bytes based on - * iommu superpages - */ -static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) -{ - unsigned nr_entries = 0, ent_sz; - - if (!IS_ALIGNED(bytes, PAGE_SIZE)) { - pr_err("%s: wrong size %08x\n", __func__, bytes); - return 0; - } - - while (bytes) { - ent_sz = max_alignment(da | pa); - ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); - nr_entries++; - da += ent_sz; - pa += ent_sz; - bytes -= ent_sz; - } - - return nr_entries; -} - -/* allocate and initialize sg_table header(a kind of 'superblock') */ -static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, - u32 da, u32 pa) -{ - unsigned int nr_entries; - int err; - struct sg_table *sgt; - - if (!bytes) - return ERR_PTR(-EINVAL); - - if (!IS_ALIGNED(bytes, PAGE_SIZE)) - return ERR_PTR(-EINVAL); - - if (flags & IOVMF_LINEAR) { - nr_entries = sgtable_nents(bytes, da, pa); - if (!nr_entries) - return ERR_PTR(-EINVAL); - } else - nr_entries = bytes / PAGE_SIZE; - - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) - return ERR_PTR(-ENOMEM); - - err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); - if (err) { - kfree(sgt); - return ERR_PTR(err); - } - - pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); - - return sgt; -} - -/* free sg_table header(a kind of superblock) */ -static void sgtable_free(struct sg_table *sgt) -{ - if (!sgt) - return; - - sg_free_table(sgt); - kfree(sgt); - - pr_debug("%s: sgt:%p\n", __func__, sgt); -} - -/* map 'sglist' to a contiguous mpu virtual area and return 'va' */ -static void *vmap_sg(const struct sg_table *sgt) -{ - u32 va; - size_t total; - unsigned int i; - struct scatterlist *sg; - struct vm_struct *new; - const struct mem_type *mtype; - - mtype = get_mem_type(MT_DEVICE); - if (!mtype) - return ERR_PTR(-EINVAL); - - total = sgtable_len(sgt); - if (!total) - return ERR_PTR(-EINVAL); - - new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); - if (!new) - return ERR_PTR(-ENOMEM); - va = (u32)new->addr; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - size_t bytes; - u32 pa; - int err; - - pa = sg_phys(sg); - bytes = sg->length; - - BUG_ON(bytes != PAGE_SIZE); - - err = ioremap_page(va, pa, mtype); - if (err) - goto err_out; - - va += bytes; - } - - flush_cache_vmap((unsigned long)new->addr, - (unsigned long)(new->addr + total)); - return new->addr; - -err_out: - WARN_ON(1); /* FIXME: cleanup some mpu mappings */ - vunmap(new->addr); - return ERR_PTR(-EAGAIN); -} - -static inline void vunmap_sg(const void *va) -{ - vunmap(va); -} - -static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) -{ - struct iovm_struct *tmp; - - list_for_each_entry(tmp, &obj->mmap, list) { - if ((da >= tmp->da_start) && (da < 
tmp->da_end)) { - size_t len; - - len = tmp->da_end - tmp->da_start; - - dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", - __func__, tmp->da_start, da, tmp->da_end, len, - tmp->flags); - - return tmp; - } - } - - return NULL; -} - -/** - * find_iovm_area - find iovma which includes @da - * @da: iommu device virtual address - * - * Find the existing iovma starting at @da - */ -struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) -{ - struct iovm_struct *area; - - mutex_lock(&obj->mmap_lock); - area = __find_iovm_area(obj, da); - mutex_unlock(&obj->mmap_lock); - - return area; -} -EXPORT_SYMBOL_GPL(find_iovm_area); - -/* - * This finds the hole(area) which fits the requested address and len - * in iovmas mmap, and returns the new allocated iovma. - */ -static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, - size_t bytes, u32 flags) -{ - struct iovm_struct *new, *tmp; - u32 start, prev_end, alignment; - - if (!obj || !bytes) - return ERR_PTR(-EINVAL); - - start = da; - alignment = PAGE_SIZE; - - if (~flags & IOVMF_DA_FIXED) { - /* Don't map address 0 */ - start = obj->da_start ? obj->da_start : alignment; - - if (flags & IOVMF_LINEAR) - alignment = iopgsz_max(bytes); - start = roundup(start, alignment); - } else if (start < obj->da_start || start > obj->da_end || - obj->da_end - start < bytes) { - return ERR_PTR(-EINVAL); - } - - tmp = NULL; - if (list_empty(&obj->mmap)) - goto found; - - prev_end = 0; - list_for_each_entry(tmp, &obj->mmap, list) { - - if (prev_end > start) - break; - - if (tmp->da_start > start && (tmp->da_start - start) >= bytes) - goto found; - - if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) - start = roundup(tmp->da_end + 1, alignment); - - prev_end = tmp->da_end; - } - - if ((start >= prev_end) && (obj->da_end - start >= bytes)) - goto found; - - dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", - __func__, da, bytes, flags); - - return ERR_PTR(-EINVAL); - -found: - new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); - if (!new) - return ERR_PTR(-ENOMEM); - - new->iommu = obj; - new->da_start = start; - new->da_end = start + bytes; - new->flags = flags; - - /* - * keep ascending order of iovmas - */ - if (tmp) - list_add_tail(&new->list, &tmp->list); - else - list_add(&new->list, &obj->mmap); - - dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", - __func__, new->da_start, start, new->da_end, bytes, flags); - - return new; -} - -static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) -{ - size_t bytes; - - BUG_ON(!obj || !area); - - bytes = area->da_end - area->da_start; - - dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", - __func__, area->da_start, area->da_end, bytes, area->flags); - - list_del(&area->list); - kmem_cache_free(iovm_area_cachep, area); -} - -/** - * da_to_va - convert (d) to (v) - * @obj: objective iommu - * @da: iommu device virtual address - * @va: mpu virtual address - * - * Returns mpu virtual addr which corresponds to a given device virtual addr - */ -void *da_to_va(struct iommu *obj, u32 da) -{ - void *va = NULL; - struct iovm_struct *area; - - mutex_lock(&obj->mmap_lock); - - area = __find_iovm_area(obj, da); - if (!area) { - dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); - goto out; - } - va = area->va; -out: - mutex_unlock(&obj->mmap_lock); - - return va; -} -EXPORT_SYMBOL_GPL(da_to_va); - -static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) -{ - unsigned int i; - struct scatterlist *sg; - void *va = _va; - void *va_end; - - for_each_sg(sgt->sgl, 
sg, sgt->nents, i) { - struct page *pg; - const size_t bytes = PAGE_SIZE; - - /* - * iommu 'superpage' isn't supported with 'iommu_vmalloc()' - */ - pg = vmalloc_to_page(va); - BUG_ON(!pg); - sg_set_page(sg, pg, bytes, 0); - - va += bytes; - } - - va_end = _va + PAGE_SIZE * i; -} - -static inline void sgtable_drain_vmalloc(struct sg_table *sgt) -{ - /* - * Actually this is not necessary at all, just exists for - * consistency of the code readability. - */ - BUG_ON(!sgt); -} - -static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da, - size_t len) -{ - unsigned int i; - struct scatterlist *sg; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - unsigned bytes; - - bytes = max_alignment(da | pa); - bytes = min_t(unsigned, bytes, iopgsz_max(len)); - - BUG_ON(!iopgsz_ok(bytes)); - - sg_set_buf(sg, phys_to_virt(pa), bytes); - /* - * 'pa' is cotinuous(linear). - */ - pa += bytes; - da += bytes; - len -= bytes; - } - BUG_ON(len); -} - -static inline void sgtable_drain_kmalloc(struct sg_table *sgt) -{ - /* - * Actually this is not necessary at all, just exists for - * consistency of the code readability - */ - BUG_ON(!sgt); -} - -/* create 'da' <-> 'pa' mapping from 'sgt' */ -static int map_iovm_area(struct iommu *obj, struct iovm_struct *new, - const struct sg_table *sgt, u32 flags) -{ - int err; - unsigned int i, j; - struct scatterlist *sg; - u32 da = new->da_start; - - if (!obj || !sgt) - return -EINVAL; - - BUG_ON(!sgtable_ok(sgt)); - - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - u32 pa; - int pgsz; - size_t bytes; - struct iotlb_entry e; - - pa = sg_phys(sg); - bytes = sg->length; - - flags &= ~IOVMF_PGSZ_MASK; - pgsz = bytes_to_iopgsz(bytes); - if (pgsz < 0) - goto err_out; - flags |= pgsz; - - pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, - i, da, pa, bytes); - - iotlb_init_entry(&e, da, pa, flags); - err = iopgtable_store_entry(obj, &e); - if (err) - goto err_out; - - da += bytes; - } - return 0; - -err_out: - da = new->da_start; - - for_each_sg(sgt->sgl, sg, i, j) { - size_t bytes; - - bytes = iopgtable_clear_entry(obj, da); - - BUG_ON(!iopgsz_ok(bytes)); - - da += bytes; - } - return err; -} - -/* release 'da' <-> 'pa' mapping */ -static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area) -{ - u32 start; - size_t total = area->da_end - area->da_start; - - BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); - - start = area->da_start; - while (total > 0) { - size_t bytes; - - bytes = iopgtable_clear_entry(obj, start); - if (bytes == 0) - bytes = PAGE_SIZE; - else - dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", - __func__, start, bytes, area->flags); - - BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); - - total -= bytes; - start += bytes; - } - BUG_ON(total); -} - -/* template function for all unmapping */ -static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, - void (*fn)(const void *), u32 flags) -{ - struct sg_table *sgt = NULL; - struct iovm_struct *area; - - if (!IS_ALIGNED(da, PAGE_SIZE)) { - dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); - return NULL; - } - - mutex_lock(&obj->mmap_lock); - - area = __find_iovm_area(obj, da); - if (!area) { - dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); - goto out; - } - - if ((area->flags & flags) != flags) { - dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, - area->flags); - goto out; - } - sgt = (struct sg_table *)area->sgt; - - unmap_iovm_area(obj, area); - - fn(area->va); - - dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, - area->da_start, da, area->da_end, - 
area->da_end - area->da_start, area->flags); - - free_iovm_area(obj, area); -out: - mutex_unlock(&obj->mmap_lock); - - return sgt; -} - -static u32 map_iommu_region(struct iommu *obj, u32 da, - const struct sg_table *sgt, void *va, size_t bytes, u32 flags) -{ - int err = -ENOMEM; - struct iovm_struct *new; - - mutex_lock(&obj->mmap_lock); - - new = alloc_iovm_area(obj, da, bytes, flags); - if (IS_ERR(new)) { - err = PTR_ERR(new); - goto err_alloc_iovma; - } - new->va = va; - new->sgt = sgt; - - if (map_iovm_area(obj, new, sgt, new->flags)) - goto err_map; - - mutex_unlock(&obj->mmap_lock); - - dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", - __func__, new->da_start, bytes, new->flags, va); - - return new->da_start; - -err_map: - free_iovm_area(obj, new); -err_alloc_iovma: - mutex_unlock(&obj->mmap_lock); - return err; -} - -static inline u32 __iommu_vmap(struct iommu *obj, u32 da, - const struct sg_table *sgt, void *va, size_t bytes, u32 flags) -{ - return map_iommu_region(obj, da, sgt, va, bytes, flags); -} - -/** - * iommu_vmap - (d)-(p)-(v) address mapper - * @obj: objective iommu - * @sgt: address of scatter gather table - * @flags: iovma and page property - * - * Creates 1-n-1 mapping with given @sgt and returns @da. - * All @sgt element must be io page size aligned. - */ -u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, - u32 flags) -{ - size_t bytes; - void *va = NULL; - - if (!obj || !obj->dev || !sgt) - return -EINVAL; - - bytes = sgtable_len(sgt); - if (!bytes) - return -EINVAL; - bytes = PAGE_ALIGN(bytes); - - if (flags & IOVMF_MMIO) { - va = vmap_sg(sgt); - if (IS_ERR(va)) - return PTR_ERR(va); - } - - flags |= IOVMF_DISCONT; - flags |= IOVMF_MMIO; - - da = __iommu_vmap(obj, da, sgt, va, bytes, flags); - if (IS_ERR_VALUE(da)) - vunmap_sg(va); - - return da; -} -EXPORT_SYMBOL_GPL(iommu_vmap); - -/** - * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' - * @obj: objective iommu - * @da: iommu device virtual address - * - * Free the iommu virtually contiguous memory area starting at - * @da, which was returned by 'iommu_vmap()'. - */ -struct sg_table *iommu_vunmap(struct iommu *obj, u32 da) -{ - struct sg_table *sgt; - /* - * 'sgt' is allocated before 'iommu_vmalloc()' is called. - * Just returns 'sgt' to the caller to free - */ - sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); - if (!sgt) - dev_dbg(obj->dev, "%s: No sgt\n", __func__); - return sgt; -} -EXPORT_SYMBOL_GPL(iommu_vunmap); - -/** - * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper - * @obj: objective iommu - * @da: contiguous iommu virtual memory - * @bytes: allocation size - * @flags: iovma and page property - * - * Allocate @bytes linearly and creates 1-n-1 mapping and returns - * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. 
- */ -u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) -{ - void *va; - struct sg_table *sgt; - - if (!obj || !obj->dev || !bytes) - return -EINVAL; - - bytes = PAGE_ALIGN(bytes); - - va = vmalloc(bytes); - if (!va) - return -ENOMEM; - - flags |= IOVMF_DISCONT; - flags |= IOVMF_ALLOC; - - sgt = sgtable_alloc(bytes, flags, da, 0); - if (IS_ERR(sgt)) { - da = PTR_ERR(sgt); - goto err_sgt_alloc; - } - sgtable_fill_vmalloc(sgt, va); - - da = __iommu_vmap(obj, da, sgt, va, bytes, flags); - if (IS_ERR_VALUE(da)) - goto err_iommu_vmap; - - return da; - -err_iommu_vmap: - sgtable_drain_vmalloc(sgt); - sgtable_free(sgt); -err_sgt_alloc: - vfree(va); - return da; -} -EXPORT_SYMBOL_GPL(iommu_vmalloc); - -/** - * iommu_vfree - release memory allocated by 'iommu_vmalloc()' - * @obj: objective iommu - * @da: iommu device virtual address - * - * Frees the iommu virtually continuous memory area starting at - * @da, as obtained from 'iommu_vmalloc()'. - */ -void iommu_vfree(struct iommu *obj, const u32 da) -{ - struct sg_table *sgt; - - sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); - if (!sgt) - dev_dbg(obj->dev, "%s: No sgt\n", __func__); - sgtable_free(sgt); -} -EXPORT_SYMBOL_GPL(iommu_vfree); - -static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va, - size_t bytes, u32 flags) -{ - struct sg_table *sgt; - - sgt = sgtable_alloc(bytes, flags, da, pa); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); - - sgtable_fill_kmalloc(sgt, pa, da, bytes); - - da = map_iommu_region(obj, da, sgt, va, bytes, flags); - if (IS_ERR_VALUE(da)) { - sgtable_drain_kmalloc(sgt); - sgtable_free(sgt); - } - - return da; -} - -/** - * iommu_kmap - (d)-(p)-(v) address mapper - * @obj: objective iommu - * @da: contiguous iommu virtual memory - * @pa: contiguous physical memory - * @flags: iovma and page property - * - * Creates 1-1-1 mapping and returns @da again, which can be - * adjusted if 'IOVMF_DA_FIXED' is not set. - */ -u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, - u32 flags) -{ - void *va; - - if (!obj || !obj->dev || !bytes) - return -EINVAL; - - bytes = PAGE_ALIGN(bytes); - - va = ioremap(pa, bytes); - if (!va) - return -ENOMEM; - - flags |= IOVMF_LINEAR; - flags |= IOVMF_MMIO; - - da = __iommu_kmap(obj, da, pa, va, bytes, flags); - if (IS_ERR_VALUE(da)) - iounmap(va); - - return da; -} -EXPORT_SYMBOL_GPL(iommu_kmap); - -/** - * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' - * @obj: objective iommu - * @da: iommu device virtual address - * - * Frees the iommu virtually contiguous memory area starting at - * @da, which was passed to and was returned by'iommu_kmap()'. - */ -void iommu_kunmap(struct iommu *obj, u32 da) -{ - struct sg_table *sgt; - typedef void (*func_t)(const void *); - - sgt = unmap_vm_area(obj, da, (func_t)iounmap, - IOVMF_LINEAR | IOVMF_MMIO); - if (!sgt) - dev_dbg(obj->dev, "%s: No sgt\n", __func__); - sgtable_free(sgt); -} -EXPORT_SYMBOL_GPL(iommu_kunmap); - -/** - * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper - * @obj: objective iommu - * @da: contiguous iommu virtual memory - * @bytes: bytes for allocation - * @flags: iovma and page property - * - * Allocate @bytes linearly and creates 1-1-1 mapping and returns - * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. 
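
Similarly, a hedged sketch of the 1-1-1 "kmap" pattern defined above, mapping an already physically contiguous region at a caller-chosen device address; the function name and the 64 KB size are placeholders, not part of the patch.

static int example_map_contig(struct iommu *obj, u32 da, u32 pa)
{
	u32 mapped;

	/*
	 * Request the exact device address 'da'; without IOVMF_DA_FIXED
	 * the allocator would be free to pick another one.
	 */
	mapped = iommu_kmap(obj, da, pa, SZ_64K, IOVMF_DA_FIXED);
	if (IS_ERR_VALUE(mapped))
		return (int)mapped;

	/* ... the device now sees the region at 'mapped' ... */

	iommu_kunmap(obj, mapped);
	return 0;
}
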
- */ -u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) -{ - void *va; - u32 pa; - - if (!obj || !obj->dev || !bytes) - return -EINVAL; - - bytes = PAGE_ALIGN(bytes); - - va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); - if (!va) - return -ENOMEM; - pa = virt_to_phys(va); - - flags |= IOVMF_LINEAR; - flags |= IOVMF_ALLOC; - - da = __iommu_kmap(obj, da, pa, va, bytes, flags); - if (IS_ERR_VALUE(da)) - kfree(va); - - return da; -} -EXPORT_SYMBOL_GPL(iommu_kmalloc); - -/** - * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' - * @obj: objective iommu - * @da: iommu device virtual address - * - * Frees the iommu virtually contiguous memory area starting at - * @da, which was passed to and was returned by'iommu_kmalloc()'. - */ -void iommu_kfree(struct iommu *obj, u32 da) -{ - struct sg_table *sgt; - - sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); - if (!sgt) - dev_dbg(obj->dev, "%s: No sgt\n", __func__); - sgtable_free(sgt); -} -EXPORT_SYMBOL_GPL(iommu_kfree); - - -static int __init iovmm_init(void) -{ - const unsigned long flags = SLAB_HWCACHE_ALIGN; - struct kmem_cache *p; - - p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, - flags, NULL); - if (!p) - return -ENOMEM; - iovm_area_cachep = p; - - return 0; -} -module_init(iovmm_init); - -static void __exit iovmm_exit(void) -{ - kmem_cache_destroy(iovm_area_cachep); -} -module_exit(iovmm_exit); - -MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); -MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 69ddc9f76c1..ad80112c227 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c @@ -29,6 +29,7 @@ #include <linux/kfifo.h> #include <linux/err.h> #include <linux/notifier.h> +#include <linux/module.h> #include <plat/mailbox.h> diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c index 6c62af10871..4b15cd7926d 100644 --- a/arch/arm/plat-omap/mcbsp.c +++ b/arch/arm/plat-omap/mcbsp.c @@ -24,45 +24,40 @@ #include <linux/slab.h> #include <plat/mcbsp.h> -#include <plat/omap_device.h> #include <linux/pm_runtime.h> -/* XXX These "sideways" includes are a sign that something is wrong */ -#include "../mach-omap2/cm2xxx_3xxx.h" -#include "../mach-omap2/cm-regbits-34xx.h" - struct omap_mcbsp **mcbsp_ptr; -int omap_mcbsp_count, omap_mcbsp_cache_size; +int omap_mcbsp_count; + +#define omap_mcbsp_check_valid_id(id) (id < omap_mcbsp_count) +#define id_to_mcbsp_ptr(id) mcbsp_ptr[id]; static void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val) { - if (cpu_class_is_omap1()) { - ((u16 *)mcbsp->reg_cache)[reg / sizeof(u16)] = (u16)val; - __raw_writew((u16)val, mcbsp->io_base + reg); - } else if (cpu_is_omap2420()) { - ((u16 *)mcbsp->reg_cache)[reg / sizeof(u32)] = (u16)val; - __raw_writew((u16)val, mcbsp->io_base + reg); + void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step; + + if (mcbsp->pdata->reg_size == 2) { + ((u16 *)mcbsp->reg_cache)[reg] = (u16)val; + __raw_writew((u16)val, addr); } else { - ((u32 *)mcbsp->reg_cache)[reg / sizeof(u32)] = val; - __raw_writel(val, mcbsp->io_base + reg); + ((u32 *)mcbsp->reg_cache)[reg] = val; + __raw_writel(val, addr); } } static int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg, bool from_cache) { - if (cpu_class_is_omap1()) { - return !from_cache ? 
__raw_readw(mcbsp->io_base + reg) : - ((u16 *)mcbsp->reg_cache)[reg / sizeof(u16)]; - } else if (cpu_is_omap2420()) { - return !from_cache ? __raw_readw(mcbsp->io_base + reg) : - ((u16 *)mcbsp->reg_cache)[reg / sizeof(u32)]; + void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step; + + if (mcbsp->pdata->reg_size == 2) { + return !from_cache ? __raw_readw(addr) : + ((u16 *)mcbsp->reg_cache)[reg]; } else { - return !from_cache ? __raw_readl(mcbsp->io_base + reg) : - ((u32 *)mcbsp->reg_cache)[reg / sizeof(u32)]; + return !from_cache ? __raw_readl(addr) : + ((u32 *)mcbsp->reg_cache)[reg]; } } -#ifdef CONFIG_ARCH_OMAP3 static void omap_mcbsp_st_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val) { __raw_writel(val, mcbsp->st_data->io_base_st + reg); @@ -72,7 +67,6 @@ static int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg) { return __raw_readl(mcbsp->st_data->io_base_st + reg); } -#endif #define MCBSP_READ(mcbsp, reg) \ omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 0) @@ -187,7 +181,7 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config) MCBSP_WRITE(mcbsp, MCR2, config->mcr2); MCBSP_WRITE(mcbsp, MCR1, config->mcr1); MCBSP_WRITE(mcbsp, PCR0, config->pcr0); - if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) { + if (mcbsp->pdata->has_ccr) { MCBSP_WRITE(mcbsp, XCCR, config->xccr); MCBSP_WRITE(mcbsp, RCCR, config->rccr); } @@ -239,46 +233,28 @@ int omap_mcbsp_dma_reg_params(unsigned int id, unsigned int stream) } mcbsp = id_to_mcbsp_ptr(id); - data_reg = mcbsp->phys_dma_base; - - if (mcbsp->mcbsp_config_type < MCBSP_CONFIG_TYPE2) { + if (mcbsp->pdata->reg_size == 2) { if (stream) - data_reg += OMAP_MCBSP_REG_DRR1; + data_reg = OMAP_MCBSP_REG_DRR1; else - data_reg += OMAP_MCBSP_REG_DXR1; + data_reg = OMAP_MCBSP_REG_DXR1; } else { if (stream) - data_reg += OMAP_MCBSP_REG_DRR; + data_reg = OMAP_MCBSP_REG_DRR; else - data_reg += OMAP_MCBSP_REG_DXR; + data_reg = OMAP_MCBSP_REG_DXR; } - return data_reg; + return mcbsp->phys_dma_base + data_reg * mcbsp->pdata->reg_step; } EXPORT_SYMBOL(omap_mcbsp_dma_reg_params); -#ifdef CONFIG_ARCH_OMAP3 -static struct omap_device *find_omap_device_by_dev(struct device *dev) -{ - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); - return container_of(pdev, struct omap_device, pdev); -} - static void omap_st_on(struct omap_mcbsp *mcbsp) { unsigned int w; - struct omap_device *od; - od = find_omap_device_by_dev(mcbsp->dev); - - /* - * Sidetone uses McBSP ICLK - which must not idle when sidetones - * are enabled or sidetones start sounding ugly. 
- */ - w = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE); - w &= ~(1 << (mcbsp->id - 2)); - omap2_cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE); + if (mcbsp->pdata->enable_st_clock) + mcbsp->pdata->enable_st_clock(mcbsp->id, 1); /* Enable McBSP Sidetone */ w = MCBSP_READ(mcbsp, SSELCR); @@ -292,9 +268,6 @@ static void omap_st_on(struct omap_mcbsp *mcbsp) static void omap_st_off(struct omap_mcbsp *mcbsp) { unsigned int w; - struct omap_device *od; - - od = find_omap_device_by_dev(mcbsp->dev); w = MCBSP_ST_READ(mcbsp, SSELCR); MCBSP_ST_WRITE(mcbsp, SSELCR, w & ~(ST_SIDETONEEN)); @@ -302,17 +275,13 @@ static void omap_st_off(struct omap_mcbsp *mcbsp) w = MCBSP_READ(mcbsp, SSELCR); MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN)); - w = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE); - w |= 1 << (mcbsp->id - 2); - omap2_cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE); + if (mcbsp->pdata->enable_st_clock) + mcbsp->pdata->enable_st_clock(mcbsp->id, 0); } static void omap_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir) { u16 val, i; - struct omap_device *od; - - od = find_omap_device_by_dev(mcbsp->dev); val = MCBSP_ST_READ(mcbsp, SSELCR); @@ -340,9 +309,6 @@ static void omap_st_chgain(struct omap_mcbsp *mcbsp) { u16 w; struct omap_mcbsp_st_data *st_data = mcbsp->st_data; - struct omap_device *od; - - od = find_omap_device_by_dev(mcbsp->dev); w = MCBSP_ST_READ(mcbsp, SSELCR); @@ -525,14 +491,13 @@ void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold) { struct omap_mcbsp *mcbsp; - if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) - return; - if (!omap_mcbsp_check_valid_id(id)) { printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); return; } mcbsp = id_to_mcbsp_ptr(id); + if (mcbsp->pdata->buffer_size == 0) + return; if (threshold && threshold <= mcbsp->max_tx_thres) MCBSP_WRITE(mcbsp, THRSH2, threshold - 1); @@ -548,14 +513,13 @@ void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold) { struct omap_mcbsp *mcbsp; - if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) - return; - if (!omap_mcbsp_check_valid_id(id)) { printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); return; } mcbsp = id_to_mcbsp_ptr(id); + if (mcbsp->pdata->buffer_size == 0) + return; if (threshold && threshold <= mcbsp->max_rx_thres) MCBSP_WRITE(mcbsp, THRSH1, threshold - 1); @@ -625,6 +589,8 @@ u16 omap_mcbsp_get_tx_delay(unsigned int id) return -ENODEV; } mcbsp = id_to_mcbsp_ptr(id); + if (mcbsp->pdata->buffer_size == 0) + return 0; /* Returns the number of free locations in the buffer */ buffstat = MCBSP_READ(mcbsp, XBUFFSTAT); @@ -648,6 +614,8 @@ u16 omap_mcbsp_get_rx_delay(unsigned int id) return -ENODEV; } mcbsp = id_to_mcbsp_ptr(id); + if (mcbsp->pdata->buffer_size == 0) + return 0; /* Returns the number of used locations in the buffer */ buffstat = MCBSP_READ(mcbsp, RBUFFSTAT); @@ -683,46 +651,6 @@ int omap_mcbsp_get_dma_op_mode(unsigned int id) } EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode); -static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) -{ - struct omap_device *od; - - od = find_omap_device_by_dev(mcbsp->dev); - /* - * Enable wakup behavior, smart idle and all wakeups - * REVISIT: some wakeups may be unnecessary - */ - if (cpu_is_omap34xx() || cpu_is_omap44xx()) { - MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN); - } -} - -static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) -{ - struct omap_device *od; - - od = find_omap_device_by_dev(mcbsp->dev); - - /* - * Disable wakup behavior, smart idle and all wakeups - */ - if (cpu_is_omap34xx() || 
cpu_is_omap44xx()) { - /* - * HW bug workaround - If no_idle mode is taken, we need to - * go to smart_idle before going to always_idle, or the - * device will not hit retention anymore. - */ - - MCBSP_WRITE(mcbsp, WAKEUPEN, 0); - } -} -#else -static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {} -static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {} -static inline void omap_st_start(struct omap_mcbsp *mcbsp) {} -static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {} -#endif - int omap_mcbsp_request(unsigned int id) { struct omap_mcbsp *mcbsp; @@ -735,7 +663,7 @@ int omap_mcbsp_request(unsigned int id) } mcbsp = id_to_mcbsp_ptr(id); - reg_cache = kzalloc(omap_mcbsp_cache_size, GFP_KERNEL); + reg_cache = kzalloc(mcbsp->reg_cache_size, GFP_KERNEL); if (!reg_cache) { return -ENOMEM; } @@ -757,8 +685,9 @@ int omap_mcbsp_request(unsigned int id) pm_runtime_get_sync(mcbsp->dev); - /* Do procedure specific to omap34xx arch, if applicable */ - omap34xx_mcbsp_request(mcbsp); + /* Enable wakeup behavior */ + if (mcbsp->pdata->has_wakeup) + MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN); /* * Make sure that transmitter, receiver and sample-rate generator are @@ -795,8 +724,9 @@ err_clk_disable: if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free) mcbsp->pdata->ops->free(id); - /* Do procedure specific to omap34xx arch, if applicable */ - omap34xx_mcbsp_free(mcbsp); + /* Disable wakeup behavior */ + if (mcbsp->pdata->has_wakeup) + MCBSP_WRITE(mcbsp, WAKEUPEN, 0); pm_runtime_put_sync(mcbsp->dev); @@ -825,8 +755,9 @@ void omap_mcbsp_free(unsigned int id) if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free) mcbsp->pdata->ops->free(id); - /* Do procedure specific to omap34xx arch, if applicable */ - omap34xx_mcbsp_free(mcbsp); + /* Disable wakeup behavior */ + if (mcbsp->pdata->has_wakeup) + MCBSP_WRITE(mcbsp, WAKEUPEN, 0); pm_runtime_put_sync(mcbsp->dev); @@ -866,7 +797,7 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx) } mcbsp = id_to_mcbsp_ptr(id); - if (cpu_is_omap34xx()) + if (mcbsp->st_data) omap_st_start(mcbsp); /* Only enable SRG, if McBSP is master */ @@ -904,7 +835,7 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx) MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 7)); } - if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) { + if (mcbsp->pdata->has_ccr) { /* Release the transmitter and receiver */ w = MCBSP_READ_CACHE(mcbsp, XCCR); w &= ~(tx ? XDISABLE : 0); @@ -934,7 +865,7 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx) /* Reset transmitter */ tx &= 1; - if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) { + if (mcbsp->pdata->has_ccr) { w = MCBSP_READ_CACHE(mcbsp, XCCR); w |= (tx ? XDISABLE : 0); MCBSP_WRITE(mcbsp, XCCR, w); @@ -944,7 +875,7 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx) /* Reset receiver */ rx &= 1; - if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) { + if (mcbsp->pdata->has_ccr) { w = MCBSP_READ_CACHE(mcbsp, RCCR); w |= (rx ? RDISABLE : 0); MCBSP_WRITE(mcbsp, RCCR, w); @@ -961,39 +892,72 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx) MCBSP_WRITE(mcbsp, SPCR2, w & ~(1 << 6)); } - if (cpu_is_omap34xx()) + if (mcbsp->st_data) omap_st_stop(mcbsp); } EXPORT_SYMBOL(omap_mcbsp_stop); -/* - * The following functions are only required on an OMAP1-only build. 
- * mach-omap2/mcbsp.c contains the real functions - */ -#ifndef CONFIG_ARCH_OMAP2PLUS int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id) { - WARN(1, "%s: should never be called on an OMAP1-only kernel\n", - __func__); - return -EINVAL; + struct omap_mcbsp *mcbsp; + const char *src; + + if (!omap_mcbsp_check_valid_id(id)) { + pr_err("%s: Invalid id (%d)\n", __func__, id + 1); + return -EINVAL; + } + mcbsp = id_to_mcbsp_ptr(id); + + if (fck_src_id == MCBSP_CLKS_PAD_SRC) + src = "clks_ext"; + else if (fck_src_id == MCBSP_CLKS_PRCM_SRC) + src = "clks_fclk"; + else + return -EINVAL; + + if (mcbsp->pdata->set_clk_src) + return mcbsp->pdata->set_clk_src(mcbsp->dev, mcbsp->fclk, src); + else + return -EINVAL; } +EXPORT_SYMBOL(omap2_mcbsp_set_clks_src); void omap2_mcbsp1_mux_clkr_src(u8 mux) { - WARN(1, "%s: should never be called on an OMAP1-only kernel\n", - __func__); - return; + struct omap_mcbsp *mcbsp; + const char *src; + + if (mux == CLKR_SRC_CLKR) + src = "clkr"; + else if (mux == CLKR_SRC_CLKX) + src = "clkx"; + else + return; + + mcbsp = id_to_mcbsp_ptr(0); + if (mcbsp->pdata->mux_signal) + mcbsp->pdata->mux_signal(mcbsp->dev, "clkr", src); } +EXPORT_SYMBOL(omap2_mcbsp1_mux_clkr_src); void omap2_mcbsp1_mux_fsr_src(u8 mux) { - WARN(1, "%s: should never be called on an OMAP1-only kernel\n", - __func__); - return; + struct omap_mcbsp *mcbsp; + const char *src; + + if (mux == FSR_SRC_FSR) + src = "fsr"; + else if (mux == FSR_SRC_FSX) + src = "fsx"; + else + return; + + mcbsp = id_to_mcbsp_ptr(0); + if (mcbsp->pdata->mux_signal) + mcbsp->pdata->mux_signal(mcbsp->dev, "fsr", src); } -#endif +EXPORT_SYMBOL(omap2_mcbsp1_mux_fsr_src); -#ifdef CONFIG_ARCH_OMAP3 #define max_thres(m) (mcbsp->pdata->buffer_size) #define valid_threshold(m, val) ((val) <= max_thres(m)) #define THRESHOLD_PROP_BUILDER(prop) \ @@ -1084,6 +1048,17 @@ unlock: static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store); +static const struct attribute *additional_attrs[] = { + &dev_attr_max_tx_thres.attr, + &dev_attr_max_rx_thres.attr, + &dev_attr_dma_op_mode.attr, + NULL, +}; + +static const struct attribute_group additional_attr_group = { + .attrs = (struct attribute **)additional_attrs, +}; + static ssize_t st_taps_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1142,27 +1117,6 @@ out: static DEVICE_ATTR(st_taps, 0644, st_taps_show, st_taps_store); -static const struct attribute *additional_attrs[] = { - &dev_attr_max_tx_thres.attr, - &dev_attr_max_rx_thres.attr, - &dev_attr_dma_op_mode.attr, - NULL, -}; - -static const struct attribute_group additional_attr_group = { - .attrs = (struct attribute **)additional_attrs, -}; - -static inline int __devinit omap_additional_add(struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &additional_attr_group); -} - -static inline void __devexit omap_additional_remove(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &additional_attr_group); -} - static const struct attribute *sidetone_attrs[] = { &dev_attr_st_taps.attr, NULL, @@ -1172,10 +1126,9 @@ static const struct attribute_group sidetone_attr_group = { .attrs = (struct attribute **)sidetone_attrs, }; -static int __devinit omap_st_add(struct omap_mcbsp *mcbsp) +static int __devinit omap_st_add(struct omap_mcbsp *mcbsp, + struct resource *res) { - struct platform_device *pdev; - struct resource *res; struct omap_mcbsp_st_data *st_data; int err; @@ -1185,9 +1138,6 @@ static int __devinit omap_st_add(struct omap_mcbsp *mcbsp) goto err1; } - pdev = container_of(mcbsp->dev, struct 
platform_device, dev); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sidetone"); st_data->io_base_st = ioremap(res->start, resource_size(res)); if (!st_data->io_base_st) { err = -ENOMEM; @@ -1214,59 +1164,10 @@ static void __devexit omap_st_remove(struct omap_mcbsp *mcbsp) { struct omap_mcbsp_st_data *st_data = mcbsp->st_data; - if (st_data) { - sysfs_remove_group(&mcbsp->dev->kobj, &sidetone_attr_group); - iounmap(st_data->io_base_st); - kfree(st_data); - } -} - -static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) -{ - mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT; - if (cpu_is_omap34xx()) { - /* - * Initially configure the maximum thresholds to a safe value. - * The McBSP FIFO usage with these values should not go under - * 16 locations. - * If the whole FIFO without safety buffer is used, than there - * is a possibility that the DMA will be not able to push the - * new data on time, causing channel shifts in runtime. - */ - mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10; - mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10; - /* - * REVISIT: Set dmap_op_mode to THRESHOLD as default - * for mcbsp2 instances. - */ - if (omap_additional_add(mcbsp->dev)) - dev_warn(mcbsp->dev, - "Unable to create additional controls\n"); - - if (mcbsp->id == 2 || mcbsp->id == 3) - if (omap_st_add(mcbsp)) - dev_warn(mcbsp->dev, - "Unable to create sidetone controls\n"); - - } else { - mcbsp->max_tx_thres = -EINVAL; - mcbsp->max_rx_thres = -EINVAL; - } -} - -static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp) -{ - if (cpu_is_omap34xx()) { - omap_additional_remove(mcbsp->dev); - - if (mcbsp->id == 2 || mcbsp->id == 3) - omap_st_remove(mcbsp); - } + sysfs_remove_group(&mcbsp->dev->kobj, &sidetone_attr_group); + iounmap(st_data->io_base_st); + kfree(st_data); } -#else -static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {} -static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp) {} -#endif /* CONFIG_ARCH_OMAP3 */ /* * McBSP1 and McBSP3 are directly mapped on 1610 and 1510. @@ -1316,7 +1217,7 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev) } } mcbsp->phys_base = res->start; - omap_mcbsp_cache_size = resource_size(res); + mcbsp->reg_cache_size = resource_size(res); mcbsp->io_base = ioremap(res->start, resource_size(res)); if (!mcbsp->io_base) { ret = -ENOMEM; @@ -1364,15 +1265,52 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev) mcbsp->pdata = pdata; mcbsp->dev = &pdev->dev; mcbsp_ptr[id] = mcbsp; - mcbsp->mcbsp_config_type = pdata->mcbsp_config_type; platform_set_drvdata(pdev, mcbsp); pm_runtime_enable(mcbsp->dev); - /* Initialize mcbsp properties for OMAP34XX if needed / applicable */ - omap34xx_device_init(mcbsp); + mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT; + if (mcbsp->pdata->buffer_size) { + /* + * Initially configure the maximum thresholds to a safe value. + * The McBSP FIFO usage with these values should not go under + * 16 locations. + * If the whole FIFO without safety buffer is used, than there + * is a possibility that the DMA will be not able to push the + * new data on time, causing channel shifts in runtime. 
+ */ + mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10; + mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10; + + ret = sysfs_create_group(&mcbsp->dev->kobj, + &additional_attr_group); + if (ret) { + dev_err(mcbsp->dev, + "Unable to create additional controls\n"); + goto err_thres; + } + } else { + mcbsp->max_tx_thres = -EINVAL; + mcbsp->max_rx_thres = -EINVAL; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sidetone"); + if (res) { + ret = omap_st_add(mcbsp, res); + if (ret) { + dev_err(mcbsp->dev, + "Unable to create sidetone controls\n"); + goto err_st; + } + } return 0; +err_st: + if (mcbsp->pdata->buffer_size) + sysfs_remove_group(&mcbsp->dev->kobj, + &additional_attr_group); +err_thres: + clk_put(mcbsp->fclk); err_res: iounmap(mcbsp->io_base); err_ioremap: @@ -1392,7 +1330,12 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev) mcbsp->pdata->ops->free) mcbsp->pdata->ops->free(mcbsp->id); - omap34xx_device_exit(mcbsp); + if (mcbsp->pdata->buffer_size) + sysfs_remove_group(&mcbsp->dev->kobj, + &additional_attr_group); + + if (mcbsp->st_data) + omap_st_remove(mcbsp); clk_put(mcbsp->fclk); diff --git a/arch/arm/plat-omap/omap-pm-noop.c b/arch/arm/plat-omap/omap-pm-noop.c index b0471bb2d47..3dc3801aace 100644 --- a/arch/arm/plat-omap/omap-pm-noop.c +++ b/arch/arm/plat-omap/omap-pm-noop.c @@ -27,7 +27,7 @@ #include <plat/omap_device.h> static bool off_mode_enabled; -static u32 dummy_context_loss_counter; +static int dummy_context_loss_counter; /* * Device-driver-originated constraints (via board-*.c files) @@ -311,22 +311,32 @@ void omap_pm_disable_off_mode(void) #ifdef CONFIG_ARCH_OMAP2PLUS -u32 omap_pm_get_dev_context_loss_count(struct device *dev) +int omap_pm_get_dev_context_loss_count(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - u32 count; + int count; if (WARN_ON(!dev)) - return 0; + return -ENODEV; if (dev->parent == &omap_device_parent) { count = omap_device_get_context_loss_count(pdev); } else { WARN_ONCE(off_mode_enabled, "omap_pm: using dummy context loss counter; device %s should be converted to omap_device", dev_name(dev)); - if (off_mode_enabled) - dummy_context_loss_counter++; + count = dummy_context_loss_counter; + + if (off_mode_enabled) { + count++; + /* + * Context loss count has to be a non-negative value. + * Clear the sign bit to get a value range from 0 to + * INT_MAX. 
+ */ + count &= INT_MAX; + dummy_context_loss_counter = count; + } } pr_debug("OMAP PM: context loss count for dev %s = %d\n", @@ -337,7 +347,7 @@ u32 omap_pm_get_dev_context_loss_count(struct device *dev) #else -u32 omap_pm_get_dev_context_loss_count(struct device *dev) +int omap_pm_get_dev_context_loss_count(struct device *dev) { return dummy_context_loss_counter; } diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index 02609eee056..e8d98693d2d 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -78,6 +78,7 @@ #undef DEBUG #include <linux/kernel.h> +#include <linux/export.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/err.h> @@ -85,6 +86,8 @@ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/notifier.h> #include <plat/omap_device.h> #include <plat/omap_hwmod.h> @@ -94,6 +97,23 @@ #define USE_WAKEUP_LAT 0 #define IGNORE_WAKEUP_LAT 1 +static int omap_device_register(struct platform_device *pdev); +static int omap_early_device_register(struct platform_device *pdev); +static struct omap_device *omap_device_alloc(struct platform_device *pdev, + struct omap_hwmod **ohs, int oh_cnt, + struct omap_device_pm_latency *pm_lats, + int pm_lats_cnt); +static void omap_device_delete(struct omap_device *od); + + +static struct omap_device_pm_latency omap_default_latency[] = { + { + .deactivate_func = omap_device_idle_hwmods, + .activate_func = omap_device_enable_hwmods, + .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, + } +}; + /* Private functions */ /** @@ -114,7 +134,7 @@ static int _omap_device_activate(struct omap_device *od, u8 ignore_lat) { struct timespec a, b, c; - pr_debug("omap_device: %s: activating\n", od->pdev.name); + dev_dbg(&od->pdev->dev, "omap_device: activating\n"); while (od->pm_lat_level > 0) { struct omap_device_pm_latency *odpl; @@ -138,25 +158,24 @@ static int _omap_device_activate(struct omap_device *od, u8 ignore_lat) c = timespec_sub(b, a); act_lat = timespec_to_ns(&c); - pr_debug("omap_device: %s: pm_lat %d: activate: elapsed time " - "%llu nsec\n", od->pdev.name, od->pm_lat_level, - act_lat); + dev_dbg(&od->pdev->dev, + "omap_device: pm_lat %d: activate: elapsed time " + "%llu nsec\n", od->pm_lat_level, act_lat); if (act_lat > odpl->activate_lat) { odpl->activate_lat_worst = act_lat; if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) { odpl->activate_lat = act_lat; - pr_warning("omap_device: %s.%d: new worst case " - "activate latency %d: %llu\n", - od->pdev.name, od->pdev.id, - od->pm_lat_level, act_lat); + dev_dbg(&od->pdev->dev, + "new worst case activate latency " + "%d: %llu\n", + od->pm_lat_level, act_lat); } else - pr_warning("omap_device: %s.%d: activate " - "latency %d higher than exptected. " - "(%llu > %d)\n", - od->pdev.name, od->pdev.id, - od->pm_lat_level, act_lat, - odpl->activate_lat); + dev_warn(&od->pdev->dev, + "activate latency %d " + "higher than exptected. 
(%llu > %d)\n", + od->pm_lat_level, act_lat, + odpl->activate_lat); } od->dev_wakeup_lat -= odpl->activate_lat; @@ -183,7 +202,7 @@ static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat) { struct timespec a, b, c; - pr_debug("omap_device: %s: deactivating\n", od->pdev.name); + dev_dbg(&od->pdev->dev, "omap_device: deactivating\n"); while (od->pm_lat_level < od->pm_lats_cnt) { struct omap_device_pm_latency *odpl; @@ -206,28 +225,26 @@ static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat) c = timespec_sub(b, a); deact_lat = timespec_to_ns(&c); - pr_debug("omap_device: %s: pm_lat %d: deactivate: elapsed time " - "%llu nsec\n", od->pdev.name, od->pm_lat_level, - deact_lat); + dev_dbg(&od->pdev->dev, + "omap_device: pm_lat %d: deactivate: elapsed time " + "%llu nsec\n", od->pm_lat_level, deact_lat); if (deact_lat > odpl->deactivate_lat) { odpl->deactivate_lat_worst = deact_lat; if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) { odpl->deactivate_lat = deact_lat; - pr_warning("omap_device: %s.%d: new worst case " - "deactivate latency %d: %llu\n", - od->pdev.name, od->pdev.id, - od->pm_lat_level, deact_lat); + dev_dbg(&od->pdev->dev, + "new worst case deactivate latency " + "%d: %llu\n", + od->pm_lat_level, deact_lat); } else - pr_warning("omap_device: %s.%d: deactivate " - "latency %d higher than exptected. " - "(%llu > %d)\n", - od->pdev.name, od->pdev.id, - od->pm_lat_level, deact_lat, - odpl->deactivate_lat); + dev_warn(&od->pdev->dev, + "deactivate latency %d " + "higher than exptected. (%llu > %d)\n", + od->pm_lat_level, deact_lat, + odpl->deactivate_lat); } - od->dev_wakeup_lat += odpl->activate_lat; od->pm_lat_level++; @@ -245,28 +262,27 @@ static void _add_clkdev(struct omap_device *od, const char *clk_alias, if (!clk_alias || !clk_name) return; - pr_debug("omap_device: %s: Creating %s -> %s\n", - dev_name(&od->pdev.dev), clk_alias, clk_name); + dev_dbg(&od->pdev->dev, "Creating %s -> %s\n", clk_alias, clk_name); - r = clk_get_sys(dev_name(&od->pdev.dev), clk_alias); + r = clk_get_sys(dev_name(&od->pdev->dev), clk_alias); if (!IS_ERR(r)) { - pr_warning("omap_device: %s: alias %s already exists\n", - dev_name(&od->pdev.dev), clk_alias); + dev_warn(&od->pdev->dev, + "alias %s already exists\n", clk_alias); clk_put(r); return; } r = omap_clk_get_by_name(clk_name); if (IS_ERR(r)) { - pr_err("omap_device: %s: omap_clk_get_by_name for %s failed\n", - dev_name(&od->pdev.dev), clk_name); + dev_err(&od->pdev->dev, + "omap_clk_get_by_name for %s failed\n", clk_name); return; } - l = clkdev_alloc(r, clk_alias, dev_name(&od->pdev.dev)); + l = clkdev_alloc(r, clk_alias, dev_name(&od->pdev->dev)); if (!l) { - pr_err("omap_device: %s: clkdev_alloc for %s failed\n", - dev_name(&od->pdev.dev), clk_alias); + dev_err(&od->pdev->dev, + "clkdev_alloc for %s failed\n", clk_alias); return; } @@ -304,6 +320,96 @@ static void _add_hwmod_clocks_clkdev(struct omap_device *od, } +static struct dev_pm_domain omap_device_pm_domain; + +/** + * omap_device_build_from_dt - build an omap_device with multiple hwmods + * @pdev_name: name of the platform_device driver to use + * @pdev_id: this platform_device's connection ID + * @oh: ptr to the single omap_hwmod that backs this omap_device + * @pdata: platform_data ptr to associate with the platform_device + * @pdata_len: amount of memory pointed to by @pdata + * @pm_lats: pointer to a omap_device_pm_latency array for this device + * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats + * @is_early_device: should the device be registered as an 
early device or not + * + * Function for building an omap_device already registered from device-tree + * + * Returns 0 or PTR_ERR() on error. + */ +static int omap_device_build_from_dt(struct platform_device *pdev) +{ + struct omap_hwmod **hwmods; + struct omap_device *od; + struct omap_hwmod *oh; + struct device_node *node = pdev->dev.of_node; + const char *oh_name; + int oh_cnt, i, ret = 0; + + oh_cnt = of_property_count_strings(node, "ti,hwmods"); + if (!oh_cnt || IS_ERR_VALUE(oh_cnt)) { + dev_warn(&pdev->dev, "No 'hwmods' to build omap_device\n"); + return -ENODEV; + } + + hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL); + if (!hwmods) { + ret = -ENOMEM; + goto odbfd_exit; + } + + for (i = 0; i < oh_cnt; i++) { + of_property_read_string_index(node, "ti,hwmods", i, &oh_name); + oh = omap_hwmod_lookup(oh_name); + if (!oh) { + dev_err(&pdev->dev, "Cannot lookup hwmod '%s'\n", + oh_name); + ret = -EINVAL; + goto odbfd_exit1; + } + hwmods[i] = oh; + } + + od = omap_device_alloc(pdev, hwmods, oh_cnt, NULL, 0); + if (!od) { + dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n", + oh_name); + ret = PTR_ERR(od); + goto odbfd_exit1; + } + + if (of_get_property(node, "ti,no_idle_on_suspend", NULL)) + omap_device_disable_idle_on_suspend(pdev); + + pdev->dev.pm_domain = &omap_device_pm_domain; + +odbfd_exit1: + kfree(hwmods); +odbfd_exit: + return ret; +} + +static int _omap_device_notifier_call(struct notifier_block *nb, + unsigned long event, void *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + switch (event) { + case BUS_NOTIFY_ADD_DEVICE: + if (pdev->dev.of_node) + omap_device_build_from_dt(pdev); + break; + + case BUS_NOTIFY_DEL_DEVICE: + if (pdev->archdata.od) + omap_device_delete(pdev->archdata.od); + break; + } + + return NOTIFY_DONE; +} + + /* Public functions for use by core code */ /** @@ -321,7 +427,7 @@ static void _add_hwmod_clocks_clkdev(struct omap_device *od, * return the context loss counter for that hwmod, otherwise return * zero. */ -u32 omap_device_get_context_loss_count(struct platform_device *pdev) +int omap_device_get_context_loss_count(struct platform_device *pdev) { struct omap_device *od; u32 ret = 0; @@ -343,7 +449,7 @@ u32 omap_device_get_context_loss_count(struct platform_device *pdev) * much memory to allocate before calling * omap_device_fill_resources(). Returns the count. */ -int omap_device_count_resources(struct omap_device *od) +static int omap_device_count_resources(struct omap_device *od) { int c = 0; int i; @@ -352,7 +458,7 @@ int omap_device_count_resources(struct omap_device *od) c += omap_hwmod_count_resources(od->hwmods[i]); pr_debug("omap_device: %s: counted %d total resources across %d " - "hwmods\n", od->pdev.name, c, od->hwmods_cnt); + "hwmods\n", od->pdev->name, c, od->hwmods_cnt); return c; } @@ -374,7 +480,8 @@ int omap_device_count_resources(struct omap_device *od) * functions to get device resources. Hacking around the existing * platform_device code wastes memory. Returns 0. 
*/ -int omap_device_fill_resources(struct omap_device *od, struct resource *res) +static int omap_device_fill_resources(struct omap_device *od, + struct resource *res) { int c = 0; int i, r; @@ -389,6 +496,113 @@ int omap_device_fill_resources(struct omap_device *od, struct resource *res) } /** + * omap_device_alloc - allocate an omap_device + * @pdev: platform_device that will be included in this omap_device + * @oh: ptr to the single omap_hwmod that backs this omap_device + * @pdata: platform_data ptr to associate with the platform_device + * @pdata_len: amount of memory pointed to by @pdata + * @pm_lats: pointer to a omap_device_pm_latency array for this device + * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats + * + * Convenience function for allocating an omap_device structure and filling + * hwmods, resources and pm_latency attributes. + * + * Returns an struct omap_device pointer or ERR_PTR() on error; + */ +static struct omap_device *omap_device_alloc(struct platform_device *pdev, + struct omap_hwmod **ohs, int oh_cnt, + struct omap_device_pm_latency *pm_lats, + int pm_lats_cnt) +{ + int ret = -ENOMEM; + struct omap_device *od; + struct resource *res = NULL; + int i, res_count; + struct omap_hwmod **hwmods; + + od = kzalloc(sizeof(struct omap_device), GFP_KERNEL); + if (!od) { + ret = -ENOMEM; + goto oda_exit1; + } + od->hwmods_cnt = oh_cnt; + + hwmods = kmemdup(ohs, sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL); + if (!hwmods) + goto oda_exit2; + + od->hwmods = hwmods; + od->pdev = pdev; + + /* + * HACK: Ideally the resources from DT should match, and hwmod + * should just add the missing ones. Since the name is not + * properly populated by DT, stick to hwmod resources only. + */ + if (pdev->num_resources && pdev->resource) + dev_warn(&pdev->dev, "%s(): resources already allocated %d\n", + __func__, pdev->num_resources); + + res_count = omap_device_count_resources(od); + if (res_count > 0) { + dev_dbg(&pdev->dev, "%s(): resources allocated from hwmod %d\n", + __func__, res_count); + res = kzalloc(sizeof(struct resource) * res_count, GFP_KERNEL); + if (!res) + goto oda_exit3; + + omap_device_fill_resources(od, res); + + ret = platform_device_add_resources(pdev, res, res_count); + kfree(res); + + if (ret) + goto oda_exit3; + } + + if (!pm_lats) { + pm_lats = omap_default_latency; + pm_lats_cnt = ARRAY_SIZE(omap_default_latency); + } + + od->pm_lats_cnt = pm_lats_cnt; + od->pm_lats = kmemdup(pm_lats, + sizeof(struct omap_device_pm_latency) * pm_lats_cnt, + GFP_KERNEL); + if (!od->pm_lats) + goto oda_exit3; + + pdev->archdata.od = od; + + for (i = 0; i < oh_cnt; i++) { + hwmods[i]->od = od; + _add_hwmod_clocks_clkdev(od, hwmods[i]); + } + + return od; + +oda_exit3: + kfree(hwmods); +oda_exit2: + kfree(od); +oda_exit1: + dev_err(&pdev->dev, "omap_device: build failed (%d)\n", ret); + + return ERR_PTR(ret); +} + +static void omap_device_delete(struct omap_device *od) +{ + if (!od) + return; + + od->pdev->archdata.od = NULL; + kfree(od->pm_lats); + kfree(od->hwmods); + kfree(od); +} + +/** * omap_device_build - build and register an omap_device with one omap_hwmod * @pdev_name: name of the platform_device driver to use * @pdev_id: this platform_device's connection ID @@ -405,7 +619,7 @@ int omap_device_fill_resources(struct omap_device *od, struct resource *res) * information. Returns ERR_PTR(-EINVAL) if @oh is NULL; otherwise, * passes along the return value of omap_device_build_ss(). 
*/ -struct omap_device *omap_device_build(const char *pdev_name, int pdev_id, +struct platform_device *omap_device_build(const char *pdev_name, int pdev_id, struct omap_hwmod *oh, void *pdata, int pdata_len, struct omap_device_pm_latency *pm_lats, @@ -438,18 +652,15 @@ struct omap_device *omap_device_build(const char *pdev_name, int pdev_id, * platform_device record. Returns an ERR_PTR() on error, or passes * along the return value of omap_device_register(). */ -struct omap_device *omap_device_build_ss(const char *pdev_name, int pdev_id, +struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id, struct omap_hwmod **ohs, int oh_cnt, void *pdata, int pdata_len, struct omap_device_pm_latency *pm_lats, int pm_lats_cnt, int is_early_device) { int ret = -ENOMEM; + struct platform_device *pdev; struct omap_device *od; - char *pdev_name2; - struct resource *res = NULL; - int i, res_count; - struct omap_hwmod **hwmods; if (!ohs || oh_cnt == 0 || !pdev_name) return ERR_PTR(-EINVAL); @@ -457,72 +668,40 @@ struct omap_device *omap_device_build_ss(const char *pdev_name, int pdev_id, if (!pdata && pdata_len > 0) return ERR_PTR(-EINVAL); - pr_debug("omap_device: %s: building with %d hwmods\n", pdev_name, - oh_cnt); - - od = kzalloc(sizeof(struct omap_device), GFP_KERNEL); - if (!od) - return ERR_PTR(-ENOMEM); + pdev = platform_device_alloc(pdev_name, pdev_id); + if (!pdev) { + ret = -ENOMEM; + goto odbs_exit; + } - od->hwmods_cnt = oh_cnt; + /* Set the dev_name early to allow dev_xxx in omap_device_alloc */ + if (pdev->id != -1) + dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); + else + dev_set_name(&pdev->dev, "%s", pdev->name); - hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, - GFP_KERNEL); - if (!hwmods) + od = omap_device_alloc(pdev, ohs, oh_cnt, pm_lats, pm_lats_cnt); + if (!od) goto odbs_exit1; - memcpy(hwmods, ohs, sizeof(struct omap_hwmod *) * oh_cnt); - od->hwmods = hwmods; - - pdev_name2 = kzalloc(strlen(pdev_name) + 1, GFP_KERNEL); - if (!pdev_name2) - goto odbs_exit2; - strcpy(pdev_name2, pdev_name); - - od->pdev.name = pdev_name2; - od->pdev.id = pdev_id; - - res_count = omap_device_count_resources(od); - if (res_count > 0) { - res = kzalloc(sizeof(struct resource) * res_count, GFP_KERNEL); - if (!res) - goto odbs_exit3; - } - omap_device_fill_resources(od, res); - - od->pdev.num_resources = res_count; - od->pdev.resource = res; - - ret = platform_device_add_data(&od->pdev, pdata, pdata_len); + ret = platform_device_add_data(pdev, pdata, pdata_len); if (ret) - goto odbs_exit4; - - od->pm_lats = pm_lats; - od->pm_lats_cnt = pm_lats_cnt; + goto odbs_exit2; if (is_early_device) - ret = omap_early_device_register(od); + ret = omap_early_device_register(pdev); else - ret = omap_device_register(od); - - for (i = 0; i < oh_cnt; i++) { - hwmods[i]->od = od; - _add_hwmod_clocks_clkdev(od, hwmods[i]); - } - + ret = omap_device_register(pdev); if (ret) - goto odbs_exit4; + goto odbs_exit2; - return od; + return pdev; -odbs_exit4: - kfree(res); -odbs_exit3: - kfree(pdev_name2); odbs_exit2: - kfree(hwmods); + omap_device_delete(od); odbs_exit1: - kfree(od); + platform_device_put(pdev); +odbs_exit: pr_err("omap_device: %s: build failed (%d)\n", pdev_name, ret); @@ -538,11 +717,11 @@ odbs_exit1: * platform_early_add_device() on the underlying platform_device. * Returns 0 by default. 
*/ -int omap_early_device_register(struct omap_device *od) +static int omap_early_device_register(struct platform_device *pdev) { struct platform_device *devices[1]; - devices[0] = &(od->pdev); + devices[0] = pdev; early_platform_add_devices(devices, 1); return 0; } @@ -638,13 +817,13 @@ static struct dev_pm_domain omap_device_pm_domain = { * platform_device_register() on the underlying platform_device. * Returns the return value of platform_device_register(). */ -int omap_device_register(struct omap_device *od) +static int omap_device_register(struct platform_device *pdev) { - pr_debug("omap_device: %s: registering\n", od->pdev.name); + pr_debug("omap_device: %s: registering\n", pdev->name); - od->pdev.dev.parent = &omap_device_parent; - od->pdev.dev.pm_domain = &omap_device_pm_domain; - return platform_device_register(&od->pdev); + pdev->dev.parent = &omap_device_parent; + pdev->dev.pm_domain = &omap_device_pm_domain; + return platform_device_add(pdev); } @@ -671,8 +850,9 @@ int omap_device_enable(struct platform_device *pdev) od = to_omap_device(pdev); if (od->_state == OMAP_DEVICE_STATE_ENABLED) { - WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n", - od->pdev.name, od->pdev.id, __func__, od->_state); + dev_warn(&pdev->dev, + "omap_device: %s() called from invalid state %d\n", + __func__, od->_state); return -EINVAL; } @@ -710,8 +890,9 @@ int omap_device_idle(struct platform_device *pdev) od = to_omap_device(pdev); if (od->_state != OMAP_DEVICE_STATE_ENABLED) { - WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n", - od->pdev.name, od->pdev.id, __func__, od->_state); + dev_warn(&pdev->dev, + "omap_device: %s() called from invalid state %d\n", + __func__, od->_state); return -EINVAL; } @@ -742,8 +923,9 @@ int omap_device_shutdown(struct platform_device *pdev) if (od->_state != OMAP_DEVICE_STATE_ENABLED && od->_state != OMAP_DEVICE_STATE_IDLE) { - WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n", - od->pdev.name, od->pdev.id, __func__, od->_state); + dev_warn(&pdev->dev, + "omap_device: %s() called from invalid state %d\n", + __func__, od->_state); return -EINVAL; } @@ -837,6 +1019,42 @@ void __iomem *omap_device_get_rt_va(struct omap_device *od) return omap_hwmod_get_mpu_rt_va(od->hwmods[0]); } +/** + * omap_device_get_by_hwmod_name() - convert a hwmod name to + * device pointer. + * @oh_name: name of the hwmod device + * + * Returns back a struct device * pointer associated with a hwmod + * device represented by a hwmod_name + */ +struct device *omap_device_get_by_hwmod_name(const char *oh_name) +{ + struct omap_hwmod *oh; + + if (!oh_name) { + WARN(1, "%s: no hwmod name!\n", __func__); + return ERR_PTR(-EINVAL); + } + + oh = omap_hwmod_lookup(oh_name); + if (IS_ERR_OR_NULL(oh)) { + WARN(1, "%s: no hwmod for %s\n", __func__, + oh_name); + return ERR_PTR(oh ? PTR_ERR(oh) : -ENODEV); + } + if (IS_ERR_OR_NULL(oh->od)) { + WARN(1, "%s: no omap_device for %s\n", __func__, + oh_name); + return ERR_PTR(oh->od ? PTR_ERR(oh->od) : -ENODEV); + } + + if (IS_ERR_OR_NULL(oh->od->pdev)) + return ERR_PTR(oh->od->pdev ? 
PTR_ERR(oh->od->pdev) : -ENODEV); + + return &oh->od->pdev->dev; +} +EXPORT_SYMBOL(omap_device_get_by_hwmod_name); + /* * Public functions intended for use in omap_device_pm_latency * .activate_func and .deactivate_func function pointers @@ -917,8 +1135,13 @@ struct device omap_device_parent = { .parent = &platform_bus, }; +static struct notifier_block platform_nb = { + .notifier_call = _omap_device_notifier_call, +}; + static int __init omap_device_init(void) { + bus_register_notifier(&platform_bus_type, &platform_nb); return device_register(&omap_device_parent); } core_initcall(omap_device_init); diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 363c91e44ef..8b28664d1c6 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -19,7 +19,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/omapfb.h> #include <asm/tlb.h> #include <asm/cacheflush.h> @@ -29,10 +28,8 @@ #include <plat/sram.h> #include <plat/board.h> #include <plat/cpu.h> -#include <plat/vram.h> #include "sram.h" -#include "fb.h" /* XXX These "sideways" includes are a sign that something is wrong */ #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) @@ -41,16 +38,9 @@ #endif #define OMAP1_SRAM_PA 0x20000000 -#define OMAP1_SRAM_VA VMALLOC_END #define OMAP2_SRAM_PUB_PA (OMAP2_SRAM_PA + 0xf800) -#define OMAP2_SRAM_VA 0xfe400000 -#define OMAP2_SRAM_PUB_VA (OMAP2_SRAM_VA + 0x800) -#define OMAP3_SRAM_VA 0xfe400000 #define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000) -#define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000) -#define OMAP4_SRAM_VA 0xfe400000 #define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000) -#define OMAP4_SRAM_PUB_VA (OMAP4_SRAM_VA + 0x4000) #if defined(CONFIG_ARCH_OMAP2PLUS) #define SRAM_BOOTLOADER_SZ 0x00 @@ -73,9 +63,9 @@ #define ROUND_DOWN(value,boundary) ((value) & (~((boundary)-1))) static unsigned long omap_sram_start; -static unsigned long omap_sram_base; +static void __iomem *omap_sram_base; static unsigned long omap_sram_size; -static unsigned long omap_sram_ceil; +static void __iomem *omap_sram_ceil; /* * Depending on the target RAMFS firewall setup, the public usable amount of @@ -112,12 +102,9 @@ static int is_sram_locked(void) */ static void __init omap_detect_sram(void) { - unsigned long reserved; - if (cpu_class_is_omap2()) { if (is_sram_locked()) { if (cpu_is_omap34xx()) { - omap_sram_base = OMAP3_SRAM_PUB_VA; omap_sram_start = OMAP3_SRAM_PUB_PA; if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { @@ -126,25 +113,20 @@ static void __init omap_detect_sram(void) omap_sram_size = 0x8000; /* 32K */ } } else if (cpu_is_omap44xx()) { - omap_sram_base = OMAP4_SRAM_PUB_VA; omap_sram_start = OMAP4_SRAM_PUB_PA; omap_sram_size = 0xa000; /* 40K */ } else { - omap_sram_base = OMAP2_SRAM_PUB_VA; omap_sram_start = OMAP2_SRAM_PUB_PA; omap_sram_size = 0x800; /* 2K */ } } else { if (cpu_is_omap34xx()) { - omap_sram_base = OMAP3_SRAM_VA; omap_sram_start = OMAP3_SRAM_PA; omap_sram_size = 0x10000; /* 64K */ } else if (cpu_is_omap44xx()) { - omap_sram_base = OMAP4_SRAM_VA; omap_sram_start = OMAP4_SRAM_PA; omap_sram_size = 0xe000; /* 56K */ } else { - omap_sram_base = OMAP2_SRAM_VA; omap_sram_start = OMAP2_SRAM_PA; if (cpu_is_omap242x()) omap_sram_size = 0xa0000; /* 640K */ @@ -153,7 +135,6 @@ static void __init omap_detect_sram(void) } } } else { - omap_sram_base = OMAP1_SRAM_VA; omap_sram_start = OMAP1_SRAM_PA; if (cpu_is_omap7xx()) @@ -170,35 +151,14 @@ static void __init omap_detect_sram(void) 
omap_sram_size = 0x4000; } } - reserved = omapfb_reserve_sram(omap_sram_start, omap_sram_base, - omap_sram_size, - omap_sram_start + SRAM_BOOTLOADER_SZ, - omap_sram_size - SRAM_BOOTLOADER_SZ); - omap_sram_size -= reserved; - - reserved = omap_vram_reserve_sram(omap_sram_start, omap_sram_base, - omap_sram_size, - omap_sram_start + SRAM_BOOTLOADER_SZ, - omap_sram_size - SRAM_BOOTLOADER_SZ); - omap_sram_size -= reserved; - - omap_sram_ceil = omap_sram_base + omap_sram_size; } -static struct map_desc omap_sram_io_desc[] __initdata = { - { /* .length gets filled in at runtime */ - .virtual = OMAP1_SRAM_VA, - .pfn = __phys_to_pfn(OMAP1_SRAM_PA), - .type = MT_MEMORY - } -}; - /* * Note that we cannot use ioremap for SRAM, as clock init needs SRAM early. */ static void __init omap_map_sram(void) { - unsigned long base; + int cached = 1; if (omap_sram_size == 0) return; @@ -211,28 +171,18 @@ static void __init omap_map_sram(void) * the ARM may attempt to write cache lines back to SDRAM * which will cause the system to hang. */ - omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; + cached = 0; } - omap_sram_io_desc[0].virtual = omap_sram_base; - base = omap_sram_start; - base = ROUND_DOWN(base, PAGE_SIZE); - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); - omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); - iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); - - pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n", - (long long) __pfn_to_phys(omap_sram_io_desc[0].pfn), - omap_sram_io_desc[0].virtual, - omap_sram_io_desc[0].length); + omap_sram_start = ROUND_DOWN(omap_sram_start, PAGE_SIZE); + omap_sram_base = __arm_ioremap_exec(omap_sram_start, omap_sram_size, + cached); + if (!omap_sram_base) { + pr_err("SRAM: Could not map\n"); + return; + } - /* - * Normally devicemaps_init() would flush caches and tlb after - * mdesc->map_io(), but since we're called from map_io(), we - * must do it here. - */ - local_flush_tlb_all(); - flush_cache_all(); + omap_sram_ceil = omap_sram_base + omap_sram_size; /* * Looks like we need to preserve some bootloader code at the @@ -251,13 +201,18 @@ static void __init omap_map_sram(void) */ void *omap_sram_push_address(unsigned long size) { - if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) { + unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; + + available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ); + + if (size > available) { pr_err("Not enough space in SRAM\n"); return NULL; } - omap_sram_ceil -= size; - omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, FNCPY_ALIGN); + new_ceil -= size; + new_ceil = ROUND_DOWN(new_ceil, FNCPY_ALIGN); + omap_sram_ceil = IOMEM(new_ceil); return (void *)omap_sram_ceil; } |
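
For reference, a minimal caller-side sketch of how board code adapts to the interface changes carried in this diff: omap_device_build()/omap_device_build_ss() now return a struct platform_device * (still ERR_PTR()-encoded on failure), and omap_pm_get_dev_context_loss_count()/omap_device_get_context_loss_count() return a signed int so that -errno values can be reported. The hwmod name, platform device name/id, argument order and header paths below are illustrative assumptions, not taken from this series.

/*
 * Caller-side sketch (not part of the patch).  Assumes the usual
 * omap_device_build() parameter order of this kernel generation and an
 * existing hwmod named "mcbsp1_hwmod"; adjust to the real board data.
 */
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
#include <plat/omap-pm.h>

static int __init board_example_init(void)
{
	struct omap_hwmod *oh;
	struct platform_device *pdev;
	int loss;

	oh = omap_hwmod_lookup("mcbsp1_hwmod");		/* assumed hwmod name */
	if (!oh)
		return -ENODEV;

	/* Return type changed: check with IS_ERR(), not against NULL. */
	pdev = omap_device_build("omap-mcbsp", 1, oh, NULL, 0, NULL, 0, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/* Now an int: a negative value is an error, not a valid count. */
	loss = omap_pm_get_dev_context_loss_count(&pdev->dev);
	if (loss < 0)
		pr_warn("%s: no context loss data (%d)\n", __func__, loss);

	return 0;
}
arch_initcall(board_example_init);

Callers that previously cached the returned struct omap_device * keep the platform_device instead and, where they still need the omap_device, reach it through pdev->archdata.od, which omap_device_alloc() now populates.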