From f00a75c094c340c4e7435665816c3273c870e849 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Oct 2009 15:17:45 +0100 Subject: ARM: Pass VMA to copy_user_highpage() implementations Our copy_user_highpage() implementations may require cache maintenance. Ensure that implementations have all necessary details to perform this maintenance. Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 3a32af4cce3..a485ac3c869 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -117,11 +117,12 @@ #endif struct page; +struct vm_area_struct; struct cpu_user_fns { void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); }; #ifdef MULTI_USER @@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user; extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); extern void __cpu_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); #endif #define clear_user_highpage(page,vaddr) \ @@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define copy_user_highpage(to,from,vaddr,vma) \ - __cpu_copy_user_highpage(to, from, vaddr) + __cpu_copy_user_highpage(to, from, vaddr, vma) #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); -- cgit v1.2.3-70-g09d2 From 2ef7f3dbd7a70a48c3f09b498df528cb00ea03a4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 5 Nov 2009 13:29:36 +0000 Subject: ARM: Fix ptrace accesses Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 24 ++---------------- arch/arm/include/asm/smp_plat.h | 5 ++++ arch/arm/mm/flush.c | 51 +++++++++++++++++++++++++++++++++------ 3 files changed, 50 insertions(+), 30 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 730aefcfbee..3d2ef54c7cb 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end) * processes address space. Really, we want to allow our "user * space" model to handle this. 
*/ -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - memcpy(dst, src, len); \ - flush_ptrace_access(vma, page, vaddr, dst, len, 1);\ - } while (0) - +extern void copy_to_user_page(struct vm_area_struct *, struct page *, + unsigned long, void *, const void *, unsigned long); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ @@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig } } -static inline void -vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) -{ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - unsigned long addr = (unsigned long)kaddr; - __cpuc_coherent_kern_range(addr, addr + len); - } -} - #ifndef CONFIG_CPU_CACHE_VIPT #define flush_cache_mm(mm) \ vivt_flush_cache_mm(mm) @@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, vivt_flush_cache_range(vma,start,end) #define flush_cache_page(vma,addr,pfn) \ vivt_flush_cache_page(vma,addr,pfn) -#define flush_ptrace_access(vma,page,ua,ka,len,write) \ - vivt_flush_ptrace_access(vma,page,ua,ka,len,write) #else extern void flush_cache_mm(struct mm_struct *mm); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); -extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write); #endif #define flush_cache_dup_mm(mm) flush_cache_mm(mm) diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 59303e20084..e6215305544 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void) return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; } +static inline int cache_ops_need_broadcast(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; +} + #endif diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6f3a4b7a3b8..e34f095e209 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) __flush_icache_all(); } +#else +#define flush_pfn_alias(pfn,vaddr) do { } while (0) +#endif +#ifdef CONFIG_SMP +static void flush_ptrace_access_other(void *args) +{ + __flush_icache_all(); +} +#endif + +static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) + unsigned long uaddr, void *kaddr, unsigned long len) { if (cache_is_vivt()) { - vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write); + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + unsigned long addr = (unsigned long)kaddr; + __cpuc_coherent_kern_range(addr, addr + len); + } return; } @@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } /* VIPT non-aliasing cache */ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && - vma->vm_flags & VM_EXEC) { + if (vma->vm_flags & VM_EXEC) { unsigned long addr = (unsigned long)kaddr; - /* only flushing the kernel mapping on non-aliasing VIPT */ 
__cpuc_coherent_kern_range(addr, addr + len); +#ifdef CONFIG_SMP + if (cache_ops_need_broadcast()) + smp_call_function(flush_ptrace_access_other, + NULL, 1); +#endif } } -#else -#define flush_pfn_alias(pfn,vaddr) do { } while (0) + +/* + * Copy user data from/to a page which is mapped into a different + * processes address space. Really, we want to allow our "user + * space" model to handle this. + * + * Note that this code needs to run on the current CPU. + */ +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *dst, const void *src, + unsigned long len) +{ +#ifdef CONFIG_SMP + preempt_disable(); #endif + memcpy(dst, src, len); + flush_ptrace_access(vma, page, uaddr, dst, len); +#ifdef CONFIG_SMP + preempt_enable(); +#endif +} void __flush_dcache_page(struct address_space *mapping, struct page *page) { -- cgit v1.2.3-70-g09d2 From 252a9afff76097667429b583e8b5b170b47665a4 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Mon, 25 Jan 2010 11:42:22 -0600 Subject: arm: add mm API for DMA to vmalloc/vmap areas ARM cannot prevent cache movein, so this patch implements both the flush and invalidate pieces of the API. Signed-off-by: James Bottomley --- arch/arm/include/asm/cacheflush.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 730aefcfbee..4ae503cb1b8 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -432,6 +432,16 @@ static inline void __flush_icache_all(void) : "r" (0)); #endif } +static inline void flush_kernel_vmap_range(void *addr, int size) +{ + if ((cache_is_vivt() || cache_is_vipt_aliasing())) + __cpuc_flush_dcache_area(addr, (size_t)size); +} +static inline void invalidate_kernel_vmap_range(void *addr, int size) +{ + if ((cache_is_vivt() || cache_is_vipt_aliasing())) + __cpuc_flush_dcache_area(addr, (size_t)size); +} #define ARCH_HAS_FLUSH_ANON_PAGE static inline void flush_anon_page(struct vm_area_struct *vma, -- cgit v1.2.3-70-g09d2 From 0f4f0672ac950c96cffaf84a666d35e817d7c3ca Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Tue, 2 Feb 2010 20:23:15 +0100 Subject: ARM: 5899/2: arm: provide a mechanism to reserve performance counters To add support for perf events and to allow the hardware counters to be shared with oprofile, we need a way to reserve access to the pmu (performance monitor unit). 
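As a rough sketch of the intended calling convention (illustrative only: example_pmu_start()/example_pmu_stop() are invented names, while reserve_pmu(), release_pmu(), init_pmu() and struct pmu_irqs are the interfaces added below), a user such as oprofile would claim the counters like this:

	static const struct pmu_irqs *pmu_irqs;

	static int example_pmu_start(void)
	{
		int err;

		pmu_irqs = reserve_pmu();
		if (IS_ERR(pmu_irqs))
			return PTR_ERR(pmu_irqs);	/* typically -EBUSY */

		err = init_pmu();		/* IRQ i is routed to CPU i */
		if (err) {
			release_pmu(pmu_irqs);
			return err;
		}

		/* ... request pmu_irqs->irqs[0..num_irqs-1], program counters ... */
		return 0;
	}

	static void example_pmu_stop(void)
	{
		/* counters disabled and IRQs freed before this point */
		release_pmu(pmu_irqs);	/* must pass back the reserve_pmu() cookie */
	}
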
Platforms with PMU interrupts should register the interrupts in arch/arm/kernel/pmu.c Signed-off-by: Jamie Iles Signed-off-by: Russell King --- arch/arm/Kconfig | 5 +++ arch/arm/include/asm/pmu.h | 75 +++++++++++++++++++++++++++++++++ arch/arm/kernel/Makefile | 1 + arch/arm/kernel/pmu.c | 103 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 184 insertions(+) create mode 100644 arch/arm/include/asm/pmu.h create mode 100644 arch/arm/kernel/pmu.c (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 233a222752c..9e08891062b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -863,6 +863,11 @@ config XSCALE_PMU depends on CPU_XSCALE && !XSCALE_PMU_TIMER default y +config CPU_HAS_PMU + depends on CPU_V6 || CPU_V7 || XSCALE_PMU + default y + bool + if !MMU source "arch/arm/Kconfig-nommu" endif diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h new file mode 100644 index 00000000000..2829b9f981a --- /dev/null +++ b/arch/arm/include/asm/pmu.h @@ -0,0 +1,75 @@ +/* + * linux/arch/arm/include/asm/pmu.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PMU_H__ +#define __ARM_PMU_H__ + +#ifdef CONFIG_CPU_HAS_PMU + +struct pmu_irqs { + const int *irqs; + int num_irqs; +}; + +/** + * reserve_pmu() - reserve the hardware performance counters + * + * Reserve the hardware performance counters in the system for exclusive use. + * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR() + * encoded error on failure. + */ +extern const struct pmu_irqs * +reserve_pmu(void); + +/** + * release_pmu() - Relinquish control of the performance counters + * + * Release the performance counters and allow someone else to use them. + * Callers must have disabled the counters and released IRQs before calling + * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as + * a cookie. + */ +extern int +release_pmu(const struct pmu_irqs *irqs); + +/** + * init_pmu() - Initialise the PMU. + * + * Initialise the system ready for PMU enabling. This should typically set the + * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do + * the actual hardware initialisation. 
+ */ +extern int +init_pmu(void); + +#else /* CONFIG_CPU_HAS_PMU */ + +static inline const struct pmu_irqs * +reserve_pmu(void) +{ + return ERR_PTR(-ENODEV); +} + +static inline int +release_pmu(const struct pmu_irqs *irqs) +{ + return -ENODEV; +} + +static inline int +init_pmu(void) +{ + return -ENODEV; +} + +#endif /* CONFIG_CPU_HAS_PMU */ + +#endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index dd00f747e2a..216890d804c 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o +obj-$(CONFIG_CPU_HAS_PMU) += pmu.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt ifneq ($(CONFIG_ARCH_EBSA110),y) diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c new file mode 100644 index 00000000000..a124312e343 --- /dev/null +++ b/arch/arm/kernel/pmu.c @@ -0,0 +1,103 @@ +/* + * linux/arch/arm/kernel/pmu.c + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include + +#include + +/* + * Define the IRQs for the system. We could use something like a platform + * device but that seems fairly heavyweight for this. Also, the performance + * counters can't be removed or hotplugged. + * + * Ordering is important: init_pmu() will use the ordering to set the affinity + * to the corresponding core. e.g. the first interrupt will go to cpu 0, the + * second goes to cpu 1 etc. + */ +static const int irqs[] = { +#if defined(CONFIG_ARCH_OMAP2) + 3, +#elif defined(CONFIG_ARCH_BCMRING) + IRQ_PMUIRQ, +#elif defined(CONFIG_MACH_REALVIEW_EB) + IRQ_EB11MP_PMU_CPU0, + IRQ_EB11MP_PMU_CPU1, + IRQ_EB11MP_PMU_CPU2, + IRQ_EB11MP_PMU_CPU3, +#elif defined(CONFIG_ARCH_OMAP3) + INT_34XX_BENCH_MPU_EMUL, +#elif defined(CONFIG_ARCH_IOP32X) + IRQ_IOP32X_CORE_PMU, +#elif defined(CONFIG_ARCH_IOP33X) + IRQ_IOP33X_CORE_PMU, +#elif defined(CONFIG_ARCH_PXA) + IRQ_PMU, +#endif +}; + +static const struct pmu_irqs pmu_irqs = { + .irqs = irqs, + .num_irqs = ARRAY_SIZE(irqs), +}; + +static volatile long pmu_lock; + +const struct pmu_irqs * +reserve_pmu(void) +{ + return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) : + &pmu_irqs; +} +EXPORT_SYMBOL_GPL(reserve_pmu); + +int +release_pmu(const struct pmu_irqs *irqs) +{ + if (WARN_ON(irqs != &pmu_irqs)) + return -EINVAL; + clear_bit_unlock(0, &pmu_lock); + return 0; +} +EXPORT_SYMBOL_GPL(release_pmu); + +static int +set_irq_affinity(int irq, + unsigned int cpu) +{ +#ifdef CONFIG_SMP + int err = irq_set_affinity(irq, cpumask_of(cpu)); + if (err) + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + return err; +#else + return 0; +#endif +} + +int +init_pmu(void) +{ + int i, err = 0; + + for (i = 0; i < pmu_irqs.num_irqs; ++i) { + err = set_irq_affinity(pmu_irqs.irqs[i], i); + if (err) + break; + } + + return err; +} +EXPORT_SYMBOL_GPL(init_pmu); -- cgit v1.2.3-70-g09d2 From 7ada189f5c8627662c23f49b3e68463f86fc511e Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Tue, 2 Feb 2010 20:24:58 +0100 Subject: ARM: 5900/2: arm: enable support for software perf events The perf events subsystem allows counting of both hardware and software events. This patch implements the bare minimum for software performance events. 
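The practical consequence of set_perf_event_pending() being a NOP shows up in how a hardware overflow handler can be written (a hypothetical sketch, not part of this patch: example_pmu_handler() is an invented name, while perf_event_do_pending() is the generic helper mentioned in the header comment below):

	/*
	 * Since the PMU interrupt is a regular IRQ and not an NMI, the
	 * pending perf work can be run directly from the handler with
	 * interrupts enabled; no deferral via a self-IPI is needed.
	 */
	static irqreturn_t example_pmu_handler(int irq, void *dev)
	{
		/* ... read and reset the overflowed counters ... */
		perf_event_do_pending();
		return IRQ_HANDLED;
	}
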
Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Jamie Iles Signed-off-by: Russell King --- arch/arm/Kconfig | 2 ++ arch/arm/include/asm/perf_event.h | 31 +++++++++++++++++++++++++++++++ arch/arm/mm/fault.c | 7 +++++++ 3 files changed, 40 insertions(+) create mode 100644 arch/arm/include/asm/perf_event.h (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9e08891062b..74d1e767f0b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -18,6 +18,8 @@ config ARM select HAVE_KRETPROBES if (HAVE_KPROBES) select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) select HAVE_GENERIC_DMA_COHERENT + select HAVE_PERF_EVENTS + select PERF_USE_VMALLOC help The ARM series is a line of low-power-consumption RISC chip designs licensed by ARM Ltd and targeted at embedded applications and diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h new file mode 100644 index 00000000000..49e3049aba3 --- /dev/null +++ b/arch/arm/include/asm/perf_event.h @@ -0,0 +1,31 @@ +/* + * linux/arch/arm/include/asm/perf_event.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PERF_EVENT_H__ +#define __ARM_PERF_EVENT_H__ + +/* + * NOP: on *most* (read: all supported) ARM platforms, the performance + * counter interrupts are regular interrupts and not an NMI. This + * means that when we receive the interrupt we can call + * perf_event_do_pending() that handles all of the work with + * interrupts enabled. + */ +static inline void +set_perf_event_pending(void) +{ +} + +/* ARM performance counters start from 1 (in the cp15 accesses) so use the + * same indexes here for consistency. */ +#define PERF_EVENT_INDEX_OFFSET 1 + +#endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 10e06801afb..9d40c341e07 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -302,6 +303,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) fault = __do_page_fault(mm, addr, fsr, tsk); up_read(&mm->mmap_sem); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); + if (fault & VM_FAULT_MAJOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); + else if (fault & VM_FAULT_MINOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); + /* * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR */ -- cgit v1.2.3-70-g09d2 From 0a0300dc8c4b3f3ce5c9ef5a0a4be5442590398f Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 12 Jan 2010 12:28:00 +0000 Subject: ARM: Consolidate clks_register() and similar Most machine classes want some way to register a block of clk_lookup structures, and most do it by implementing a clks_register() type function which walks an array, or by open-coding a loop. Consolidate all this into clkdev_add_table(). 
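The conversion at each call site is mechanical; a representative before/after, taken from the diffs below:

	/* before: every machine class open-coded (or wrapped) this loop */
	for (i = 0; i < ARRAY_SIZE(lookups); i++)
		clkdev_add(&lookups[i]);

	/* after: one call; clocks_mutex is also taken once, not per entry */
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
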
Acked-by: H Hartley Sweeten Reviewed-by: Kevin Hilman Acked-by: Eric Miao Signed-off-by: Russell King --- arch/arm/common/clkdev.c | 10 ++++++++++ arch/arm/include/asm/clkdev.h | 3 +++ arch/arm/mach-bcmring/core.c | 3 +-- arch/arm/mach-ep93xx/clock.c | 4 +--- arch/arm/mach-integrator/core.c | 3 +-- arch/arm/mach-integrator/integrator_cp.c | 4 +--- arch/arm/mach-mmp/clock.c | 8 -------- arch/arm/mach-mmp/clock.h | 2 -- arch/arm/mach-mmp/pxa168.c | 2 +- arch/arm/mach-mmp/pxa910.c | 2 +- arch/arm/mach-mx1/clock.c | 4 +--- arch/arm/mach-mx2/clock_imx21.c | 4 +--- arch/arm/mach-mx2/clock_imx27.c | 4 +--- arch/arm/mach-mx25/clock.c | 6 +----- arch/arm/mach-mx3/clock-imx35.c | 4 +--- arch/arm/mach-mx3/clock.c | 4 +--- arch/arm/mach-mxc91231/clock.c | 4 +--- arch/arm/mach-pxa/clock.c | 8 -------- arch/arm/mach-pxa/clock.h | 4 ---- arch/arm/mach-pxa/eseries.c | 2 +- arch/arm/mach-pxa/pxa25x.c | 4 ++-- arch/arm/mach-pxa/pxa27x.c | 2 +- arch/arm/mach-pxa/pxa300.c | 4 ++-- arch/arm/mach-pxa/pxa320.c | 2 +- arch/arm/mach-pxa/pxa3xx.c | 2 +- arch/arm/mach-realview/core.c | 5 +---- arch/arm/mach-u300/clock.c | 5 +---- arch/arm/mach-ux500/clock.c | 5 +---- arch/arm/mach-versatile/core.c | 3 +-- arch/arm/mach-w90x900/clock.c | 9 --------- arch/arm/mach-w90x900/clock.h | 1 - arch/arm/mach-w90x900/cpu.c | 2 +- arch/arm/plat-stmp3xxx/clock.c | 3 +-- 33 files changed, 40 insertions(+), 92 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c index aae5bc01acc..446b696196e 100644 --- a/arch/arm/common/clkdev.c +++ b/arch/arm/common/clkdev.c @@ -99,6 +99,16 @@ void clkdev_add(struct clk_lookup *cl) } EXPORT_SYMBOL(clkdev_add); +void __init clkdev_add_table(struct clk_lookup *cl, size_t num) +{ + mutex_lock(&clocks_mutex); + while (num--) { + list_add_tail(&cl->node, &clocks); + cl++; + } + mutex_unlock(&clocks_mutex); +} + #define MAX_DEV_ID 20 #define MAX_CON_ID 16 diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h index b6ec7c627b3..7a0690da5e6 100644 --- a/arch/arm/include/asm/clkdev.h +++ b/arch/arm/include/asm/clkdev.h @@ -27,4 +27,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); +void clkdev_add_table(struct clk_lookup *, size_t); +int clk_add_alias(const char *, const char *, char *, struct device *); + #endif diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c index e590bbe0a7b..72e405df0fb 100644 --- a/arch/arm/mach-bcmring/core.c +++ b/arch/arm/mach-bcmring/core.c @@ -142,8 +142,7 @@ void __init bcmring_amba_init(void) chipcHw_busInterfaceClockEnable(bus_clock); - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { struct amba_device *d = amba_devs[i]; diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 1d0f9d8aff2..bb3c6219644 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -445,7 +445,6 @@ static void __init ep93xx_dma_clock_init(void) static int __init ep93xx_clock_init(void) { u32 value; - int i; value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1); if (!(value & 0x00800000)) { /* PLL1 bypassed? 
*/ @@ -474,8 +473,7 @@ static int __init ep93xx_clock_init(void) clk_f.rate / 1000000, clk_h.rate / 1000000, clk_p.rate / 1000000); - for (i = 0; i < ARRAY_SIZE(clocks); i++) - clkdev_add(&clocks[i]); + clkdev_add_table(clocks, ARRAY_SIZE(clocks)); return 0; } arch_initcall(ep93xx_clock_init); diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index a0f60e55da6..8b390e36ba6 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -144,8 +144,7 @@ static int __init integrator_init(void) { int i; - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { struct amba_device *d = amba_devs[i]; diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index 3f35293d457..66ef86d6d9e 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c @@ -558,9 +558,7 @@ static void __init intcp_init(void) { int i; - for (i = 0; i < ARRAY_SIZE(cp_lookups); i++) - clkdev_add(&cp_lookups[i]); - + clkdev_add_table(cp_lookups, ARRAY_SIZE(cp_lookups)); platform_add_devices(intcp_devs, ARRAY_SIZE(intcp_devs)); for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { diff --git a/arch/arm/mach-mmp/clock.c b/arch/arm/mach-mmp/clock.c index 2a46ed5cc2a..886e05648f0 100644 --- a/arch/arm/mach-mmp/clock.c +++ b/arch/arm/mach-mmp/clock.c @@ -88,11 +88,3 @@ unsigned long clk_get_rate(struct clk *clk) return rate; } EXPORT_SYMBOL(clk_get_rate); - -void clks_register(struct clk_lookup *clks, size_t num) -{ - int i; - - for (i = 0; i < num; i++) - clkdev_add(&clks[i]); -} diff --git a/arch/arm/mach-mmp/clock.h b/arch/arm/mach-mmp/clock.h index eefffbe683b..016ae94691c 100644 --- a/arch/arm/mach-mmp/clock.h +++ b/arch/arm/mach-mmp/clock.h @@ -68,5 +68,3 @@ struct clk clk_##_name = { \ extern struct clk clk_pxa168_gpio; extern struct clk clk_pxa168_timers; - -extern void clks_register(struct clk_lookup *, size_t); diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index 37dbdde17fa..1873c821df9 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c @@ -94,7 +94,7 @@ static int __init pxa168_init(void) mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(pxa168_mfp_addr_map); pxa_init_dma(IRQ_PXA168_DMA_INT0, 32); - clks_register(ARRAY_AND_SIZE(pxa168_clkregs)); + clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs)); } return 0; diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c index d4049508a4d..46f2d69bef3 100644 --- a/arch/arm/mach-mmp/pxa910.c +++ b/arch/arm/mach-mmp/pxa910.c @@ -131,7 +131,7 @@ static int __init pxa910_init(void) mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(pxa910_mfp_addr_map); pxa_init_dma(IRQ_PXA910_DMA_INT0, 32); - clks_register(ARRAY_AND_SIZE(pxa910_clkregs)); + clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs)); } return 0; diff --git a/arch/arm/mach-mx1/clock.c b/arch/arm/mach-mx1/clock.c index d1b588519ad..6cf2d4a7511 100644 --- a/arch/arm/mach-mx1/clock.c +++ b/arch/arm/mach-mx1/clock.c @@ -570,7 +570,6 @@ static struct clk_lookup lookups[] __initdata = { int __init mx1_clocks_init(unsigned long fref) { unsigned int reg; - int i; /* disable clocks we are able to */ __raw_writel(0, SCM_GCCR); @@ -592,8 +591,7 @@ int __init mx1_clocks_init(unsigned long fref) reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET; clko_clk.parent = (struct clk *)clko_clocks[reg]; - for (i = 0; i < ARRAY_SIZE(lookups); i++) - 
clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); clk_enable(&hclk); clk_enable(&fclk); diff --git a/arch/arm/mach-mx2/clock_imx21.c b/arch/arm/mach-mx2/clock_imx21.c index 91901b5d56c..e82b489d121 100644 --- a/arch/arm/mach-mx2/clock_imx21.c +++ b/arch/arm/mach-mx2/clock_imx21.c @@ -968,7 +968,6 @@ static struct clk_lookup lookups[] = { */ int __init mx21_clocks_init(unsigned long lref, unsigned long href) { - int i; u32 cscr; external_low_reference = lref; @@ -986,8 +985,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href) else spll_clk.parent = &fpm_clk; - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); /* Turn off all clock gates */ __raw_writel(0, CCM_PCCR0); diff --git a/arch/arm/mach-mx2/clock_imx27.c b/arch/arm/mach-mx2/clock_imx27.c index b010bf9ceaa..18c53a6487f 100644 --- a/arch/arm/mach-mx2/clock_imx27.c +++ b/arch/arm/mach-mx2/clock_imx27.c @@ -719,7 +719,6 @@ static void __init to2_adjust_clocks(void) int __init mx27_clocks_init(unsigned long fref) { u32 cscr = __raw_readl(CCM_CSCR); - int i; external_high_reference = fref; @@ -736,8 +735,7 @@ int __init mx27_clocks_init(unsigned long fref) to2_adjust_clocks(); - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); /* Turn off all clocks we do not need */ __raw_writel(0, CCM_PCCR0); diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c index 6e838b85771..66916f10481 100644 --- a/arch/arm/mach-mx25/clock.c +++ b/arch/arm/mach-mx25/clock.c @@ -210,11 +210,7 @@ static struct clk_lookup lookups[] = { int __init mx25_clocks_init(unsigned long fref) { - int i; - - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); - + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); return 0; diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c index 7584b4c6c55..f3f41fa4f21 100644 --- a/arch/arm/mach-mx3/clock-imx35.c +++ b/arch/arm/mach-mx3/clock-imx35.c @@ -485,15 +485,13 @@ static struct clk_lookup lookups[] = { int __init mx35_clocks_init() { - int i; unsigned int ll = 0; #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) ll = (3 << 16); #endif - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); /* Turn off all clocks except the ones we need to survive, namely: * EMI, GPIO1/2/3, GPT, IOMUX, MAX and eventually uart diff --git a/arch/arm/mach-mx3/clock.c b/arch/arm/mach-mx3/clock.c index 27a318af0d2..b5c39a016db 100644 --- a/arch/arm/mach-mx3/clock.c +++ b/arch/arm/mach-mx3/clock.c @@ -578,12 +578,10 @@ static struct clk_lookup lookups[] = { int __init mx31_clocks_init(unsigned long fref) { u32 reg; - int i; ckih_rate = fref; - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); /* change the csi_clk parent if necessary */ reg = __raw_readl(MXC_CCM_CCMR); diff --git a/arch/arm/mach-mxc91231/clock.c b/arch/arm/mach-mxc91231/clock.c index ecfa37fef8a..5c85075d8a5 100644 --- a/arch/arm/mach-mxc91231/clock.c +++ b/arch/arm/mach-mxc91231/clock.c @@ -624,7 +624,6 @@ static struct clk_lookup lookups[] = { int __init mxc91231_clocks_init(unsigned long fref) { void __iomem *gpt_base; - int i; ckih_rate = fref; @@ -632,8 +631,7 @@ int __init mxc91231_clocks_init(unsigned long fref) sdhc_clk[0].parent = 
clk_sdhc_parent(&sdhc_clk[0]); sdhc_clk[1].parent = clk_sdhc_parent(&sdhc_clk[1]); - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); gpt_base = MXC91231_IO_ADDRESS(MXC91231_GPT1_BASE_ADDR); mxc_timer_init(&gpt_clk, gpt_base, MXC91231_INT_GPT); diff --git a/arch/arm/mach-pxa/clock.c b/arch/arm/mach-pxa/clock.c index 49ae3829231..abba0089a2a 100644 --- a/arch/arm/mach-pxa/clock.c +++ b/arch/arm/mach-pxa/clock.c @@ -78,11 +78,3 @@ const struct clkops clk_cken_ops = { .enable = clk_cken_enable, .disable = clk_cken_disable, }; - -void clks_register(struct clk_lookup *clks, size_t num) -{ - int i; - - for (i = 0; i < num; i++) - clkdev_add(&clks[i]); -} diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h index 978a3667e90..d8488742b80 100644 --- a/arch/arm/mach-pxa/clock.h +++ b/arch/arm/mach-pxa/clock.h @@ -67,7 +67,3 @@ extern void clk_pxa3xx_cken_enable(struct clk *); extern void clk_pxa3xx_cken_disable(struct clk *); #endif -void clks_register(struct clk_lookup *clks, size_t num); -int clk_add_alias(const char *alias, const char *alias_name, char *id, - struct device *dev); - diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c index 91417f03506..96ed1308163 100644 --- a/arch/arm/mach-pxa/eseries.c +++ b/arch/arm/mach-pxa/eseries.c @@ -128,6 +128,6 @@ static struct clk_lookup eseries_clkregs[] = { void eseries_register_clks(void) { - clks_register(eseries_clkregs, ARRAY_SIZE(eseries_clkregs)); + clkdev_add_table(eseries_clkregs, ARRAY_SIZE(eseries_clkregs)); } diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index 2c1b0b70d01..0b9ad30bfd5 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -349,7 +349,7 @@ static int __init pxa25x_init(void) reset_status = RCSR; - clks_register(pxa25x_clkregs, ARRAY_SIZE(pxa25x_clkregs)); + clkdev_add_table(pxa25x_clkregs, ARRAY_SIZE(pxa25x_clkregs)); if ((ret = pxa_init_dma(IRQ_DMA, 16))) return ret; @@ -370,7 +370,7 @@ static int __init pxa25x_init(void) /* Only add HWUART for PXA255/26x; PXA210/250 do not have it. 
*/ if (cpu_is_pxa255()) - clks_register(&pxa25x_hwuart_clkreg, 1); + clkdev_add(&pxa25x_hwuart_clkreg); return ret; } diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 6a0b73167e0..d783123e2d4 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -392,7 +392,7 @@ static int __init pxa27x_init(void) reset_status = RCSR; - clks_register(pxa27x_clkregs, ARRAY_SIZE(pxa27x_clkregs)); + clkdev_add_table(pxa27x_clkregs, ARRAY_SIZE(pxa27x_clkregs)); if ((ret = pxa_init_dma(IRQ_DMA, 32))) return ret; diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c index f4af6e2bef8..40bb16501d8 100644 --- a/arch/arm/mach-pxa/pxa300.c +++ b/arch/arm/mach-pxa/pxa300.c @@ -102,12 +102,12 @@ static int __init pxa300_init(void) if (cpu_is_pxa300() || cpu_is_pxa310()) { mfp_init_base(io_p2v(MFPR_BASE)); mfp_init_addr(pxa300_mfp_addr_map); - clks_register(ARRAY_AND_SIZE(common_clkregs)); + clkdev_add_table(ARRAY_AND_SIZE(common_clkregs)); } if (cpu_is_pxa310()) { mfp_init_addr(pxa310_mfp_addr_map); - clks_register(ARRAY_AND_SIZE(pxa310_clkregs)); + clkdev_add_table(ARRAY_AND_SIZE(pxa310_clkregs)); } return 0; diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c index c7373e74a10..8d614ecd8e9 100644 --- a/arch/arm/mach-pxa/pxa320.c +++ b/arch/arm/mach-pxa/pxa320.c @@ -90,7 +90,7 @@ static int __init pxa320_init(void) if (cpu_is_pxa320()) { mfp_init_base(io_p2v(MFPR_BASE)); mfp_init_addr(pxa320_mfp_addr_map); - clks_register(ARRAY_AND_SIZE(pxa320_clkregs)); + clkdev_add_table(ARRAY_AND_SIZE(pxa320_clkregs)); } return 0; diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index fcb0721f466..4d7c03e7250 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -634,7 +634,7 @@ static int __init pxa3xx_init(void) */ ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); - clks_register(pxa3xx_clkregs, ARRAY_SIZE(pxa3xx_clkregs)); + clkdev_add_table(pxa3xx_clkregs, ARRAY_SIZE(pxa3xx_clkregs)); if ((ret = pxa_init_dma(IRQ_DMA, 32))) return ret; diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 9f293438e02..90bd4ef71b2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -346,10 +346,7 @@ static struct clk_lookup lookups[] = { static int __init clk_init(void) { - int i; - - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); return 0; } arch_initcall(clk_init); diff --git a/arch/arm/mach-u300/clock.c b/arch/arm/mach-u300/clock.c index 111f7ea32b3..c174ed1f369 100644 --- a/arch/arm/mach-u300/clock.c +++ b/arch/arm/mach-u300/clock.c @@ -1276,11 +1276,8 @@ static struct clk_lookup lookups[] = { static void __init clk_register(void) { - int i; - /* Register the lookups */ - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); } /* diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c index 20b6ebb6783..8359a73d004 100644 --- a/arch/arm/mach-ux500/clock.c +++ b/arch/arm/mach-ux500/clock.c @@ -85,11 +85,8 @@ static struct clk_lookup lookups[] = { static int __init clk_init(void) { - int i; - /* register the clock lookups */ - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); return 0; } arch_initcall(clk_init); diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index e13be7c444c..9ddb49b1cb7 100644 --- 
a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -851,8 +851,7 @@ void __init versatile_init(void) { int i; - for (i = 0; i < ARRAY_SIZE(lookups); i++) - clkdev_add(&lookups[i]); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); platform_device_register(&versatile_flash_device); platform_device_register(&versatile_i2c_device); diff --git a/arch/arm/mach-w90x900/clock.c b/arch/arm/mach-w90x900/clock.c index b785994bab0..2c371ff22e5 100644 --- a/arch/arm/mach-w90x900/clock.c +++ b/arch/arm/mach-w90x900/clock.c @@ -90,12 +90,3 @@ void nuc900_subclk_enable(struct clk *clk, int enable) __raw_writel(clken, W90X900_VA_CLKPWR + SUBCLK); } - - -void clks_register(struct clk_lookup *clks, size_t num) -{ - int i; - - for (i = 0; i < num; i++) - clkdev_add(&clks[i]); -} diff --git a/arch/arm/mach-w90x900/clock.h b/arch/arm/mach-w90x900/clock.h index f5816a06eed..c56ddab3d91 100644 --- a/arch/arm/mach-w90x900/clock.h +++ b/arch/arm/mach-w90x900/clock.h @@ -14,7 +14,6 @@ void nuc900_clk_enable(struct clk *clk, int enable); void nuc900_subclk_enable(struct clk *clk, int enable); -void clks_register(struct clk_lookup *clks, size_t num); struct clk { unsigned long cken; diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c index 20dc0c96214..6f5ca532883 100644 --- a/arch/arm/mach-w90x900/cpu.c +++ b/arch/arm/mach-w90x900/cpu.c @@ -208,6 +208,6 @@ void __init nuc900_map_io(struct map_desc *mach_desc, int mach_size) void __init nuc900_init_clocks(void) { - clks_register(nuc900_clkregs, ARRAY_SIZE(nuc900_clkregs)); + clkdev_add_table(nuc900_clkregs, ARRAY_SIZE(nuc900_clkregs)); } diff --git a/arch/arm/plat-stmp3xxx/clock.c b/arch/arm/plat-stmp3xxx/clock.c index 5d2f19a09e4..e593a2a801c 100644 --- a/arch/arm/plat-stmp3xxx/clock.c +++ b/arch/arm/plat-stmp3xxx/clock.c @@ -1126,9 +1126,8 @@ static int __init clk_init(void) if (ops && ops->set_parent) ops->set_parent(cl->clk, cl->clk->parent); } - - clkdev_add(cl); } + clkdev_add_table(onchip_clks, ARRAY_SIZE(onchip_clks)); return 0; } -- cgit v1.2.3-70-g09d2 From 18eabe2347ae7a11b3db768695913724166dfb0e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 31 Oct 2009 16:52:16 +0000 Subject: ARM: dma-mapping: introduce the idea of buffer ownership The DMA API has the notion of buffer ownership; make it explicit in the ARM implementation of this API. This gives us a set of hooks to allow us to deal with CPU cache issues arising from non-cache coherent DMA. Signed-off-by: Russell King Tested-By: Santosh Shilimkar Tested-By: Jamie Iles --- arch/arm/common/dmabounce.c | 4 ++- arch/arm/include/asm/dma-mapping.h | 64 ++++++++++++++++++++++++++++---------- arch/arm/mm/dma-mapping.c | 13 +++++--- 3 files changed, 58 insertions(+), 23 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index bc90364a96c..51499d68b16 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -277,7 +277,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, * We don't need to sync the DMA buffer since * it was allocated via the coherent allocators. 
*/ - dma_cache_maint(ptr, size, dir); + __dma_single_cpu_to_dev(ptr, size, dir); } return dma_addr; @@ -315,6 +315,8 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, __cpuc_flush_kernel_dcache_area(ptr, size); } free_safe_buffer(dev->archdata.dmabounce, buf); + } else { + __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir); } } diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a96300bf83f..e850f5c1607 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -57,19 +57,48 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) #endif /* - * DMA-consistent mapping functions. These allocate/free a region of - * uncached, unwrite-buffered mapped memory space for use with DMA - * devices. This is the "generic" version. The PCI specific version - * is in pci.h - * - * Note: Drivers should NOT use this function directly, as it will break - * platforms with CONFIG_DMABOUNCE. - * Use the driver DMA support - see dma-mapping.h (dma_sync_*) + * Private support functions: these are not part of the API and are + * liable to change. Drivers must not use these. */ extern void dma_cache_maint(const void *kaddr, size_t size, int rw); extern void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, int rw); +/* + * The DMA API is built upon the notion of "buffer ownership". A buffer + * is either exclusively owned by the CPU (and therefore may be accessed + * by it) or exclusively owned by the DMA device. These helper functions + * represent the transitions between these two ownership states. + * + * As above, these are private support functions and not part of the API. + * Drivers must not use these. + */ +static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + if (!arch_is_coherent()) + dma_cache_maint(kaddr, size, dir); +} + +static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + /* nothing to do */ +} + +static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + if (!arch_is_coherent()) + dma_cache_maint_page(page, off, size, dir); +} + +static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + /* nothing to do */ +} + /* * Return whether the given device DMA address mask can be supported * properly. 
For example, if your device can only drive the low 24-bits @@ -304,8 +333,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint(cpu_addr, size, dir); + __dma_single_cpu_to_dev(cpu_addr, size, dir); return virt_to_dma(dev, cpu_addr); } @@ -329,8 +357,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint_page(page, offset, size, dir); + __dma_page_cpu_to_dev(page, offset, size, dir); return page_to_dma(dev, page) + offset; } @@ -352,7 +379,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); } /** @@ -372,7 +399,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK, + size, dir); } #endif /* CONFIG_DMABOUNCE */ @@ -400,7 +428,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, { BUG_ON(!valid_dma_direction(dir)); - dmabounce_sync_for_cpu(dev, handle, offset, size, dir); + if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) + return; + + __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, @@ -412,8 +443,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) return; - if (!arch_is_coherent()) - dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); + __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_for_cpu(struct device *dev, diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 26325cb5d36..a316c945952 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -573,8 +573,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) { - dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, - sg_dma_len(s), dir); + if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, + sg_dma_len(s), dir)) + continue; + + __dma_page_dev_to_cpu(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_cpu); @@ -597,9 +601,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, sg_dma_len(s), dir)) continue; - if (!arch_is_coherent()) - dma_cache_maint_page(sg_page(s), s->offset, - s->length, dir); + __dma_page_cpu_to_dev(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_device); -- cgit v1.2.3-70-g09d2 From 4ea0d7371e808628d11154b0d44140b70f05b998 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 16:27:17 +0000 Subject: ARM: dma-mapping: push buffer ownership down into dma-mapping.c Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/dma-mapping.h | 39 ++++++++++++++++++++++++-------------- arch/arm/mm/dma-mapping.c | 34 +++++++++++++++++++++++++++++---- 2 files changed, 55 insertions(+), 18 deletions(-) (limited to 'arch/arm/include') diff --git 
a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index e850f5c1607..256ee1c9f51 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -56,47 +56,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) } #endif -/* - * Private support functions: these are not part of the API and are - * liable to change. Drivers must not use these. - */ -extern void dma_cache_maint(const void *kaddr, size_t size, int rw); -extern void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int rw); - /* * The DMA API is built upon the notion of "buffer ownership". A buffer * is either exclusively owned by the CPU (and therefore may be accessed * by it) or exclusively owned by the DMA device. These helper functions * represent the transitions between these two ownership states. * - * As above, these are private support functions and not part of the API. - * Drivers must not use these. + * Note, however, that on later ARMs, this notion does not work due to + * speculative prefetches. We model our approach on the assumption that + * the CPU does do speculative prefetches, which means we clean caches + * before transfers and delay cache invalidation until transfer completion. + * + * Private support functions: these are not part of the API and are + * liable to change. Drivers must not use these. */ static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + extern void ___dma_single_cpu_to_dev(const void *, size_t, + enum dma_data_direction); + if (!arch_is_coherent()) - dma_cache_maint(kaddr, size, dir); + ___dma_single_cpu_to_dev(kaddr, size, dir); } static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + extern void ___dma_single_dev_to_cpu(const void *, size_t, + enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_single_dev_to_cpu(kaddr, size, dir); } static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { + extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, + size_t, enum dma_data_direction); + if (!arch_is_coherent()) - dma_cache_maint_page(page, off, size, dir); + ___dma_page_cpu_to_dev(page, off, size, dir); } static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, + size_t, enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_page_dev_to_cpu(page, off, size, dir); } /* diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index a316c945952..bbf87880b91 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -404,7 +404,7 @@ EXPORT_SYMBOL(dma_free_coherent); * platforms with CONFIG_DMABOUNCE. 
* Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ -void dma_cache_maint(const void *start, size_t size, int direction) +static void dma_cache_maint(const void *start, size_t size, int direction) { void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); @@ -431,7 +431,20 @@ void dma_cache_maint(const void *start, size_t size, int direction) inner_op(start, start + size); outer_op(__pa(start), __pa(start) + size); } -EXPORT_SYMBOL(dma_cache_maint); + +void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + dma_cache_maint(kaddr, size, dir); +} +EXPORT_SYMBOL(___dma_single_cpu_to_dev); + +void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + /* nothing to do */ +} +EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, size_t size, int direction) @@ -474,7 +487,7 @@ static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, outer_op(paddr, paddr + size); } -void dma_cache_maint_page(struct page *page, unsigned long offset, +static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, int dir) { /* @@ -499,7 +512,20 @@ void dma_cache_maint_page(struct page *page, unsigned long offset, left -= len; } while (left); } -EXPORT_SYMBOL(dma_cache_maint_page); + +void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + dma_cache_maint_page(page, off, size, dir); +} +EXPORT_SYMBOL(___dma_page_cpu_to_dev); + +void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + /* nothing to do */ +} +EXPORT_SYMBOL(___dma_page_dev_to_cpu); /** * dma_map_sg - map a set of SG buffers for streaming mode DMA -- cgit v1.2.3-70-g09d2 From a9c9147eb9b1dba0ce567a41897c7773b4d1b0bc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:19:58 +0000 Subject: ARM: dma-mapping: provide per-cpu type map/unmap functions Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/cacheflush.h | 9 +++++++++ arch/arm/kernel/asm-offsets.c | 5 +++++ arch/arm/mm/cache-fa.S | 26 ++++++++++++++++++++++++ arch/arm/mm/cache-v3.S | 24 ++++++++++++++++++++++ arch/arm/mm/cache-v4.S | 24 ++++++++++++++++++++++ arch/arm/mm/cache-v4wb.S | 26 ++++++++++++++++++++++++ arch/arm/mm/cache-v4wt.S | 25 +++++++++++++++++++++++ arch/arm/mm/cache-v6.S | 26 ++++++++++++++++++++++++ arch/arm/mm/cache-v7.S | 26 ++++++++++++++++++++++++ arch/arm/mm/dma-mapping.c | 29 +++++++++++---------------- arch/arm/mm/proc-arm1020.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm1020e.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm1022.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm1026.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm920.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm922.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm925.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm926.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm940.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm946.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-feroceon.S | 42 +++++++++++++++++++++++++++++++++++++++ arch/arm/mm/proc-mohawk.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-xsc3.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-xscale.S | 41 ++++++++++++++++++++++++++++++++++++++ 24 files changed, 598 insertions(+), 17 deletions(-) (limited to 
'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 730aefcfbee..4c733236e34 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -213,6 +213,9 @@ struct cpu_cache_fns { void (*coherent_user_range)(unsigned long, unsigned long); void (*flush_kern_dcache_area)(void *, size_t); + void (*dma_map_area)(const void *, size_t, int); + void (*dma_unmap_area)(const void *, size_t, int); + void (*dma_inv_range)(const void *, const void *); void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); @@ -244,6 +247,8 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ +#define dmac_map_area cpu_cache.dma_map_area +#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_inv_range cpu_cache.dma_inv_range #define dmac_clean_range cpu_cache.dma_clean_range #define dmac_flush_range cpu_cache.dma_flush_range @@ -270,10 +275,14 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_inv_range __glue(_CACHE,_dma_inv_range) #define dmac_clean_range __glue(_CACHE,_dma_clean_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) +extern void dmac_map_area(const void *, size_t, int); +extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_inv_range(const void *, const void *); extern void dmac_clean_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 4a881258bb1..883511522fc 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -12,6 +12,7 @@ */ #include #include +#include #include #include #include @@ -112,5 +113,9 @@ int main(void) #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); #endif + BLANK(); + DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); + DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); + DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); return 0; } diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index a89444a3c01..8ebffdd6fcf 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq fa_dma_clean_range + bcs fa_dma_inv_range + b fa_dma_flush_range +ENDPROC(fa_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_unmap_area) + mov pc, lr +ENDPROC(fa_dma_unmap_area) + __INITDATA .type fa_cache_fns, #object @@ -215,6 +239,8 @@ ENTRY(fa_cache_fns) .long fa_coherent_kern_range .long fa_coherent_user_range .long fa_flush_kern_dcache_area + .long fa_dma_map_area + .long fa_dma_unmap_area .long fa_dma_inv_range .long fa_dma_clean_range .long fa_dma_flush_range diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2a482731ea3..6df52dc014b 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -121,6 +121,28 @@ 
ENTRY(v3_dma_flush_range) ENTRY(v3_dma_clean_range) mov pc, lr +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v3_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v3_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v3_dma_map_area) + mov pc, lr +ENDPROC(v3_dma_unmap_area) +ENDPROC(v3_dma_map_area) + __INITDATA .type v3_cache_fns, #object @@ -131,6 +153,8 @@ ENTRY(v3_cache_fns) .long v3_coherent_kern_range .long v3_coherent_user_range .long v3_flush_kern_dcache_area + .long v3_dma_map_area + .long v3_dma_unmap_area .long v3_dma_inv_range .long v3_dma_clean_range .long v3_dma_flush_range diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 5c7da3e372e..df3b423713b 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -133,6 +133,28 @@ ENTRY(v4_dma_flush_range) ENTRY(v4_dma_clean_range) mov pc, lr +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v4_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4_dma_map_area) + mov pc, lr +ENDPROC(v4_dma_unmap_area) +ENDPROC(v4_dma_map_area) + __INITDATA .type v4_cache_fns, #object @@ -143,6 +165,8 @@ ENTRY(v4_cache_fns) .long v4_coherent_kern_range .long v4_coherent_user_range .long v4_flush_kern_dcache_area + .long v4_dma_map_area + .long v4_dma_unmap_area .long v4_dma_inv_range .long v4_dma_clean_range .long v4_dma_flush_range diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 3dbedf1ec0e..32e7a744849 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range) .globl v4wb_dma_flush_range .set v4wb_dma_flush_range, v4wb_coherent_kern_range +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v4wb_dma_clean_range + bcs v4wb_dma_inv_range + b v4wb_dma_flush_range +ENDPROC(v4wb_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_unmap_area) + mov pc, lr +ENDPROC(v4wb_dma_unmap_area) + __INITDATA .type v4wb_cache_fns, #object @@ -226,6 +250,8 @@ ENTRY(v4wb_cache_fns) .long v4wb_coherent_kern_range .long v4wb_coherent_user_range .long v4wb_flush_kern_dcache_area + .long v4wb_dma_map_area + .long v4wb_dma_unmap_area .long v4wb_dma_inv_range .long v4wb_dma_clean_range .long v4wb_dma_flush_range diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index b3b7410270b..3d8dad5b265 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -172,6 +172,29 @@ ENTRY(v4wt_dma_clean_range) .globl v4wt_dma_flush_range .equ v4wt_dma_flush_range, v4wt_dma_inv_range +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v4wt_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, 
dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_map_area) + mov pc, lr +ENDPROC(v4wt_dma_unmap_area) +ENDPROC(v4wt_dma_map_area) + __INITDATA .type v4wt_cache_fns, #object @@ -182,6 +205,8 @@ ENTRY(v4wt_cache_fns) .long v4wt_coherent_kern_range .long v4wt_coherent_user_range .long v4wt_flush_kern_dcache_area + .long v4wt_dma_map_area + .long v4wt_dma_unmap_area .long v4wt_dma_inv_range .long v4wt_dma_clean_range .long v4wt_dma_flush_range diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 4ba0a24ce6f..6f926dd0e0f 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -263,6 +263,30 @@ ENTRY(v6_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v6_dma_clean_range + bcs v6_dma_inv_range + b v6_dma_flush_range +ENDPROC(v6_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_unmap_area) + mov pc, lr +ENDPROC(v6_dma_unmap_area) + __INITDATA .type v6_cache_fns, #object @@ -273,6 +297,8 @@ ENTRY(v6_cache_fns) .long v6_coherent_kern_range .long v6_coherent_user_range .long v6_flush_kern_dcache_area + .long v6_dma_map_area + .long v6_dma_unmap_area .long v6_dma_inv_range .long v6_dma_clean_range .long v6_dma_flush_range diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 9073db849fb..e30d8bc6718 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -271,6 +271,30 @@ ENTRY(v7_dma_flush_range) mov pc, lr ENDPROC(v7_dma_flush_range) +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v7_dma_clean_range + bcs v7_dma_inv_range + b v7_dma_flush_range +ENDPROC(v7_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_unmap_area) + mov pc, lr +ENDPROC(v7_dma_unmap_area) + __INITDATA .type v7_cache_fns, #object @@ -281,6 +305,8 @@ ENTRY(v7_cache_fns) .long v7_coherent_kern_range .long v7_coherent_user_range .long v7_flush_kern_dcache_area + .long v7_dma_map_area + .long v7_dma_unmap_area .long v7_dma_inv_range .long v7_dma_clean_range .long v7_dma_flush_range diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 0d68d2c83cd..efa8efa33f5 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -406,35 +406,31 @@ EXPORT_SYMBOL(dma_free_coherent); */ static void dma_cache_maint(const void *start, size_t size, int direction) { - void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); - BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1)); - switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; outer_op = outer_inv_range; break; case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; outer_op = outer_clean_range; break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; outer_op = outer_flush_range; break; default: BUG(); } - inner_op(start, start + size); outer_op(__pa(start), 
__pa(start) + size); } void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + + dmac_map_area(kaddr, size, dir); dma_cache_maint(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_cpu_to_dev); @@ -442,12 +438,15 @@ EXPORT_SYMBOL(___dma_single_cpu_to_dev); void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + + dmac_unmap_area(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, void (*op)(const void *, const void *)) + size_t size, enum dma_data_direction dir, + void (*op)(const void *, size_t, int)) { /* * A single sg entry may refer to multiple physically contiguous @@ -471,12 +470,12 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; - op(vaddr, vaddr + len); + op(vaddr, len, dir); kunmap_high(page); } } else { vaddr = page_address(page) + offset; - op(vaddr, vaddr + len); + op(vaddr, len, dir); } offset = 0; page++; @@ -488,27 +487,23 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr; - void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); switch (dir) { case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; outer_op = outer_inv_range; break; case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; outer_op = outer_clean_range; break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; outer_op = outer_flush_range; break; default: BUG(); } - dma_cache_maint_page(page, off, size, inner_op); + dma_cache_maint_page(page, off, size, dir, dmac_map_area); paddr = page_to_phys(page) + off; outer_op(paddr, paddr + size); @@ -518,7 +513,7 @@ EXPORT_SYMBOL(___dma_page_cpu_to_dev); void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); } EXPORT_SYMBOL(___dma_page_dev_to_cpu); diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 8012e24282b..c85f5eb4263 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -330,6 +330,30 @@ ENTRY(arm1020_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020_dma_clean_range + bcs arm1020_dma_inv_range + b arm1020_dma_flush_range +ENDPROC(arm1020_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020_dma_unmap_area) + ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_cache_all .long arm1020_flush_user_cache_all @@ -337,6 +361,8 @@ ENTRY(arm1020_cache_fns) .long arm1020_coherent_kern_range .long arm1020_coherent_user_range .long arm1020_flush_kern_dcache_area + .long arm1020_dma_map_area + .long arm1020_dma_unmap_area .long arm1020_dma_inv_range .long arm1020_dma_clean_range
.long arm1020_dma_flush_range diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 41fe25d234f..5a3cf7620a2 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -316,6 +316,30 @@ ENTRY(arm1020e_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020e_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020e_dma_clean_range + bcs arm1020e_dma_inv_range + b arm1020e_dma_flush_range +ENDPROC(arm1020e_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020e_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020e_dma_unmap_area) + ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_cache_all .long arm1020e_flush_user_cache_all @@ -323,6 +347,8 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_coherent_kern_range .long arm1020e_coherent_user_range .long arm1020e_flush_kern_dcache_area + .long arm1020e_dma_map_area + .long arm1020e_dma_unmap_area .long arm1020e_dma_inv_range .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 20a5b1b31a7..fec8f587843 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -305,6 +305,30 @@ ENTRY(arm1022_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1022_dma_clean_range + bcs arm1022_dma_inv_range + b arm1022_dma_flush_range +ENDPROC(arm1022_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_unmap_area) + mov pc, lr +ENDPROC(arm1022_dma_unmap_area) + ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_cache_all .long arm1022_flush_user_cache_all @@ -312,6 +336,8 @@ ENTRY(arm1022_cache_fns) .long arm1022_coherent_kern_range .long arm1022_coherent_user_range .long arm1022_flush_kern_dcache_area + .long arm1022_dma_map_area + .long arm1022_dma_unmap_area .long arm1022_dma_inv_range .long arm1022_dma_clean_range .long arm1022_dma_flush_range diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 96aedb10fcc..9ece6f66649 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -299,6 +299,30 @@ ENTRY(arm1026_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1026_dma_clean_range + bcs arm1026_dma_inv_range + b arm1026_dma_flush_range +ENDPROC(arm1026_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_unmap_area) + mov pc, lr +ENDPROC(arm1026_dma_unmap_area) + ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_cache_all .long arm1026_flush_user_cache_all @@ -306,6 +330,8 @@ ENTRY(arm1026_cache_fns) .long arm1026_coherent_kern_range .long arm1026_coherent_user_range .long 
arm1026_flush_kern_dcache_area + .long arm1026_dma_map_area + .long arm1026_dma_unmap_area .long arm1026_dma_inv_range .long arm1026_dma_clean_range .long arm1026_dma_flush_range diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 471669e2d7c..6f6ab2747da 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -288,6 +288,30 @@ ENTRY(arm920_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm920_dma_clean_range + bcs arm920_dma_inv_range + b arm920_dma_flush_range +ENDPROC(arm920_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_unmap_area) + mov pc, lr +ENDPROC(arm920_dma_unmap_area) + ENTRY(arm920_cache_fns) .long arm920_flush_kern_cache_all .long arm920_flush_user_cache_all @@ -295,6 +319,8 @@ ENTRY(arm920_cache_fns) .long arm920_coherent_kern_range .long arm920_coherent_user_range .long arm920_flush_kern_dcache_area + .long arm920_dma_map_area + .long arm920_dma_unmap_area .long arm920_dma_inv_range .long arm920_dma_clean_range .long arm920_dma_flush_range diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index ee111b00fa4..4e4396b121c 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -290,6 +290,30 @@ ENTRY(arm922_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm922_dma_clean_range + bcs arm922_dma_inv_range + b arm922_dma_flush_range +ENDPROC(arm922_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_unmap_area) + mov pc, lr +ENDPROC(arm922_dma_unmap_area) + ENTRY(arm922_cache_fns) .long arm922_flush_kern_cache_all .long arm922_flush_user_cache_all @@ -297,6 +321,8 @@ ENTRY(arm922_cache_fns) .long arm922_coherent_kern_range .long arm922_coherent_user_range .long arm922_flush_kern_dcache_area + .long arm922_dma_map_area + .long arm922_dma_unmap_area .long arm922_dma_inv_range .long arm922_dma_clean_range .long arm922_dma_flush_range diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 8deb5bde58e..7c01c5d1108 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -341,6 +341,30 @@ ENTRY(arm925_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm925_dma_clean_range + bcs arm925_dma_inv_range + b arm925_dma_flush_range +ENDPROC(arm925_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_unmap_area) + mov pc, lr +ENDPROC(arm925_dma_unmap_area) + ENTRY(arm925_cache_fns) .long arm925_flush_kern_cache_all .long arm925_flush_user_cache_all @@ -348,6 +372,8 @@ ENTRY(arm925_cache_fns) .long arm925_coherent_kern_range 
.long arm925_coherent_user_range .long arm925_flush_kern_dcache_area + .long arm925_dma_map_area + .long arm925_dma_unmap_area .long arm925_dma_inv_range .long arm925_dma_clean_range .long arm925_dma_flush_range diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 64db6e275a4..72a01a4b80a 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -304,6 +304,30 @@ ENTRY(arm926_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm926_dma_clean_range + bcs arm926_dma_inv_range + b arm926_dma_flush_range +ENDPROC(arm926_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_unmap_area) + mov pc, lr +ENDPROC(arm926_dma_unmap_area) + ENTRY(arm926_cache_fns) .long arm926_flush_kern_cache_all .long arm926_flush_user_cache_all @@ -311,6 +335,8 @@ ENTRY(arm926_cache_fns) .long arm926_coherent_kern_range .long arm926_coherent_user_range .long arm926_flush_kern_dcache_area + .long arm926_dma_map_area + .long arm926_dma_unmap_area .long arm926_dma_inv_range .long arm926_dma_clean_range .long arm926_dma_flush_range diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 8196b9f401f..6bb58fca727 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -233,6 +233,30 @@ ENTRY(arm940_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm940_dma_clean_range + bcs arm940_dma_inv_range + b arm940_dma_flush_range +ENDPROC(arm940_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_unmap_area) + mov pc, lr +ENDPROC(arm940_dma_unmap_area) + ENTRY(arm940_cache_fns) .long arm940_flush_kern_cache_all .long arm940_flush_user_cache_all @@ -240,6 +264,8 @@ ENTRY(arm940_cache_fns) .long arm940_coherent_kern_range .long arm940_coherent_user_range .long arm940_flush_kern_dcache_area + .long arm940_dma_map_area + .long arm940_dma_unmap_area .long arm940_dma_inv_range .long arm940_dma_clean_range .long arm940_dma_flush_range diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 9a951239c86..ac0f9ba719d 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -275,6 +275,30 @@ ENTRY(arm946_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm946_dma_clean_range + bcs arm946_dma_inv_range + b arm946_dma_flush_range +ENDPROC(arm946_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_unmap_area) + mov pc, lr +ENDPROC(arm946_dma_unmap_area) + ENTRY(arm946_cache_fns) .long arm946_flush_kern_cache_all .long arm946_flush_user_cache_all @@ -282,6 +306,8 @@ ENTRY(arm946_cache_fns) 
.long arm946_coherent_kern_range .long arm946_coherent_user_range .long arm946_flush_kern_dcache_area + .long arm946_dma_map_area + .long arm946_dma_unmap_area .long arm946_dma_inv_range .long arm946_dma_clean_range .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index dbc39383e66..97e1d784f15 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -367,6 +367,44 @@ ENTRY(feroceon_range_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_dma_clean_range + bcs feroceon_dma_inv_range + b feroceon_dma_flush_range +ENDPROC(feroceon_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_range_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_range_dma_clean_range + bcs feroceon_range_dma_inv_range + b feroceon_range_dma_flush_range +ENDPROC(feroceon_range_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_unmap_area) + mov pc, lr +ENDPROC(feroceon_dma_unmap_area) + ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_cache_all .long feroceon_flush_user_cache_all @@ -374,6 +412,8 @@ ENTRY(feroceon_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_flush_kern_dcache_area + .long feroceon_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_dma_inv_range .long feroceon_dma_clean_range .long feroceon_dma_flush_range @@ -385,6 +425,8 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_range_flush_kern_dcache_area + .long feroceon_range_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_range_dma_inv_range .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9674d36cc97..55b7fbec654 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -268,6 +268,30 @@ ENTRY(mohawk_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq mohawk_dma_clean_range + bcs mohawk_dma_inv_range + b mohawk_dma_flush_range +ENDPROC(mohawk_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_unmap_area) + mov pc, lr +ENDPROC(mohawk_dma_unmap_area) + ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_cache_all .long mohawk_flush_user_cache_all @@ -275,6 +299,8 @@ ENTRY(mohawk_cache_fns) .long mohawk_coherent_kern_range .long mohawk_coherent_user_range .long mohawk_flush_kern_dcache_area + .long mohawk_dma_map_area + .long mohawk_dma_unmap_area .long mohawk_dma_inv_range .long mohawk_dma_clean_range .long mohawk_dma_flush_range diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 96456f54879..4e4ce889b3e 100644 --- a/arch/arm/mm/proc-xsc3.S +++ 
b/arch/arm/mm/proc-xsc3.S @@ -304,6 +304,30 @@ ENTRY(xsc3_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ data write barrier mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xsc3_dma_clean_range + bcs xsc3_dma_inv_range + b xsc3_dma_flush_range +ENDPROC(xsc3_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_unmap_area) + mov pc, lr +ENDPROC(xsc3_dma_unmap_area) + ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_cache_all .long xsc3_flush_user_cache_all @@ -311,6 +335,8 @@ ENTRY(xsc3_cache_fns) .long xsc3_coherent_kern_range .long xsc3_coherent_user_range .long xsc3_flush_kern_dcache_area + .long xsc3_dma_map_area + .long xsc3_dma_unmap_area .long xsc3_dma_inv_range .long xsc3_dma_clean_range .long xsc3_dma_flush_range diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 93df47265f2..a7999f94bf2 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -363,6 +363,43 @@ ENTRY(xscale_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + bcs xscale_dma_inv_range + b xscale_dma_flush_range +ENDPROC(xscale_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_a0_map_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + b xscale_dma_flush_range +ENDPROC(xscale_dma_a0_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_unmap_area) + mov pc, lr +ENDPROC(xscale_dma_unmap_area) + ENTRY(xscale_cache_fns) .long xscale_flush_kern_cache_all .long xscale_flush_user_cache_all @@ -370,6 +407,8 @@ ENTRY(xscale_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area + .long xscale_dma_map_area + .long xscale_dma_unmap_area .long xscale_dma_inv_range .long xscale_dma_clean_range .long xscale_dma_flush_range @@ -394,6 +433,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area + .long xscale_dma_a0_map_area + .long xscale_dma_unmap_area .long xscale_dma_flush_range .long xscale_dma_clean_range .long xscale_dma_flush_range -- cgit v1.2.3-70-g09d2 From 702b94bff3c50542a6e4ab9a4f4cef093262fe65 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:24:19 +0000 Subject: ARM: dma-mapping: remove dmac_clean_range and dmac_inv_range These are now unused, and so can be removed.
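In practical terms, code that used to call dmac_inv_range(), dmac_clean_range() or dmac_flush_range() directly now calls dmac_map_area()/dmac_unmap_area() with the DMA direction, and each per-CPU dma_map_area implementation picks the maintenance operation itself. As a rough C sketch of the dispatch the assembly entry points added earlier in the series implement (illustration only, not code from this patch; the *_range() helpers stand in for each CPU's now file-local routines):

static void dma_map_area_sketch(const void *start, size_t size, int dir)
{
	const void *end = start + size;

	if (dir == DMA_TO_DEVICE)		/* CPU wrote the buffer */
		dma_clean_range(start, end);	/* writeback only */
	else if (dir == DMA_FROM_DEVICE)	/* device will write it */
		dma_inv_range(start, end);	/* invalidate only */
	else					/* DMA_BIDIRECTIONAL */
		dma_flush_range(start, end);	/* writeback + invalidate */
}

This is the C equivalent of the three-way branch used throughout the assembly: cmp r2, #DMA_TO_DEVICE; beq ..._dma_clean_range; bcs ..._dma_inv_range; b ..._dma_flush_range.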
Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/cacheflush.h | 23 ----------------------- arch/arm/mm/cache-fa.S | 6 ++---- arch/arm/mm/cache-v3.S | 29 +---------------------------- arch/arm/mm/cache-v4.S | 29 +---------------------------- arch/arm/mm/cache-v4wb.S | 6 ++---- arch/arm/mm/cache-v4wt.S | 15 +-------------- arch/arm/mm/cache-v6.S | 6 ++---- arch/arm/mm/cache-v7.S | 6 ++---- arch/arm/mm/proc-arm1020.S | 6 ++---- arch/arm/mm/proc-arm1020e.S | 6 ++---- arch/arm/mm/proc-arm1022.S | 6 ++---- arch/arm/mm/proc-arm1026.S | 6 ++---- arch/arm/mm/proc-arm920.S | 6 ++---- arch/arm/mm/proc-arm922.S | 6 ++---- arch/arm/mm/proc-arm925.S | 6 ++---- arch/arm/mm/proc-arm926.S | 6 ++---- arch/arm/mm/proc-arm940.S | 6 ++---- arch/arm/mm/proc-arm946.S | 6 ++---- arch/arm/mm/proc-feroceon.S | 12 ++++-------- arch/arm/mm/proc-mohawk.S | 6 ++---- arch/arm/mm/proc-xsc3.S | 6 ++---- arch/arm/mm/proc-xscale.S | 8 ++------ 22 files changed, 41 insertions(+), 171 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 4c733236e34..e2908858741 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -182,21 +182,6 @@ * DMA Cache Coherency * =================== * - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - start - virtual start address - * - end - virtual end address - * - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - start - virtual start address - * - end - virtual end address - * * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
@@ -216,8 +201,6 @@ struct cpu_cache_fns { void (*dma_map_area)(const void *, size_t, int); void (*dma_unmap_area)(const void *, size_t, int); - void (*dma_inv_range)(const void *, const void *); - void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); }; @@ -249,8 +232,6 @@ extern struct cpu_cache_fns cpu_cache; */ #define dmac_map_area cpu_cache.dma_map_area #define dmac_unmap_area cpu_cache.dma_unmap_area -#define dmac_inv_range cpu_cache.dma_inv_range -#define dmac_clean_range cpu_cache.dma_clean_range #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -277,14 +258,10 @@ extern void __cpuc_flush_dcache_area(void *, size_t); */ #define dmac_map_area __glue(_CACHE,_dma_map_area) #define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) -#define dmac_inv_range __glue(_CACHE,_dma_inv_range) -#define dmac_clean_range __glue(_CACHE,_dma_clean_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) extern void dmac_map_area(const void *, size_t, int); extern void dmac_unmap_area(const void *, size_t, int); -extern void dmac_inv_range(const void *, const void *); -extern void dmac_clean_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *); #endif diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index 8ebffdd6fcf..7148e53e607 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_inv_range) +fa_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry @@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_clean_range) +fa_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -241,7 +241,5 @@ ENTRY(fa_cache_fns) .long fa_flush_kern_dcache_area .long fa_dma_map_area .long fa_dma_unmap_area - .long fa_dma_inv_range - .long fa_dma_clean_range .long fa_dma_flush_range .size fa_cache_fns, . - fa_cache_fns diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 6df52dc014b..c2ff3c599fe 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -83,20 +83,6 @@ ENTRY(v3_coherent_user_range) ENTRY(v3_flush_kern_dcache_area) /* FALLTHROUGH */ -/* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_inv_range) - /* FALLTHROUGH */ - /* * dma_flush_range(start, end) * @@ -108,17 +94,6 @@ ENTRY(v3_dma_inv_range) ENTRY(v3_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ flush ID cache - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_clean_range) mov pc, lr /* @@ -129,7 +104,7 @@ ENTRY(v3_dma_clean_range) */ ENTRY(v3_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v3_dma_inv_range + bne v3_dma_flush_range /* FALLTHROUGH */ /* @@ -155,7 +130,5 @@ ENTRY(v3_cache_fns) .long v3_flush_kern_dcache_area .long v3_dma_map_area .long v3_dma_unmap_area - .long v3_dma_inv_range - .long v3_dma_clean_range .long v3_dma_flush_range .size v3_cache_fns, . - v3_cache_fns diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index df3b423713b..4810f7e3e81 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -93,20 +93,6 @@ ENTRY(v4_coherent_user_range) ENTRY(v4_flush_kern_dcache_area) /* FALLTHROUGH */ -/* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4_dma_inv_range) - /* FALLTHROUGH */ - /* * dma_flush_range(start, end) * @@ -120,17 +106,6 @@ ENTRY(v4_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache #endif - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4_dma_clean_range) mov pc, lr /* @@ -141,7 +116,7 @@ ENTRY(v4_dma_clean_range) */ ENTRY(v4_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v4_dma_inv_range + bne v4_dma_flush_range /* FALLTHROUGH */ /* @@ -167,7 +142,5 @@ ENTRY(v4_cache_fns) .long v4_flush_kern_dcache_area .long v4_dma_map_area .long v4_dma_unmap_area - .long v4_dma_inv_range - .long v4_dma_clean_range .long v4_dma_flush_range .size v4_cache_fns, . - v4_cache_fns diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 32e7a744849..df8368afa10 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_inv_range) +v4wb_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_clean_range) +v4wb_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -252,7 +252,5 @@ ENTRY(v4wb_cache_fns) .long v4wb_flush_kern_dcache_area .long v4wb_dma_map_area .long v4wb_dma_unmap_area - .long v4wb_dma_inv_range - .long v4wb_dma_clean_range .long v4wb_dma_flush_range .size v4wb_cache_fns, . - v4wb_cache_fns diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index 3d8dad5b265..45c70312f43 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wt_dma_inv_range) +v4wt_dma_inv_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4wt_dma_clean_range) mov pc, lr /* @@ -207,7 +196,5 @@ ENTRY(v4wt_cache_fns) .long v4wt_flush_kern_dcache_area .long v4wt_dma_map_area .long v4wt_dma_unmap_area - .long v4wt_dma_inv_range - .long v4wt_dma_clean_range .long v4wt_dma_flush_range .size v4wt_cache_fns, . - v4wt_cache_fns diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 6f926dd0e0f..a11934e53fb 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_inv_range) +v6_dma_inv_range: tst r0, #D_CACHE_LINE_SIZE - 1 bic r0, r0, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE @@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_clean_range) +v6_dma_clean_range: bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef HARVARD_CACHE @@ -299,7 +299,5 @@ ENTRY(v6_cache_fns) .long v6_flush_kern_dcache_area .long v6_dma_map_area .long v6_dma_unmap_area - .long v6_dma_inv_range - .long v6_dma_clean_range .long v6_dma_flush_range .size v6_cache_fns, . - v6_cache_fns diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index e30d8bc6718..b1cd0fd9120 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_inv_range) +v7_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 @@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_clean_range) +v7_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 @@ -307,7 +307,5 @@ ENTRY(v7_cache_fns) .long v7_flush_kern_dcache_area .long v7_dma_map_area .long v7_dma_unmap_area - .long v7_dma_inv_range - .long v7_dma_clean_range .long v7_dma_flush_range .size v7_cache_fns, . 
- v7_cache_fns diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index c85f5eb4263..72507c630ce 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020_dma_inv_range) +arm1020_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -295,7 +295,7 @@ ENTRY(arm1020_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020_dma_clean_range) +arm1020_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -363,8 +363,6 @@ ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_dcache_area .long arm1020_dma_map_area .long arm1020_dma_unmap_area - .long arm1020_dma_inv_range - .long arm1020_dma_clean_range .long arm1020_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 5a3cf7620a2..d2782980560 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020e_dma_inv_range) +arm1020e_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -284,7 +284,7 @@ ENTRY(arm1020e_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020e_dma_clean_range) +arm1020e_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -349,8 +349,6 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_dcache_area .long arm1020e_dma_map_area .long arm1020e_dma_unmap_area - .long arm1020e_dma_inv_range - .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index fec8f587843..ce13e4a827d 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1022_dma_inv_range) +arm1022_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -273,7 +273,7 @@ ENTRY(arm1022_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1022_dma_clean_range) +arm1022_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -338,8 +338,6 @@ ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_dcache_area .long arm1022_dma_map_area .long arm1022_dma_unmap_area - .long arm1022_dma_inv_range - .long arm1022_dma_clean_range .long arm1022_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 9ece6f66649..636672a29c6 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1026_dma_inv_range) +arm1026_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -267,7 +267,7 @@ ENTRY(arm1026_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1026_dma_clean_range) +arm1026_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -332,8 +332,6 @@ ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_dcache_area .long arm1026_dma_map_area .long arm1026_dma_unmap_area - .long arm1026_dma_inv_range - .long arm1026_dma_clean_range .long arm1026_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 6f6ab2747da..8be81992645 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -239,7 +239,7 @@ 
ENTRY(arm920_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm920_dma_inv_range) +arm920_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -262,7 +262,7 @@ ENTRY(arm920_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm920_dma_clean_range) +arm920_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -321,8 +321,6 @@ ENTRY(arm920_cache_fns) .long arm920_flush_kern_dcache_area .long arm920_dma_map_area .long arm920_dma_unmap_area - .long arm920_dma_inv_range - .long arm920_dma_clean_range .long arm920_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 4e4396b121c..c0ff8e4b107 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm922_dma_inv_range) +arm922_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -264,7 +264,7 @@ ENTRY(arm922_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm922_dma_clean_range) +arm922_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -323,8 +323,6 @@ ENTRY(arm922_cache_fns) .long arm922_flush_kern_dcache_area .long arm922_dma_map_area .long arm922_dma_unmap_area - .long arm922_dma_inv_range - .long arm922_dma_clean_range .long arm922_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 7c01c5d1108..3c6cffe400f 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm925_dma_inv_range) +arm925_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -308,7 +308,7 @@ ENTRY(arm925_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm925_dma_clean_range) +arm925_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -374,8 +374,6 @@ ENTRY(arm925_cache_fns) .long arm925_flush_kern_dcache_area .long arm925_dma_map_area .long arm925_dma_unmap_area - .long arm925_dma_inv_range - .long arm925_dma_clean_range .long arm925_dma_flush_range ENTRY(cpu_arm925_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 72a01a4b80a..75b707c9cce 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm926_dma_inv_range) +arm926_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -271,7 +271,7 @@ ENTRY(arm926_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm926_dma_clean_range) +arm926_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -337,8 +337,6 @@ ENTRY(arm926_cache_fns) .long arm926_flush_kern_dcache_area .long arm926_dma_map_area .long arm926_dma_unmap_area - .long arm926_dma_inv_range - .long arm926_dma_clean_range .long arm926_dma_flush_range ENTRY(cpu_arm926_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 6bb58fca727..1af1657819e 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S 
@@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(arm940_dma_inv_range) +arm940_dma_inv_range: mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries @@ -192,7 +192,7 @@ ENTRY(arm940_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(arm940_dma_clean_range) +arm940_dma_clean_range: ENTRY(cpu_arm940_dcache_clean_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -266,8 +266,6 @@ ENTRY(arm940_cache_fns) .long arm940_flush_kern_dcache_area .long arm940_dma_map_area .long arm940_dma_unmap_area - .long arm940_dma_inv_range - .long arm940_dma_clean_range .long arm940_dma_flush_range __INIT diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index ac0f9ba719d..1664b6aaff7 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area) * - end - virtual end address * (same as arm926) */ -ENTRY(arm946_dma_inv_range) +arm946_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -240,7 +240,7 @@ ENTRY(arm946_dma_inv_range) * * (same as arm926) */ -ENTRY(arm946_dma_clean_range) +arm946_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -308,8 +308,6 @@ ENTRY(arm946_cache_fns) .long arm946_flush_kern_dcache_area .long arm946_dma_map_area .long arm946_dma_unmap_area - .long arm946_dma_inv_range - .long arm946_dma_clean_range .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 97e1d784f15..53e63234384 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_inv_range) +feroceon_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -288,7 +288,7 @@ ENTRY(feroceon_dma_inv_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_inv_range) +feroceon_range_dma_inv_range: mrs r2, cpsr tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -314,7 +314,7 @@ ENTRY(feroceon_range_dma_inv_range) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_clean_range) +feroceon_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -324,7 +324,7 @@ ENTRY(feroceon_dma_clean_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_clean_range) +feroceon_range_dma_clean_range: mrs r2, cpsr cmp r1, r0 subne r1, r1, #1 @ top address is inclusive @@ -414,8 +414,6 @@ ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_dcache_area .long feroceon_dma_map_area .long feroceon_dma_unmap_area - .long feroceon_dma_inv_range - .long feroceon_dma_clean_range .long feroceon_dma_flush_range ENTRY(feroceon_range_cache_fns) @@ -427,8 +425,6 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_range_flush_kern_dcache_area .long feroceon_range_dma_map_area .long feroceon_dma_unmap_area - .long feroceon_range_dma_inv_range - .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 55b7fbec654..caa31154e7d 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -218,7 +218,7 @@ 
ENTRY(mohawk_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(mohawk_dma_inv_range) +mohawk_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 @@ -241,7 +241,7 @@ ENTRY(mohawk_dma_inv_range) * * (same as v4wb) */ -ENTRY(mohawk_dma_clean_range) +mohawk_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -301,8 +301,6 @@ ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_dcache_area .long mohawk_dma_map_area .long mohawk_dma_unmap_area - .long mohawk_dma_inv_range - .long mohawk_dma_clean_range .long mohawk_dma_flush_range ENTRY(cpu_mohawk_dcache_clean_area) diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 4e4ce889b3e..046b3d88955 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_inv_range) +xsc3_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line @@ -278,7 +278,7 @@ ENTRY(xsc3_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_clean_range) +xsc3_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE @@ -337,8 +337,6 @@ ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_dcache_area .long xsc3_dma_map_area .long xsc3_dma_unmap_area - .long xsc3_dma_inv_range - .long xsc3_dma_clean_range .long xsc3_dma_flush_range ENTRY(cpu_xsc3_dcache_clean_area) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index a7999f94bf2..63037e2162f 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_inv_range) +xscale_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -336,7 +336,7 @@ ENTRY(xscale_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_clean_range) +xscale_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHELINESIZE @@ -409,8 +409,6 @@ ENTRY(xscale_cache_fns) .long xscale_flush_kern_dcache_area .long xscale_dma_map_area .long xscale_dma_unmap_area - .long xscale_dma_inv_range - .long xscale_dma_clean_range .long xscale_dma_flush_range /* @@ -436,8 +434,6 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_dma_a0_map_area .long xscale_dma_unmap_area .long xscale_dma_flush_range - .long xscale_dma_clean_range - .long xscale_dma_flush_range ENTRY(cpu_xscale_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry -- cgit v1.2.3-70-g09d2 From 31aa8fd6fd30b0f36416df7d09619768d26b4332 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 11:10:03 +0000 Subject: ARM: Add caller information to ioremap This allows the procfs vmallocinfo file to show who created the ioremap regions. Note: __builtin_return_address(0) doesn't do what's expected if it's used in an inline function, so we leave __arm_ioremap callers in such places alone.
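A platform wrapper built on the new entry point then forwards its own return address, so /proc/vmallocinfo attributes the mapping to whoever called the wrapper. A minimal sketch (my_plat_ioremap is a made-up name, not part of this patch):

void __iomem *my_plat_ioremap(unsigned long phys, size_t size,
			      unsigned int mtype)
{
	/*
	 * Must stay out of line: if this were inlined,
	 * __builtin_return_address(0) would resolve one level
	 * further up than intended.
	 */
	return __arm_ioremap_caller(phys, size, mtype,
				    __builtin_return_address(0));
}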
Signed-off-by: Russell King --- arch/arm/include/asm/io.h | 11 +++++++-- arch/arm/mach-davinci/io.c | 2 +- arch/arm/mach-iop13xx/io.c | 7 +++--- arch/arm/mach-msm/io.c | 3 ++- arch/arm/mm/ioremap.c | 57 +++++++++++++++++++++++++++------------------- arch/arm/mm/nommu.c | 12 ++++++++++ arch/arm/plat-iop/io.c | 3 ++- arch/arm/plat-omap/io.c | 2 +- 8 files changed, 65 insertions(+), 32 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d2a59cfc30c..c980156f326 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); /* * __arm_ioremap takes CPU physical address. * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page + * The _caller variety takes a __builtin_return_address(0) value for + * /proc/vmallocinfo to use - and should only be used in non-inline functions. */ -extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, + size_t, unsigned int, void *); +extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, + void *); + +extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); extern void __iounmap(volatile void __iomem *addr); /* diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c index 49912b48b1b..a1c0b6b99ed 100644 --- a/arch/arm/mach-davinci/io.c +++ b/arch/arm/mach-davinci/io.c @@ -24,7 +24,7 @@ void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type) if (BETWEEN(p, IO_PHYS, IO_SIZE)) return XLATE(p, IO_PHYS, IO_VIRT); - return __arm_ioremap(p, size, type); + return __arm_ioremap_caller(p, size, type, __builtin_return_address(0)); } EXPORT_SYMBOL(davinci_ioremap); diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c index 52958099781..48642e66c56 100644 --- a/arch/arm/mach-iop13xx/io.c +++ b/arch/arm/mach-iop13xx/io.c @@ -61,9 +61,9 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size, (cookie - IOP13XX_PCIE_LOWER_MEM_RA)); break; case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA: - retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA + + retval = __arm_ioremap_caller(IOP13XX_PBI_LOWER_MEM_PA + (cookie - IOP13XX_PBI_LOWER_MEM_RA), - size, mtype); + size, mtype, __builtin_return_address(0)); break; case IOP13XX_PCIE_LOWER_IO_PA ... 
IOP13XX_PCIE_UPPER_IO_PA: retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie); @@ -75,7 +75,8 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size, retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie); break; default: - retval = __arm_ioremap(cookie, size, mtype); + retval = __arm_ioremap_caller(cookie, size, mtype, + __builtin_return_address(0)); } return retval; diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index 1c5e7dac086..05f96b780aa 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c @@ -76,5 +76,6 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) mtype = MT_DEVICE_NONSHARED; } - return __arm_ioremap(phys_addr, size, mtype); + return __arm_ioremap_caller(phys_addr, size, mtype, + __builtin_return_address(0)); } diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 0ab75c60f7c..28c8b950ef0 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm) * which requires the new ioremap'd region to be referenced, the CPU will * reference the _old_ region. * - * Note that get_vm_area() allocates a guard 4K page, so we need to mask - * the size back to 1MB aligned or we will overflow in the loop below. + * Note that get_vm_area_caller() allocates a guard 4K page, so we need to + * mask the size back to 1MB aligned or we will overflow in the loop below. */ static void unmap_area_sections(unsigned long virt, unsigned long size) { @@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, } #endif - -/* - * Remap an arbitrary physical address space into the kernel virtual - * address space. Needed when the kernel wants to access high addresses - * directly. - * - * NOTE! We need to allow non-page-aligned mappings too: we will obviously - * have to convert them into an offset in a page-aligned mapping, but the - * caller shouldn't need to know that small detail. - * - * 'flags' are the extra L_PTE_ flags that you want to specify for this - * mapping. See for more information. - */ -void __iomem * -__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, - unsigned int mtype) +void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, + unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; int err; @@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, */ size = PAGE_ALIGN(offset + size); - area = get_vm_area(size, VM_IOREMAP); + area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; @@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, flush_cache_vmap(addr, addr + size); return (void __iomem *) (offset + addr); } -EXPORT_SYMBOL(__arm_ioremap_pfn); -void __iomem * -__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) +void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, + unsigned int mtype, void *caller) { unsigned long last_addr; unsigned long offset = phys_addr & ~PAGE_MASK; @@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) if (!size || last_addr < phys_addr) return NULL; - return __arm_ioremap_pfn(pfn, offset, size, mtype); + return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, + caller); +} + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. 
Needed when the kernel wants to access high addresses + * directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ +void __iomem * +__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, + unsigned int mtype) +{ + return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(__arm_ioremap_pfn); + +void __iomem * +__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) +{ + return __arm_ioremap_caller(phys_addr, size, mtype, + __builtin_return_address(0)); } EXPORT_SYMBOL(__arm_ioremap); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 374a8311bc8..9bfeb6b9509 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, } EXPORT_SYMBOL(__arm_ioremap_pfn); +void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, + size_t size, unsigned int mtype, void *caller) +{ + return __arm_ioremap_pfn(pfn, offset, size, mtype); +} + void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) { @@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, } EXPORT_SYMBOL(__arm_ioremap); +void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, + unsigned int mtype, void *caller) +{ + return __arm_ioremap(phys_addr, size, mtype); +} + void __iounmap(volatile void __iomem *addr) { } diff --git a/arch/arm/plat-iop/io.c b/arch/arm/plat-iop/io.c index ed0bbece0d6..e15bc17db90 100644 --- a/arch/arm/plat-iop/io.c +++ b/arch/arm/plat-iop/io.c @@ -34,7 +34,8 @@ void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size, retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie); break; default: - retval = __arm_ioremap(cookie, size, mtype); + retval = __arm_ioremap_caller(cookie, size, mtype, + __builtin_return_address(0)); } return retval; diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c index 0cfd54f519c..4cbd4fb3232 100644 --- a/arch/arm/plat-omap/io.c +++ b/arch/arm/plat-omap/io.c @@ -128,7 +128,7 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type) return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT); } #endif - return __arm_ioremap(p, size, type); + return __arm_ioremap_caller(p, size, type, __builtin_return_address(0)); } EXPORT_SYMBOL(omap_ioremap); -- cgit v1.2.3-70-g09d2 From 7921fc4a25800f4210bca96c05dea67a6b736d32 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 10 Jan 2010 17:05:08 +0000 Subject: ARM: remove old RTC support All RTC drivers have been converted to rtclib, so the old code providing the set_rtc function pointer, save_time_delta() and restore_time_delta() functions is obsolete. Remove it. Signed-off-by: Russell King --- arch/arm/include/asm/mach/time.h | 8 ----- arch/arm/kernel/time.c | 74 ---------------------------------------- 2 files changed, 82 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index b2cc1fcd040..8bffc3ff3ac 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -46,12 +46,4 @@ struct sys_timer { extern struct sys_timer *system_timer; extern void timer_tick(void); -/* - * Kernel time keeping support. 
- */ -struct timespec; -extern int (*set_rtc)(void); -extern void save_time_delta(struct timespec *delta, struct timespec *rtc); -extern void restore_time_delta(struct timespec *delta, struct timespec *rtc); - #endif diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index d38cdf2c827..e26f966d117 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -10,11 +10,6 @@ * * This file contains the ARM-specific time handling details: * reading the RTC at bootup, etc... - * - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills */ #include #include @@ -77,11 +72,6 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif -/* - * hook for setting the RTC's idea of the current time. - */ -int (*set_rtc)(void); - #ifndef CONFIG_GENERIC_TIME static unsigned long dummy_gettimeoffset(void) { @@ -89,36 +79,6 @@ static unsigned long dummy_gettimeoffset(void) } #endif -static unsigned long next_rtc_update; - -/* - * If we have an externally synchronized linux clock, then update - * CMOS clock accordingly every ~11 minutes. set_rtc() has to be - * called as close as possible to 500 ms before the new second - * starts. - */ -static inline void do_set_rtc(void) -{ - if (!ntp_synced() || set_rtc == NULL) - return; - - if (next_rtc_update && - time_before((unsigned long)xtime.tv_sec, next_rtc_update)) - return; - - if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) && - xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1)) - return; - - if (set_rtc()) - /* - * rtc update failed. Try again in 60s - */ - next_rtc_update = xtime.tv_sec + 60; - else - next_rtc_update = xtime.tv_sec + 660; -} - #ifdef CONFIG_LEDS static void dummy_leds_event(led_event_t evt) @@ -295,39 +255,6 @@ int do_settimeofday(struct timespec *tv) EXPORT_SYMBOL(do_settimeofday); #endif /* !CONFIG_GENERIC_TIME */ -/** - * save_time_delta - Save the offset between system time and RTC time - * @delta: pointer to timespec to store delta - * @rtc: pointer to timespec for current RTC time - * - * Return a delta between the system time and the RTC time, such - * that system time can be restored later with restore_time_delta() - */ -void save_time_delta(struct timespec *delta, struct timespec *rtc) -{ - set_normalized_timespec(delta, - xtime.tv_sec - rtc->tv_sec, - xtime.tv_nsec - rtc->tv_nsec); -} -EXPORT_SYMBOL(save_time_delta); - -/** - * restore_time_delta - Restore the current system time - * @delta: delta returned by save_time_delta() - * @rtc: pointer to timespec for current RTC time - */ -void restore_time_delta(struct timespec *delta, struct timespec *rtc) -{ - struct timespec ts; - - set_normalized_timespec(&ts, - delta->tv_sec + rtc->tv_sec, - delta->tv_nsec + rtc->tv_nsec); - - do_settimeofday(&ts); -} -EXPORT_SYMBOL(restore_time_delta); - #ifndef CONFIG_GENERIC_CLOCKEVENTS /* * Kernel system timer support. @@ -336,7 +263,6 @@ void timer_tick(void) { profile_tick(CPU_PROFILING); do_leds(); - do_set_rtc(); write_seqlock(&xtime_lock); do_timer(1); write_sequnlock(&xtime_lock); -- cgit v1.2.3-70-g09d2 From 2b0d8c251b8876d530a6bf671eb5425838fa698a Mon Sep 17 00:00:00 2001 From: Jeremy Kerr Date: Mon, 11 Jan 2010 23:17:34 +0100 Subject: ARM: 5880/1: arm: use generic infrastructure for early params The ARM setup code includes its own parser for early params; there's also one in the generic init code. 
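As described below, each handler moves from the private __early_param() registration to the generic early_param() one. For illustration only, with a made-up "foo" parameter and a placeholder variable foo_val (neither is from this patch), an old-style handler

static void __init early_foo(char **p)
{
	foo_val = memparse(*p, p);
}
__early_param("foo=", early_foo);

becomes

static int __init early_foo(char *p)
{
	foo_val = memparse(p, NULL);
	return 0;
}
early_param("foo", early_foo);

Note that the trailing '=' moves out of the registered name and the handler now returns 0 on success.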
This patch removes __early_param (and related code) from
arch/arm/kernel/setup.c, and changes users to the generic early_param
macro instead.

The generic macro takes a char * argument, rather than char **, so we
need to update the parser functions a little.

Signed-off-by: Jeremy Kerr
Signed-off-by: Russell King
---
 arch/arm/include/asm/setup.h      | 12 --------
 arch/arm/kernel/setup.c           | 62 ++++++++++-----------------------------
 arch/arm/kernel/vmlinux.lds.S     |  4 ---
 arch/arm/mach-footbridge/common.c |  7 +++--
 arch/arm/mm/init.c                | 12 ++++----
 arch/arm/mm/mmu.c                 | 41 +++++++++++++-------------
 6 files changed, 48 insertions(+), 90 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03..f392fb4437a 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size
 
-/*
- * Early command line parameters.
- */
-struct early_params {
-	const char *arg;
-	void (*fn)(char **p);
-};
-
-#define __early_param(name,fn)					\
-static struct early_params __early_##fn __used		\
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 5357e48f2c3..b01a56a03ed 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -418,10 +418,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
  * Pick out the memory size.  We look for mem=size@start,
  * where start and size are "size[KkMm]"
  */
-static void __init early_mem(char **p)
+static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
 	unsigned long size, start;
+	char *endp;
 
 	/*
 	 * If the user specifies memory size, we
@@ -434,52 +435,15 @@ static void __init early_mem(char **p)
 	}
 
 	start = PHYS_OFFSET;
-	size  = memparse(*p, p);
-	if (**p == '@')
-		start = memparse(*p + 1, p);
+	size  = memparse(p, &endp);
+	if (*endp == '@')
+		start = memparse(endp + 1, NULL);
 
 	arm_add_memory(start, size);
-}
-__early_param("mem=", early_mem);
-/*
- * Initial parsing of the command line.
- */ -static void __init parse_cmdline(char **cmdline_p, char *from) -{ - char c = ' ', *to = command_line; - int len = 0; - - for (;;) { - if (c == ' ') { - extern struct early_params __early_begin, __early_end; - struct early_params *p; - - for (p = &__early_begin; p < &__early_end; p++) { - int arglen = strlen(p->arg); - - if (memcmp(from, p->arg, arglen) == 0) { - if (to != command_line) - to -= 1; - from += arglen; - p->fn(&from); - - while (*from != ' ' && *from != '\0') - from++; - break; - } - } - } - c = *from++; - if (!c) - break; - if (COMMAND_LINE_SIZE <= ++len) - break; - *to++ = c; - } - *to = '\0'; - *cmdline_p = command_line; + return 0; } +early_param("mem", early_mem); static void __init setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) @@ -740,9 +704,15 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; - memcpy(boot_command_line, from, COMMAND_LINE_SIZE); - boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; - parse_cmdline(cmdline_p, from); + /* parse_early_param needs a boot_command_line */ + strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); + + /* populate command_line too for later use, preserving boot_command_line */ + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = command_line; + + parse_early_param(); + paging_init(mdesc); request_standard_resources(&meminfo, mdesc); diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 4957e13ef55..b16c07914b5 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -43,10 +43,6 @@ SECTIONS INIT_SETUP(16) - __early_begin = .; - *(.early_param.init) - __early_end = .; - INIT_CALLS CON_INITCALL SECURITY_INITCALL diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 41febc796b1..e3bc3f6f6b1 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -32,12 +32,13 @@ unsigned int mem_fclk_21285 = 50000000; EXPORT_SYMBOL(mem_fclk_21285); -static void __init early_fclk(char **arg) +static int __init early_fclk(char *arg) { - mem_fclk_21285 = simple_strtoul(*arg, arg, 0); + mem_fclk_21285 = simple_strtoul(arg, NULL, 0); + return 0; } -__early_param("mem_fclk_21285=", early_fclk); +early_param("mem_fclk_21285", early_fclk); static int __init parse_tag_memclk(const struct tag *tag) { diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a04ffbbbe25..a340569b991 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -32,19 +32,21 @@ static unsigned long phys_initrd_start __initdata = 0; static unsigned long phys_initrd_size __initdata = 0; -static void __init early_initrd(char **p) +static int __init early_initrd(char *p) { unsigned long start, size; + char *endp; - start = memparse(*p, p); - if (**p == ',') { - size = memparse((*p) + 1, p); + start = memparse(p, &endp); + if (*endp == ',') { + size = memparse(endp + 1, NULL); phys_initrd_start = start; phys_initrd_size = size; } + return 0; } -__early_param("initrd=", early_initrd); +early_param("initrd", early_initrd); static int __init parse_tag_initrd(const struct tag *tag) { diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 1708da82da9..88f5d71248d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = { * writebuffer to be turned off. (Note: the write * buffer should not be on and the cache off). 
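 *
 * (Usage illustration, assuming the usual ARM policy table: booting
 * with "cachepolicy=writethrough" selects that entry in the
 * cache_policies[] array scanned below; the names typically offered
 * are uncached, buffered, writethrough, writeback and writealloc.)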
*/ -static void __init early_cachepolicy(char **p) +static int __init early_cachepolicy(char *p) { int i; for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { int len = strlen(cache_policies[i].policy); - if (memcmp(*p, cache_policies[i].policy, len) == 0) { + if (memcmp(p, cache_policies[i].policy, len) == 0) { cachepolicy = i; cr_alignment &= ~cache_policies[i].cr_mask; cr_no_alignment &= ~cache_policies[i].cr_mask; - *p += len; break; } } @@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p) } flush_cache_all(); set_cr(cr_alignment); + return 0; } -__early_param("cachepolicy=", early_cachepolicy); +early_param("cachepolicy", early_cachepolicy); -static void __init early_nocache(char **__unused) +static int __init early_nocache(char *__unused) { char *p = "buffered"; printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); - early_cachepolicy(&p); + early_cachepolicy(p); + return 0; } -__early_param("nocache", early_nocache); +early_param("nocache", early_nocache); -static void __init early_nowrite(char **__unused) +static int __init early_nowrite(char *__unused) { char *p = "uncached"; printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); - early_cachepolicy(&p); + early_cachepolicy(p); + return 0; } -__early_param("nowb", early_nowrite); +early_param("nowb", early_nowrite); -static void __init early_ecc(char **p) +static int __init early_ecc(char *p) { - if (memcmp(*p, "on", 2) == 0) { + if (memcmp(p, "on", 2) == 0) ecc_mask = PMD_PROTECTION; - *p += 2; - } else if (memcmp(*p, "off", 3) == 0) { + else if (memcmp(p, "off", 3) == 0) ecc_mask = 0; - *p += 3; - } + return 0; } -__early_param("ecc=", early_ecc); +early_param("ecc", early_ecc); static int __init noalign_setup(char *__unused) { @@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M; * bytes. This can be used to increase (or decrease) the vmalloc * area - the default is 128m. */ -static void __init early_vmalloc(char **arg) +static int __init early_vmalloc(char *arg) { - vmalloc_reserve = memparse(*arg, arg); + vmalloc_reserve = memparse(arg, NULL); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; @@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg) "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } + return 0; } -__early_param("vmalloc=", early_vmalloc); +early_param("vmalloc", early_vmalloc); #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) -- cgit v1.2.3-70-g09d2 From a9221de66d2d94e6e34c3f56bbdd744935020737 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 20 Jan 2010 17:02:54 +0000 Subject: ARM: add notify_die() support Kernel debuggers want to be informed of die() events, so that they can take some action to allow the problem to be inspected. Provide the hook in a similar manner to x86. Note that we currently don't implement the individual trap hooks. 
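For context, a sketch of how a kernel debugger would consume these events
(illustrative only, not part of the patch; the handler and variable names
are made up):

    #include <linux/kdebug.h>
    #include <linux/notifier.h>

    /* Hypothetical consumer of the new DIE_OOPS notification. */
    static int example_die_notify(struct notifier_block *self,
    			      unsigned long val, void *data)
    {
    	if (val == DIE_OOPS)
    		printk(KERN_EMERG "example: oops notified\n");
    	return NOTIFY_DONE;
    }

    static struct notifier_block example_die_nb = {
    	.notifier_call = example_die_notify,
    };

    /* registered at init time with register_die_notifier(&example_die_nb) */

A handler returning NOTIFY_STOP suppresses the normal SIGSEGV exit, which is
what the ret != NOTIFY_STOP test in the patch below preserves.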
Acked-by: Jason Wessel Signed-off-by: Russell King --- arch/arm/include/asm/system.h | 3 +-- arch/arm/kernel/traps.c | 35 +++++++++++++++++++++++++---------- 2 files changed, 26 insertions(+), 12 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 058e7e90881..ca88e6a8470 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285; struct pt_regs; -void die(const char *msg, struct pt_regs *regs, int err) - __attribute__((noreturn)); +void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 3f361a783f4..1621e5327b2 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -12,15 +12,17 @@ * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably * kill the offending process. */ -#include #include -#include #include #include -#include +#include +#include #include +#include +#include +#include +#include #include -#include #include #include @@ -224,14 +226,21 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #define S_SMP "" #endif -static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) +static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) { struct task_struct *tsk = thread->task; static int die_counter; + int ret; printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", str, err, ++die_counter); sysfs_printk_last_file(); + + /* trap and error numbers are mostly meaningless on ARM */ + ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); + if (ret == NOTIFY_STOP) + return ret; + print_modules(); __show_regs(regs); printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", @@ -243,6 +252,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p dump_backtrace(regs, tsk); dump_instr(KERN_EMERG, regs); } + + return ret; } DEFINE_SPINLOCK(die_lock); @@ -250,16 +261,21 @@ DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. */ -NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) +void die(const char *str, struct pt_regs *regs, int err) { struct thread_info *thread = current_thread_info(); + int ret; oops_enter(); spin_lock_irq(&die_lock); console_verbose(); bust_spinlocks(1); - __die(str, err, thread, regs); + ret = __die(str, err, thread, regs); + + if (regs && kexec_should_crash(thread->task)) + crash_kexec(regs); + bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); @@ -267,11 +283,10 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) if (in_interrupt()) panic("Fatal exception in interrupt"); - if (panic_on_oops) panic("Fatal exception"); - - do_exit(SIGSEGV); + if (ret != NOTIFY_STOP) + do_exit(SIGSEGV); } void arm_notify_die(const char *str, struct pt_regs *regs, -- cgit v1.2.3-70-g09d2 From 24b44a66fa240f6fc63343623ca730d39754047e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 20 Jan 2010 19:05:07 +0100 Subject: ARM: 5889/1: Add atomic64 routines for ARMv6k and above. In preparation for perf-events support, ARM needs to support atomic64_t operations. v6k and above support the ldrexd and strexd instructions to do just that. This patch adds atomic64 support to the ARM architecture. 
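To make the resulting interface concrete (an illustrative sketch, not part
of the patch; the names are made up), callers such as the planned
perf-events code can then keep lock-free 64-bit counters:

    /* Hypothetical 64-bit event counter built on the new routines. */
    static atomic64_t example_count = ATOMIC64_INIT(0);

    static void example_account(u64 delta)
    {
    	atomic64_add(delta, &example_count);
    }

    static u64 example_snapshot_and_reset(void)
    {
    	/* atomic64_xchg() returns the previous value atomically */
    	return atomic64_xchg(&example_count, 0);
    }

Both helpers compile down to the ldrexd/strexd loops in the diff below.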
v6k and above make use of new instructions whilst older cores fall back on the generic solution using spinlocks. If and when v7-M cores are supported by Linux, they will need to fall back on the spinlock implementation too. Signed-off-by: Will Deacon Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/Kconfig | 1 + arch/arm/include/asm/atomic.h | 228 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 229 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b224216c11d..762ae536f90 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -12,6 +12,7 @@ config ARM select HAVE_IDE select RTC_LIB select SYS_SUPPORTS_APM_EMULATION + select GENERIC_ATOMIC64 if (!CPU_32v6K) select HAVE_OPROFILE select HAVE_ARCH_KGDB select HAVE_KPROBES if (!XIP_KERNEL) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index d0daeab2234..e8ddec2cb15 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#ifndef CONFIG_GENERIC_ATOMIC64 +typedef struct { + u64 __aligned(8) counter; +} atomic64_t; + +#define ATOMIC64_INIT(i) { (i) } + +static inline u64 atomic64_read(atomic64_t *v) +{ + u64 result; + + __asm__ __volatile__("@ atomic64_read\n" +" ldrexd %0, %H0, [%1]" + : "=&r" (result) + : "r" (&v->counter) + ); + + return result; +} + +static inline void atomic64_set(atomic64_t *v, u64 i) +{ + u64 tmp; + + __asm__ __volatile__("@ atomic64_set\n" +"1: ldrexd %0, %H0, [%1]\n" +" strexd %0, %2, %H2, [%1]\n" +" teq %0, #0\n" +" bne 1b" + : "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline void atomic64_add(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + __asm__ __volatile__("@ atomic64_add\n" +"1: ldrexd %0, %H0, [%2]\n" +" adds %0, %0, %3\n" +" adc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline u64 atomic64_add_return(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_add_return\n" +"1: ldrexd %0, %H0, [%2]\n" +" adds %0, %0, %3\n" +" adc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic64_sub(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + __asm__ __volatile__("@ atomic64_sub\n" +"1: ldrexd %0, %H0, [%2]\n" +" subs %0, %0, %3\n" +" sbc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_sub_return\n" +"1: ldrexd %0, %H0, [%2]\n" +" subs %0, %0, %3\n" +" sbc %H0, %H0, %H3\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) +{ + u64 oldval; + unsigned long res; + + smp_mb(); + + do { + __asm__ __volatile__("@ atomic64_cmpxchg\n" + "ldrexd %1, %H1, [%2]\n" + "mov %0, #0\n" + "teq %1, %3\n" + "teqeq 
%H1, %H3\n" + "strexdeq %0, %4, %H4, [%2]" + : "=&r" (res), "=&r" (oldval) + : "r" (&ptr->counter), "r" (old), "r" (new) + : "cc"); + } while (res); + + smp_mb(); + + return oldval; +} + +static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_xchg\n" +"1: ldrexd %0, %H0, [%2]\n" +" strexd %1, %3, %H3, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&ptr->counter), "r" (new) + : "cc"); + + smp_mb(); + + return result; +} + +static inline u64 atomic64_dec_if_positive(atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_dec_if_positive\n" +"1: ldrexd %0, %H0, [%2]\n" +" subs %0, %0, #1\n" +" sbc %H0, %H0, #0\n" +" teq %H0, #0\n" +" bmi 2f\n" +" strexd %1, %0, %H0, [%2]\n" +" teq %1, #0\n" +" bne 1b\n" +"2:" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter) + : "cc"); + + smp_mb(); + + return result; +} + +static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) +{ + u64 val; + unsigned long tmp; + int ret = 1; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_add_unless\n" +"1: ldrexd %0, %H0, [%3]\n" +" teq %0, %4\n" +" teqeq %H0, %H4\n" +" moveq %1, #0\n" +" beq 2f\n" +" adds %0, %0, %5\n" +" adc %H0, %H0, %H5\n" +" strexd %2, %0, %H0, [%3]\n" +" teq %2, #0\n" +" bne 1b\n" +"2:" + : "=&r" (val), "=&r" (ret), "=&r" (tmp) + : "r" (&v->counter), "r" (u), "r" (a) + : "cc"); + + if (ret) + smp_mb(); + + return ret; +} + +#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) +#define atomic64_inc(v) atomic64_add(1LL, (v)) +#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) +#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) +#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) +#define atomic64_dec(v) atomic64_sub(1LL, (v)) +#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) +#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) + +#else /* !CONFIG_GENERIC_ATOMIC64 */ +#include +#endif #include #endif #endif -- cgit v1.2.3-70-g09d2 From c5113b61baf7a9a8616eca83e20847e7fecdc679 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Mon, 25 Jan 2010 19:43:03 +0100 Subject: ARM: 5897/1: spinlock: don't use deprecated barriers on ARMv7 On ARMv7, the use of the cp15 operations for barriers is deprecated in favour of the isb, dsb, and dmb instructions. Change the locking functions to use the appropriate type of dsb for the architecture being built for. Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c91c64cab92..17eb355707d 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,6 +5,22 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +static inline void dsb_sev(void) +{ +#if __LINUX_ARM_ARCH__ >= 7 + __asm__ __volatile__ ( + "dsb\n" + "sev" + ); +#elif defined(CONFIG_CPU_32v6K) + __asm__ __volatile__ ( + "mcr p15, 0, %0, c7, c10, 4\n" + "sev" + : : "r" (0) + ); +#endif +} + /* * ARMv6 Spin-locking. 
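 *
 * (dsb_sev() above issues a DSB so that the store releasing the lock
 * is visible before the SEV wakes any CPU waiting in WFE in the lock
 * loop; on ARMv7 it uses the dsb instruction itself rather than the
 * deprecated cp15 c7, c10, 4 encoding.)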
* @@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) __asm__ __volatile__( " str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev" -#endif : : "r" (&lock->lock), "r" (0) : "cc"); + + dsb_sev(); } /* @@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) __asm__ __volatile__( "str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev\n" -#endif : : "r" (&rw->lock), "r" (0) : "cc"); + + dsb_sev(); } /* write_can_lock - would write_trylock() succeed? */ @@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " strex %1, %0, [%2]\n" " teq %1, #0\n" " bne 1b" -#ifdef CONFIG_CPU_32v6K -"\n cmp %0, #0\n" -" mcreq p15, 0, %0, c7, c10, 4\n" -" seveq" -#endif : "=&r" (tmp), "=&r" (tmp2) : "r" (&rw->lock) : "cc"); + + if (tmp == 0) + dsb_sev(); } static inline int arch_read_trylock(arch_rwlock_t *rw) -- cgit v1.2.3-70-g09d2 From 11805bcfa411c816b7c76fc40724be6733c74ffc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 26 Jan 2010 19:09:42 +0100 Subject: ARM: 5905/1: ARM: Global ASID allocation on SMP The current ASID allocation algorithm doesn't ensure the notification of the other CPUs when the ASID rolls over. This may lead to two processes using the same ASID (but different generation) or multiple threads of the same process using different ASIDs. This patch adds the broadcasting of the ASID rollover event to the other CPUs. To avoid a race on multiple CPUs modifying "cpu_last_asid" during the handling of the broadcast, the ASID numbering now starts at "smp_processor_id() + 1". At rollover, the cpu_last_asid will be set to NR_CPUS. Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/mmu.h | 1 + arch/arm/include/asm/mmu_context.h | 15 +++++ arch/arm/mm/context.c | 124 ++++++++++++++++++++++++++++++++----- 3 files changed, 126 insertions(+), 14 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index b561584d04a..68870c77667 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -6,6 +6,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID unsigned int id; + spinlock_t id_lock; #endif unsigned int kvm_seq; } mm_context_t; diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index de6cefb329d..a0b3cac0547 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm); #define ASID_FIRST_VERSION (1 << ASID_BITS) extern unsigned int cpu_last_asid; +#ifdef CONFIG_SMP +DECLARE_PER_CPU(struct mm_struct *, current_mm); +#endif void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); static inline void check_context(struct mm_struct *mm) { + /* + * This code is executed with interrupts enabled. Therefore, + * mm->context.id cannot be updated to the latest ASID version + * on a different CPU (and condition below not triggered) + * without first getting an IPI to reset the context. The + * alternative is to take a read_lock on mm->context.id_lock + * (after changing its type to rwlock_t). 
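+	 *
+	 * A worked illustration (ARM's ASID field is 8 bits): with
+	 * cpu_last_asid == 0x305 and mm->context.id == 0x2f1,
+	 * (0x305 ^ 0x2f1) >> 8 == 1, so the mm holds an ASID from an
+	 * old generation and __new_context() must run; with an id of
+	 * 0x301 the expression is 0 and the context is still current.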
+ */ if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) __new_context(mm); @@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, __flush_icache_all(); #endif if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { +#ifdef CONFIG_SMP + struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); + *crt_mm = next; +#endif check_context(next); cpu_switch_mm(next->pgd, next); if (cache_is_vivt()) diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a9e22e31eaa..b0ee9ba3cfa 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -10,12 +10,17 @@ #include #include #include +#include +#include #include #include static DEFINE_SPINLOCK(cpu_asid_lock); unsigned int cpu_last_asid = ASID_FIRST_VERSION; +#ifdef CONFIG_SMP +DEFINE_PER_CPU(struct mm_struct *, current_mm); +#endif /* * We fork()ed a process, and we need a new context for the child @@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION; void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context.id = 0; + spin_lock_init(&mm->context.id_lock); } +static void flush_context(void) +{ + /* set the reserved ASID before flushing the TLB */ + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); + isb(); + local_flush_tlb_all(); + if (icache_is_vivt_asid_tagged()) { + __flush_icache_all(); + dsb(); + } +} + +#ifdef CONFIG_SMP + +static void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + unsigned long flags; + + /* + * Locking needed for multi-threaded applications where the + * same mm->context.id could be set from different CPUs during + * the broadcast. This function is also called via IPI so the + * mm->context.id_lock has to be IRQ-safe. + */ + spin_lock_irqsave(&mm->context.id_lock, flags); + if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { + /* + * Old version of ASID found. Set the new one and + * reset mm_cpumask(mm). + */ + mm->context.id = asid; + cpumask_clear(mm_cpumask(mm)); + } + spin_unlock_irqrestore(&mm->context.id_lock, flags); + + /* + * Set the mm_cpumask(mm) bit for the current CPU. + */ + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); +} + +/* + * Reset the ASID on the current CPU. This function call is broadcast + * from the CPU handling the ASID rollover and holding cpu_asid_lock. + */ +static void reset_context(void *info) +{ + unsigned int asid; + unsigned int cpu = smp_processor_id(); + struct mm_struct *mm = per_cpu(current_mm, cpu); + + /* + * Check if a current_mm was set on this CPU as it might still + * be in the early booting stages and using the reserved ASID. + */ + if (!mm) + return; + + smp_rmb(); + asid = cpu_last_asid + cpu + 1; + + flush_context(); + set_mm_context(mm, asid); + + /* set the new ASID */ + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); + isb(); +} + +#else + +static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + mm->context.id = asid; + cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); +} + +#endif + void __new_context(struct mm_struct *mm) { unsigned int asid; spin_lock(&cpu_asid_lock); +#ifdef CONFIG_SMP + /* + * Check the ASID again, in case the change was broadcast from + * another CPU before we acquired the lock. 
+ */ + if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + spin_unlock(&cpu_asid_lock); + return; + } +#endif + /* + * At this point, it is guaranteed that the current mm (with + * an old ASID) isn't active on any other CPU since the ASIDs + * are changed simultaneously via IPI. + */ asid = ++cpu_last_asid; if (asid == 0) asid = cpu_last_asid = ASID_FIRST_VERSION; @@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm) * to start a new version and flush the TLB. */ if (unlikely((asid & ~ASID_MASK) == 0)) { - asid = ++cpu_last_asid; - /* set the reserved ASID before flushing the TLB */ - asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n" - : - : "r" (0)); - isb(); - flush_tlb_all(); - if (icache_is_vivt_asid_tagged()) { - __flush_icache_all(); - dsb(); - } + asid = cpu_last_asid + smp_processor_id() + 1; + flush_context(); +#ifdef CONFIG_SMP + smp_wmb(); + smp_call_function(reset_context, NULL, 1); +#endif + cpu_last_asid += NR_CPUS; } - spin_unlock(&cpu_asid_lock); - cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); - mm->context.id = asid; + set_mm_context(mm, asid); + spin_unlock(&cpu_asid_lock); } -- cgit v1.2.3-70-g09d2 From ad187f956108e1c56b444706212bf08d84c0bee0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Feb 2010 11:36:23 +0000 Subject: ARM: vfp ptrace: no point flushing hw context for PTRACE_GETVFPREGS If we're only reading the VFP context via the ptrace call, there's no need to invalidate the hardware context - we only need to do that on PTRACE_SETVFPREGS. This allows more efficient monitoring of a traced task. Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 3 ++- arch/arm/kernel/ptrace.c | 6 ++++-- arch/arm/vfp/vfpmodule.c | 25 +++++++++++++++++++++++-- 3 files changed, 29 insertions(+), 5 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 2dfb7d7a66e..b74970ec02c 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *); extern void iwmmxt_task_release(struct thread_info *); extern void iwmmxt_task_switch(struct thread_info *); -extern void vfp_sync_state(struct thread_info *thread); +extern void vfp_sync_hwstate(struct thread_info *); +extern void vfp_flush_hwstate(struct thread_info *); #endif diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index bdf002bab6a..08f899fb76a 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -700,7 +700,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data) union vfp_state *vfp = &thread->vfpstate; struct user_vfp __user *ufp = data; - vfp_sync_state(thread); + vfp_sync_hwstate(thread); /* copy the floating point registers */ if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, @@ -723,7 +723,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) union vfp_state *vfp = &thread->vfpstate; struct user_vfp __user *ufp = data; - vfp_sync_state(thread); + vfp_sync_hwstate(thread); /* copy the floating point registers */ if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, @@ -734,6 +734,8 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) if (get_user(vfp->hard.fpscr, &ufp->fpscr)) return -EFAULT; + vfp_flush_hwstate(thread); + return 0; } #endif diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 
86a57aeeda4..def19f83d81 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -430,7 +430,11 @@ static inline void vfp_pm_init(void) { } * saved one. This function is used by the ptrace mechanism. */ #ifdef CONFIG_SMP -void vfp_sync_state(struct thread_info *thread) +void vfp_sync_hwstate(struct thread_info *thread) +{ +} + +void vfp_flush_hwstate(struct thread_info *thread) { /* * On SMP systems, the VFP state is automatically saved at every @@ -441,7 +445,7 @@ void vfp_sync_state(struct thread_info *thread) thread->vfpstate.hard.cpu = NR_CPUS; } #else -void vfp_sync_state(struct thread_info *thread) +void vfp_sync_hwstate(struct thread_info *thread) { unsigned int cpu = get_cpu(); @@ -457,6 +461,23 @@ void vfp_sync_state(struct thread_info *thread) */ fmxr(FPEXC, fpexc | FPEXC_EN); vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); + fmxr(FPEXC, fpexc); + } + + put_cpu(); +} + +void vfp_flush_hwstate(struct thread_info *thread) +{ + unsigned int cpu = get_cpu(); + + /* + * If the thread we're interested in is the current owner of the + * hardware VFP state, then we need to save its state. + */ + if (last_VFP_context[cpu] == &thread->vfpstate) { + u32 fpexc = fmrx(FPEXC); + fmxr(FPEXC, fpexc & ~FPEXC_EN); /* -- cgit v1.2.3-70-g09d2 From a7bd08c82e4f74387a39eeebb942712f23967420 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:46:33 +0100 Subject: ARM: 5927/1: Make delimiters of DMA area globally visibly. Adds DMA area to 'virtual memory map' startup message Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 19 +++++++++++-------- arch/arm/mm/dma-mapping.c | 3 --- arch/arm/mm/init.c | 6 ++++++ 3 files changed, 17 insertions(+), 11 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 5421d82a257..f5e693b8bab 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -76,6 +76,17 @@ */ #define IOREMAP_MAX_ORDER 24 +/* + * Size of DMA-consistent memory region. Must be multiple of 2M, + * between 2MB and 14MB inclusive. + */ +#ifndef CONSISTENT_DMA_SIZE +#define CONSISTENT_DMA_SIZE SZ_2M +#endif + +#define CONSISTENT_END (0xffe00000UL) +#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) + #else /* CONFIG_MMU */ /* @@ -112,14 +123,6 @@ #endif /* !CONFIG_MMU */ -/* - * Size of DMA-consistent memory region. Must be multiple of 2M, - * between 2MB and 14MB inclusive. - */ -#ifndef CONSISTENT_DMA_SIZE -#define CONSISTENT_DMA_SIZE SZ_2M -#endif - /* * Physical vs virtual RAM address space conversion. 
These are * private definitions which should NOT be used outside memory.h diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 26325cb5d36..48eedab1609 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -29,9 +29,6 @@ #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" #endif -#define CONSISTENT_END (0xffe00000) -#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) - #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index e8e3a74ac5b..bda481e6bc0 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -651,6 +651,9 @@ void __init mem_init(void) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" +#ifdef CONFIG_MMU + " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" +#endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" #ifdef CONFIG_HIGHMEM @@ -664,6 +667,9 @@ void __init mem_init(void) MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), MLK(FIXADDR_START, FIXADDR_TOP), +#ifdef CONFIG_MMU + MLM(CONSISTENT_BASE, CONSISTENT_END), +#endif MLM(VMALLOC_START, (unsigned long)VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM -- cgit v1.2.3-70-g09d2 From c931b4f655a1b86c929384e674eb8c31795f3bd7 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:47:17 +0100 Subject: ARM: 5928/1: Change type of VMALLOC_END to unsigned long. Makes it consistent with VMALLOC_START Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 4 ++-- arch/arm/include/asm/pgtable-nommu.h | 4 ++-- arch/arm/mach-davinci/include/mach/hardware.h | 2 +- arch/arm/mach-dove/include/mach/vmalloc.h | 2 +- arch/arm/mach-ep93xx/include/mach/vmalloc.h | 2 +- arch/arm/mach-gemini/include/mach/vmalloc.h | 2 +- arch/arm/mach-iop32x/include/mach/vmalloc.h | 2 +- arch/arm/mach-iop33x/include/mach/vmalloc.h | 2 +- arch/arm/mach-ixp2000/include/mach/vmalloc.h | 2 +- arch/arm/mach-ixp23xx/include/mach/vmalloc.h | 2 +- arch/arm/mach-ixp4xx/include/mach/vmalloc.h | 2 +- arch/arm/mach-kirkwood/include/mach/vmalloc.h | 2 +- arch/arm/mach-lh7a40x/include/mach/vmalloc.h | 2 +- arch/arm/mach-loki/include/mach/vmalloc.h | 2 +- arch/arm/mach-mmp/include/mach/vmalloc.h | 2 +- arch/arm/mach-mv78xx0/include/mach/vmalloc.h | 2 +- arch/arm/mach-nomadik/include/mach/vmalloc.h | 2 +- arch/arm/mach-ns9xxx/include/mach/vmalloc.h | 2 +- arch/arm/mach-orion5x/include/mach/vmalloc.h | 2 +- arch/arm/mach-pxa/include/mach/vmalloc.h | 2 +- arch/arm/mach-realview/include/mach/vmalloc.h | 2 +- arch/arm/mach-s3c24a0/include/mach/vmalloc.h | 2 +- arch/arm/mach-sa1100/include/mach/vmalloc.h | 2 +- arch/arm/mach-u300/include/mach/vmalloc.h | 2 +- arch/arm/mach-ux500/include/mach/vmalloc.h | 2 +- arch/arm/mach-w90x900/include/mach/vmalloc.h | 2 +- arch/arm/mm/init.c | 2 +- arch/arm/plat-mxc/include/mach/vmalloc.h | 2 +- arch/arm/plat-s3c/include/mach/vmalloc.h | 2 +- arch/arm/plat-stmp3xxx/include/mach/vmalloc.h | 2 +- 30 files changed, 32 insertions(+), 32 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index f5e693b8bab..4312ee5e3d0 100644 --- 
a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -104,11 +104,11 @@ #endif #ifndef PHYS_OFFSET -#define PHYS_OFFSET (CONFIG_DRAM_BASE) +#define PHYS_OFFSET UL(CONFIG_DRAM_BASE) #endif #ifndef END_MEM -#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) +#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif #ifndef PAGE_OFFSET diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index b011f2e939a..013cfcdc483 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp); * All 32bit addresses are effectively valid for vmalloc... * Sort of meaningless for non-VM targets. */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff +#define VMALLOC_START 0UL +#define VMALLOC_END 0xffffffffUL #define FIRST_USER_ADDRESS (0) diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h index 41c89386e39..c45ba1f62a1 100644 --- a/arch/arm/mach-davinci/include/mach/hardware.h +++ b/arch/arm/mach-davinci/include/mach/hardware.h @@ -27,7 +27,7 @@ /* * I/O mapping */ -#define IO_PHYS 0x01c00000 +#define IO_PHYS 0x01c00000UL #define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */ #define IO_SIZE 0x00400000 #define IO_VIRT (IO_PHYS + IO_OFFSET) diff --git a/arch/arm/mach-dove/include/mach/vmalloc.h b/arch/arm/mach-dove/include/mach/vmalloc.h index 8b2c974755c..a28792cf761 100644 --- a/arch/arm/mach-dove/include/mach/vmalloc.h +++ b/arch/arm/mach-dove/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-dove/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfd800000 +#define VMALLOC_END 0xfd800000UL diff --git a/arch/arm/mach-ep93xx/include/mach/vmalloc.h b/arch/arm/mach-ep93xx/include/mach/vmalloc.h index aed21cd3fe2..1b3f25d03d3 100644 --- a/arch/arm/mach-ep93xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ep93xx/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-ep93xx/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-gemini/include/mach/vmalloc.h b/arch/arm/mach-gemini/include/mach/vmalloc.h index 83e536d9436..45371eb86fc 100644 --- a/arch/arm/mach-gemini/include/mach/vmalloc.h +++ b/arch/arm/mach-gemini/include/mach/vmalloc.h @@ -7,4 +7,4 @@ * (at your option) any later version. 
*/ -#define VMALLOC_END 0xF0000000 +#define VMALLOC_END 0xf0000000UL diff --git a/arch/arm/mach-iop32x/include/mach/vmalloc.h b/arch/arm/mach-iop32x/include/mach/vmalloc.h index 85ceb09d85f..c4862d48e58 100644 --- a/arch/arm/mach-iop32x/include/mach/vmalloc.h +++ b/arch/arm/mach-iop32x/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-iop32x/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-iop33x/include/mach/vmalloc.h b/arch/arm/mach-iop33x/include/mach/vmalloc.h index f9f99dea9bc..48331dc2370 100644 --- a/arch/arm/mach-iop33x/include/mach/vmalloc.h +++ b/arch/arm/mach-iop33x/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-iop33x/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-ixp2000/include/mach/vmalloc.h b/arch/arm/mach-ixp2000/include/mach/vmalloc.h index d195e35aed3..61c8dae24f9 100644 --- a/arch/arm/mach-ixp2000/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp2000/include/mach/vmalloc.h @@ -17,4 +17,4 @@ * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ -#define VMALLOC_END 0xfb000000 +#define VMALLOC_END 0xfb000000UL diff --git a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h index dd519f678d1..896c56a1c00 100644 --- a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h @@ -7,4 +7,4 @@ * specific static I/O. */ -#define VMALLOC_END (0xec000000) +#define VMALLOC_END (0xec000000UL) diff --git a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h index 7b3580b53ad..9bcd64d5985 100644 --- a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h @@ -1,5 +1,5 @@ /* * arch/arm/mach-ixp4xx/include/mach/vmalloc.h */ -#define VMALLOC_END (0xFF000000) +#define VMALLOC_END (0xff000000UL) diff --git a/arch/arm/mach-kirkwood/include/mach/vmalloc.h b/arch/arm/mach-kirkwood/include/mach/vmalloc.h index 8f48260dcda..bf162ca3d2c 100644 --- a/arch/arm/mach-kirkwood/include/mach/vmalloc.h +++ b/arch/arm/mach-kirkwood/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-kirkwood/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h b/arch/arm/mach-lh7a40x/include/mach/vmalloc.h index 3fbd49490bb..d62da7358b1 100644 --- a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h +++ b/arch/arm/mach-lh7a40x/include/mach/vmalloc.h @@ -7,4 +7,4 @@ * version 2 as published by the Free Software Foundation. 
* */ -#define VMALLOC_END (0xe8000000) +#define VMALLOC_END (0xe8000000UL) diff --git a/arch/arm/mach-loki/include/mach/vmalloc.h b/arch/arm/mach-loki/include/mach/vmalloc.h index 8dc3bfcbf9f..5dcbd865443 100644 --- a/arch/arm/mach-loki/include/mach/vmalloc.h +++ b/arch/arm/mach-loki/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-loki/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-mmp/include/mach/vmalloc.h b/arch/arm/mach-mmp/include/mach/vmalloc.h index b60ccaf9fee..1d0bac003ad 100644 --- a/arch/arm/mach-mmp/include/mach/vmalloc.h +++ b/arch/arm/mach-mmp/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * linux/arch/arm/mach-mmp/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h index 1c4954386a8..ba26fe98e64 100644 --- a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h +++ b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-mv78xx0/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h index be12e31ea52..f83d574d944 100644 --- a/arch/arm/mach-nomadik/include/mach/vmalloc.h +++ b/arch/arm/mach-nomadik/include/mach/vmalloc.h @@ -1,2 +1,2 @@ -#define VMALLOC_END 0xe8000000 +#define VMALLOC_END 0xe8000000UL diff --git a/arch/arm/mach-ns9xxx/include/mach/vmalloc.h b/arch/arm/mach-ns9xxx/include/mach/vmalloc.h index fe964d3bcc4..c8651974c4b 100644 --- a/arch/arm/mach-ns9xxx/include/mach/vmalloc.h +++ b/arch/arm/mach-ns9xxx/include/mach/vmalloc.h @@ -11,6 +11,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -#define VMALLOC_END (0xf0000000) +#define VMALLOC_END (0xf0000000UL) #endif /* ifndef __ASM_ARCH_VMALLOC_H */ diff --git a/arch/arm/mach-orion5x/include/mach/vmalloc.h b/arch/arm/mach-orion5x/include/mach/vmalloc.h index 7147a297e97..06b50aeff7b 100644 --- a/arch/arm/mach-orion5x/include/mach/vmalloc.h +++ b/arch/arm/mach-orion5x/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-orion5x/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfd800000 +#define VMALLOC_END 0xfd800000UL diff --git a/arch/arm/mach-pxa/include/mach/vmalloc.h b/arch/arm/mach-pxa/include/mach/vmalloc.h index e90c5eeb81d..bfecfbf5f46 100644 --- a/arch/arm/mach-pxa/include/mach/vmalloc.h +++ b/arch/arm/mach-pxa/include/mach/vmalloc.h @@ -8,4 +8,4 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#define VMALLOC_END (0xe8000000) +#define VMALLOC_END (0xe8000000UL) diff --git a/arch/arm/mach-realview/include/mach/vmalloc.h b/arch/arm/mach-realview/include/mach/vmalloc.h index fe0de1b507a..a2a4c686140 100644 --- a/arch/arm/mach-realview/include/mach/vmalloc.h +++ b/arch/arm/mach-realview/include/mach/vmalloc.h @@ -18,4 +18,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define VMALLOC_END 0xf8000000 +#define VMALLOC_END 0xf8000000UL diff --git a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h index 4d4fe484958..91465682079 100644 --- a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h +++ b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h @@ -12,6 +12,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -#define VMALLOC_END (0xE0000000) +#define VMALLOC_END (0xe0000000UL) #endif /* __ASM_ARCH_VMALLOC_H */ diff --git a/arch/arm/mach-sa1100/include/mach/vmalloc.h b/arch/arm/mach-sa1100/include/mach/vmalloc.h index ec8fdc5a360..b3d00239848 100644 --- a/arch/arm/mach-sa1100/include/mach/vmalloc.h +++ b/arch/arm/mach-sa1100/include/mach/vmalloc.h @@ -1,4 +1,4 @@ /* * arch/arm/mach-sa1100/include/mach/vmalloc.h */ -#define VMALLOC_END (0xe8000000) +#define VMALLOC_END (0xe8000000UL) diff --git a/arch/arm/mach-u300/include/mach/vmalloc.h b/arch/arm/mach-u300/include/mach/vmalloc.h index b00c51a66fb..ec423b92b81 100644 --- a/arch/arm/mach-u300/include/mach/vmalloc.h +++ b/arch/arm/mach-u300/include/mach/vmalloc.h @@ -9,4 +9,4 @@ * End must be above the I/O registers and on an even 2MiB boundary. * Author: Linus Walleij */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h index 86cdbbce184..a4945cb4117 100644 --- a/arch/arm/mach-ux500/include/mach/vmalloc.h +++ b/arch/arm/mach-ux500/include/mach/vmalloc.h @@ -15,4 +15,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define VMALLOC_END 0xf0000000 +#define VMALLOC_END 0xf0000000UL diff --git a/arch/arm/mach-w90x900/include/mach/vmalloc.h b/arch/arm/mach-w90x900/include/mach/vmalloc.h index 2f9dfb92853..b067e44500a 100644 --- a/arch/arm/mach-w90x900/include/mach/vmalloc.h +++ b/arch/arm/mach-w90x900/include/mach/vmalloc.h @@ -18,6 +18,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -#define VMALLOC_END (0xE0000000) +#define VMALLOC_END (0xe0000000UL) #endif /* __ASM_ARCH_VMALLOC_H */ diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index bda481e6bc0..3a207723947 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -670,7 +670,7 @@ void __init mem_init(void) #ifdef CONFIG_MMU MLM(CONSISTENT_BASE, CONSISTENT_END), #endif - MLM(VMALLOC_START, (unsigned long)VMALLOC_END), + MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h index 62d97623412..44243a27843 100644 --- a/arch/arm/plat-mxc/include/mach/vmalloc.h +++ b/arch/arm/plat-mxc/include/mach/vmalloc.h @@ -21,6 +21,6 @@ #define __ASM_ARCH_MXC_VMALLOC_H__ /* vmalloc ending address */ -#define VMALLOC_END 0xF4000000 +#define VMALLOC_END 0xf4000000UL #endif /* __ASM_ARCH_MXC_VMALLOC_H__ */ diff --git 
a/arch/arm/plat-s3c/include/mach/vmalloc.h b/arch/arm/plat-s3c/include/mach/vmalloc.h
index bfd2ca6e307..299d95f365c 100644
--- a/arch/arm/plat-s3c/include/mach/vmalloc.h
+++ b/arch/arm/plat-s3c/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END	(0xE0000000)
+#define VMALLOC_END	(0xe0000000UL)
 
 #endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h b/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h
index 541b880c186..943c1a29d64 100644
--- a/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h
+++ b/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h
@@ -9,4 +9,4 @@
  * http://www.opensource.org/licenses/gpl-license.html
  * http://www.gnu.org/copyleft/gpl.html
  */
-#define VMALLOC_END	(0xF0000000)
+#define VMALLOC_END	0xf0000000UL
-- cgit v1.2.3-70-g09d2


From 4b3073e1c53a256275f1079c0fbfbe85883d9275 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 18 Dec 2009 16:40:18 +0000
Subject: MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself

On VIVT ARM, when we have multiple shared mappings of the same file
in the same MM, we need to ensure that we have coherency across all
copies.  We do this via make_coherent() by making the pages
uncacheable.

This used to work fine, until we allowed highmem with highpte - we
now have a page table which is mapped as required, and is not
available for modification via update_mmu_cache().

Ralf Baechle suggested getting rid of the PTE value passed to
update_mmu_cache():

  On MIPS update_mmu_cache() calls __update_tlb() which walks
  pagetables to construct a pointer to the pte again.  Passing a
  pte_t * is much more elegant.  Maybe we might even replace the
  pte argument with the pte_t?

Ben Herrenschmidt would also like the pte pointer for PowerPC:

  Passing the ptep in there is exactly what I want.  I want that
  -instead- of the PTE value, because I have issue on some ppc cases,
  for I$/D$ coherency, where set_pte_at() may decide to mask out the
  _PAGE_EXEC.

So, pass in the mapped page table pointer into update_mmu_cache(),
and remove the PTE value, updating all implementations and call sites
to suit.
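To see the shape of the new contract (an illustrative sketch;
update_example_tlb() is a made-up helper, not any architecture's real code):

    /* Hypothetical implementation under the new prototype. */
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
    		      pte_t *ptep)
    {
    	pte_t pte = *ptep;	/* the old behaviour: read the value */

    	update_example_tlb(vma, addr, pte);	/* made-up helper */

    	/*
    	 * New possibility: the entry can also be modified through
    	 * the pointer, e.g. set_pte_at(vma->vm_mm, addr, ptep, pte).
    	 */
    }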
Includes a fix from Stephen Rothwell: sparc: fix fallout from update_mmu_cache API change Signed-off-by: Stephen Rothwell Acked-by: Benjamin Herrenschmidt Signed-off-by: Russell King --- Documentation/cachetlb.txt | 6 +++--- arch/alpha/include/asm/pgtable.h | 2 +- arch/arm/include/asm/tlbflush.h | 3 ++- arch/arm/mm/fault-armv.c | 5 +++-- arch/avr32/include/asm/pgtable.h | 2 +- arch/avr32/mm/tlb.c | 4 ++-- arch/cris/include/asm/pgtable.h | 2 +- arch/frv/include/asm/pgtable.h | 2 +- arch/ia64/include/asm/pgtable.h | 2 +- arch/m32r/include/asm/tlbflush.h | 2 +- arch/m32r/mm/fault-nommu.c | 2 +- arch/m32r/mm/fault.c | 6 +++--- arch/m68k/include/asm/pgtable_mm.h | 2 +- arch/microblaze/include/asm/tlbflush.h | 2 +- arch/mips/include/asm/pgtable.h | 3 ++- arch/mn10300/include/asm/pgtable.h | 2 +- arch/mn10300/mm/mmu-context.c | 3 ++- arch/parisc/include/asm/pgtable.h | 2 +- arch/parisc/kernel/cache.c | 4 ++-- arch/powerpc/include/asm/pgtable.h | 2 +- arch/powerpc/mm/mem.c | 4 ++-- arch/s390/include/asm/pgtable.h | 2 +- arch/score/include/asm/pgtable.h | 3 ++- arch/sh/include/asm/pgtable.h | 3 ++- arch/sh/mm/fault_32.c | 2 +- arch/sparc/include/asm/pgtable_32.h | 4 ++-- arch/sparc/include/asm/pgtable_64.h | 2 +- arch/sparc/mm/fault_32.c | 4 ++-- arch/sparc/mm/init_64.c | 3 ++- arch/sparc/mm/nosun4c.c | 2 +- arch/sparc/mm/srmmu.c | 6 +++--- arch/sparc/mm/sun4c.c | 6 +++--- arch/um/include/asm/pgtable.h | 2 +- arch/x86/include/asm/pgtable_32.h | 2 +- arch/x86/include/asm/pgtable_64.h | 2 +- arch/xtensa/include/asm/pgtable.h | 2 +- arch/xtensa/mm/cache.c | 4 ++-- mm/hugetlb.c | 4 ++-- mm/memory.c | 14 +++++++------- mm/migrate.c | 2 +- 40 files changed, 69 insertions(+), 62 deletions(-) (limited to 'arch/arm/include') diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt index da42ab414c4..74a8b6fefa2 100644 --- a/Documentation/cachetlb.txt +++ b/Documentation/cachetlb.txt @@ -88,12 +88,12 @@ changes occur: This is used primarily during fault processing. 5) void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) At the end of every page fault, this routine is invoked to tell the architecture specific code that a translation - described by "pte" now exists at virtual address "address" - for address space "vma->vm_mm", in the software page tables. + now exists at virtual address "address" for address space + "vma->vm_mm", in the software page tables. A port may use this information in any way it so chooses. For example, it could use this event to pre-load TLB diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 3f0c59f6d8a..71a24329414 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -329,7 +329,7 @@ extern pgd_t swapper_pg_dir[1024]; * tables contain all the necessary information. */ extern inline void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { } diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index c2f1605de35..e085e2c545e 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); * cache entries for the kernels virtual memory range are written * back to the page. 
*/ -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); #endif diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index ae88f2c3a6d..c45f9bb318a 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -149,9 +149,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne * * Note that the pte lock will be held. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) { - unsigned long pfn = pte_pfn(pte); + unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h index fecdda16f44..a9ae30c41e7 100644 --- a/arch/avr32/include/asm/pgtable.h +++ b/arch/avr32/include/asm/pgtable.h @@ -325,7 +325,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) struct vm_area_struct; extern void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte); + unsigned long address, pte_t *ptep); /* * Encode and decode a swap entry diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c index 06677be98ff..0da23109f81 100644 --- a/arch/avr32/mm/tlb.c +++ b/arch/avr32/mm/tlb.c @@ -101,7 +101,7 @@ static void update_dtlb(unsigned long address, pte_t pte) } void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { unsigned long flags; @@ -110,7 +110,7 @@ void update_mmu_cache(struct vm_area_struct *vma, return; local_irq_save(flags); - update_dtlb(address, pte); + update_dtlb(address, *ptep); local_irq_restore(flags); } diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h index 1fcce00f01f..99ea6cd1b14 100644 --- a/arch/cris/include/asm/pgtable.h +++ b/arch/cris/include/asm/pgtable.h @@ -270,7 +270,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ * Actually I am not sure on what this could be used for. 
*/ static inline void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { } diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index 22c60692b55..c18b0d32e63 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -505,7 +505,7 @@ static inline int pte_file(pte_t pte) /* * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache */ -static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm; unsigned long ampr; diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 69bf13857a9..c3286f42e50 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -462,7 +462,7 @@ pte_same (pte_t a, pte_t b) return pte_val(a) == pte_val(b); } -#define update_mmu_cache(vma, address, pte) do { } while (0) +#define update_mmu_cache(vma, address, ptep) do { } while (0) extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern void paging_init (void); diff --git a/arch/m32r/include/asm/tlbflush.h b/arch/m32r/include/asm/tlbflush.h index 0ef95307784..92614b0ccf1 100644 --- a/arch/m32r/include/asm/tlbflush.h +++ b/arch/m32r/include/asm/tlbflush.h @@ -92,6 +92,6 @@ static __inline__ void __flush_tlb_all(void) ); } -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); #endif /* _ASM_M32R_TLBFLUSH_H */ diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c index 88469178ea6..888aab1157e 100644 --- a/arch/m32r/mm/fault-nommu.c +++ b/arch/m32r/mm/fault-nommu.c @@ -95,7 +95,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, * update_mmu_cache() *======================================================================*/ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, - pte_t pte) + pte_t *ptep) { BUG(); } diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 7274b47f4c2..28ee389e5f5 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -336,7 +336,7 @@ vmalloc_fault: addr = (address & PAGE_MASK); set_thread_fault_code(error_code); - update_mmu_cache(NULL, addr, *pte_k); + update_mmu_cache(NULL, addr, pte_k); set_thread_fault_code(0); return; } @@ -349,7 +349,7 @@ vmalloc_fault: #define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8)) #define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8)) void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, - pte_t pte) + pte_t *ptep) { volatile unsigned long *entry1, *entry2; unsigned long pte_data, flags; @@ -365,7 +365,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, vaddr = (vaddr & PAGE_MASK) | get_asid(); - pte_data = pte_val(pte); + pte_data = pte_val(*ptep); #ifdef CONFIG_CHIP_OPSP entry1 = (unsigned long *)ITLB_BASE; diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index aca0e28581c..87174c904d2 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -115,7 +115,7 @@ extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode); * they are updated on demand. 
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
 }

diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index eb31a0e8a77..10ec70cd873 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -38,7 +38,7 @@ static inline void local_flush_tlb_range(struct vm_area_struct *vma,

 #define flush_tlb_kernel_range(start, end)	do { } while (0)

-#define update_mmu_cache(vma, addr, pte)	do { } while (0)
+#define update_mmu_cache(vma, addr, ptep)	do { } while (0)

 #define flush_tlb_all local_flush_tlb_all
 #define flush_tlb_mm local_flush_tlb_mm
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 1854336e56a..c56bf8afc09 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -362,8 +362,9 @@ extern void __update_cache(struct vm_area_struct *vma,
 	unsigned long address, pte_t pte);

 static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
+	pte_t pte = *ptep;
 	__update_tlb(vma, address, pte);
 	__update_cache(vma, address, pte);
 }
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 6dc30fc827c..16d88577f3e 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -466,7 +466,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
 * the kernel page tables containing the necessary information by tlb-mn10300.S
 */
 extern void update_mmu_cache(struct vm_area_struct *vma,
-			     unsigned long address, pte_t pte);
+			     unsigned long address, pte_t *ptep);

 #endif /* !__ASSEMBLY__ */

diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 31c9d27a75a..36ba02191d4 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -51,9 +51,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 /*
  * preemptively set a TLB entry
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	unsigned long pteu, ptel, cnx, flags;
+	pte_t pte = *ptep;

 	addr &= PAGE_MASK;
 	ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a27d2e200fb..01c15035e78 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -410,7 +410,7 @@ extern void paging_init (void);

 #define PG_dcache_dirty PG_arch_1

-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

 /* Encode and de-code a swap entry */

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index b6ed34de14e..1054baa2fc6 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -68,9 +68,9 @@ flush_cache_all_local(void)
 EXPORT_SYMBOL(flush_cache_all_local);

 void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	struct page *page = pte_page(pte);
+	struct page *page = pte_page(*ptep);

 	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
 	    test_bit(PG_dcache_dirty, &page->flags)) {
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 21207e54825..89f158731ce 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -209,7 +209,7 @@ extern void paging_init(void);
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

 extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
 		      unsigned long end, int write, struct page **pages, int *nr);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9b152558f9..311224cdb7a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -494,13 +494,13 @@ EXPORT_SYMBOL(flush_icache_user_range);
 * This must always be called with the pte lock held.
 */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t pte)
+		      pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
 	unsigned long access = 0, trap;

 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-	if (!pte_young(pte) || address >= TASK_SIZE)
+	if (!pte_young(*ptep) || address >= TASK_SIZE)
 		return;

 	/* We try to figure out if we are coming from an instruction
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e2fa79cf061..9b5b9189c15 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -43,7 +43,7 @@ extern void vmem_map_init(void);
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)

 /*
  * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 674934b4017..ccf38f06c57 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -272,8 +272,9 @@ extern void __update_cache(struct vm_area_struct *vma,
 	unsigned long address, pte_t pte);

 static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
+	pte_t pte = *ptep;
 	__update_tlb(vma, address, pte);
 	__update_cache(vma, address, pte);
 }
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba3046e4f06..1ff93ac1aa4 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -165,8 +165,9 @@ extern void __update_tlb(struct vm_area_struct *vma,
 			 unsigned long address, pte_t pte);

 static inline void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
+	pte_t pte = *ptep;
 	__update_cache(vma, address, pte);
 	__update_tlb(vma, address, pte);
 }
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 47530104e0a..1677b5ee191 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -371,7 +371,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif

-	update_mmu_cache(NULL, address, entry);
+	update_mmu_cache(NULL, address, pte);

 	return 0;
 }
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index e0cabe790ec..77f906d8cc2 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -330,9 +330,9 @@ BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
 #define FAULT_CODE_WRITE    0x2
 #define FAULT_CODE_USER     0x4

-BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
+BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)

-#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
+#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)

 BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long, unsigned long, unsigned int)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f3cb790fa2a..f5b5fa76c02 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -706,7 +706,7 @@ extern unsigned long find_ecache_flush_span(unsigned long size);
 #define mmu_unlockarea(vaddr, len)	do { } while(0)

 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b99f81c4906..43e20efb251 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -370,7 +370,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
 {
 	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
-					   unsigned long,pte_t);
+					   unsigned long,pte_t *);
 	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
@@ -447,7 +447,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
		 * on the CPU and doing a shrink_mmap() on this vma.
		 */
 		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
-					*ptep);
+					ptep);
 	else
 		do_sparc_fault(regs, text_fault, write, address);
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1886d37d411..9245a822a2f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -289,12 +289,13 @@ static void flush_dcache(unsigned long pfn)
 	}
 }

-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
 	unsigned long tsb_index, tsb_hash_shift;
+	pte_t pte = *ptep;

 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
index 196263f895b..4e62c27147c 100644
--- a/arch/sparc/mm/nosun4c.c
+++ b/arch/sparc/mm/nosun4c.c
@@ -62,7 +62,7 @@ pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
 	return NULL;
 }

-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 }
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 367321a030d..df49b200ca4 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -694,7 +694,7 @@ extern void tsunami_setup_blockops(void);
 * The following code is a deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
-static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
 {
 #if 0
 	static unsigned long last;
@@ -703,10 +703,10 @@ static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad
 	if (address == last) {
 		val = srmmu_hwprobe(address);
-		if (val != 0 && pte_val(pte) != val) {
+		if (val != 0 && pte_val(*ptep) != val) {
 			printk("swift_update_mmu_cache: "
 			    "addr %lx put %08x probed %08x from %p\n",
-			    address, pte_val(pte), val,
+			    address, pte_val(*ptep), val,
 			    __builtin_return_address(0));
 			srmmu_flush_whole_tlb();
 		}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index a89baf0d875..18652534b91 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1887,7 +1887,7 @@ static void sun4c_check_pgt_cache(int low, int high)
 /* An experiment, turn off by default for now... -DaveM */
 #define SUN4C_PRELOAD_PSEG

-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
 	int pseg;
@@ -1929,7 +1929,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
 			start += PAGE_SIZE;
 		}
 #ifndef SUN4C_PRELOAD_PSEG
-		sun4c_put_pte(address, pte_val(pte));
+		sun4c_put_pte(address, pte_val(*ptep));
 #endif
 		local_irq_restore(flags);
 		return;
@@ -1940,7 +1940,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
 		add_lru(entry);
 	}

-	sun4c_put_pte(address, pte_val(pte));
+	sun4c_put_pte(address, pte_val(*ptep));
 	local_irq_restore(flags);
 }
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 9ce3f165111..a9f7251b4a8 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -345,7 +345,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

-#define update_mmu_cache(vma,address,pte) do ; while (0)
+#define update_mmu_cache(vma,address,ptep) do ; while (0)

 /* Encode and de-code a swap entry */
 #define __swp_type(x)		(((x).val >> 4) & 0x3f)
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 01fd9461d32..a2866839650 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -80,7 +80,7 @@ do {						\
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)

 #endif	/* !__ASSEMBLY__ */

diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index c57a3011714..181be528c61 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -129,7 +129,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define pte_unmap(pte) /* NOP */
 #define pte_unmap_nested(pte) /* NOP */

-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)

 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index a138770c358..76bf3555411 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -394,7 +394,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define kern_addr_valid(addr)	(1)

 extern void update_mmu_cache(struct vm_area_struct * vma,
-			     unsigned long address, pte_t pte);
+			     unsigned long address, pte_t *ptep);

 /*
  * remap a physical page `pfn' of size `size' with page protection `prot'
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 3ba990c6767..85df4655d32 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -147,9 +147,9 @@ void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
 #endif

 void
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(pte);
+	unsigned long pfn = pte_pfn(*ptep);
 	struct page *page;

 	if (!pfn_valid(pfn))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e91b81b6367..94cd94df56e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2088,7 +2088,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,

 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, ptep);
 	}
 }

@@ -2559,7 +2559,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = pte_mkyoung(entry);
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
				       flags & FAULT_FLAG_WRITE))
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, ptep);

 out_page_table_lock:
 	spin_unlock(&mm->page_table_lock);
diff --git a/mm/memory.c b/mm/memory.c
index 09e4b1be7b6..72fb5f39bcc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1593,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	/* Ok, finally just insert the thing.. */
 	entry = pte_mkspecial(pfn_pte(pfn, prot));
 	set_pte_at(mm, addr, pte, entry);
-	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */

 	retval = 0;
 out_unlock:
@@ -2116,7 +2116,7 @@ reuse:
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
-			update_mmu_cache(vma, address, entry);
+			update_mmu_cache(vma, address, page_table);
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -2185,7 +2185,7 @@ gotten:
	 * new page to be mapped directly into the secondary page table.
	 */
 	set_pte_at_notify(mm, address, page_table, entry);
-	update_mmu_cache(vma, address, entry);
+	update_mmu_cache(vma, address, page_table);
 	if (old_page) {
 		/*
 		 * Only after switching the pte to the new page may
@@ -2629,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}

 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, pte);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -2694,7 +2694,7 @@ setpte:
 	set_pte_at(mm, address, page_table, entry);

 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, entry);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2855,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);

 		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 	} else {
 		if (charged)
 			mem_cgroup_uncharge_page(page);
@@ -2992,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, pte);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b..e58e5da25b9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -134,7 +134,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 		page_add_file_rmap(new);

 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, pte);
+	update_mmu_cache(vma, addr, ptep);
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
--
cgit v1.2.3-70-g09d2


From f1acb878b6070941e844dfc4ca1b3b9e5a70426c Mon Sep 17 00:00:00 2001
From: Roel Kluin
Date: Sat, 13 Feb 2010 15:54:03 +0100
Subject: iop-adma: redundant/wrong tests in iop_*_count()?

When we reach the loop, len is at least 1. We stay in the loop only while
len is at least MAX_BYTE_COUNT + 1, and MAX_BYTE_COUNT is subtracted in
each iteration. So when we leave the loop, or never took it, len is still
at least 1; testing whether len is non-zero therefore appears redundant.

Signed-off-by: Roel Kluin
Signed-off-by: Dan Williams
---
 arch/arm/include/asm/hardware/iop3xx-adma.h | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 1a8c7279a28..9b28f1243bd 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}

-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;

 	return slot_cnt;
 }
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}

-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;

 	return slot_cnt;
 }
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
 			i += slots_per_op;
 		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

-		if (len) {
-			iter = iop_hw_desc_slot_idx(hw_desc, i);
-			iter->byte_count = len;
-		}
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iter->byte_count = len;
 	}
 }
--
cgit v1.2.3-70-g09d2
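
The invariant behind the change above can be checked in isolation. The sketch below is illustrative only: residual_len() and the MAX_BYTE_COUNT value are stand-ins for the driver's loops and its IOP_ADMA_*_MAX_BYTE_COUNT constants, not code from the patch.

#include <assert.h>
#include <stddef.h>

#define MAX_BYTE_COUNT 16777215UL	/* placeholder limit, not the driver's */

/* Mirrors the shape of the iop_*_count() loops; caller guarantees len >= 1. */
static size_t residual_len(size_t len)
{
	while (len > MAX_BYTE_COUNT)
		len -= MAX_BYTE_COUNT;	/* each pass removes exactly MAX_BYTE_COUNT */

	/*
	 * The loop body only ran while len >= MAX_BYTE_COUNT + 1, so the
	 * subtraction leaves len >= 1; if the loop never ran, len kept the
	 * caller's value, which is also >= 1.  A trailing "if (len)" test
	 * is therefore always true, which is what the patch removes.
	 */
	assert(len >= 1 && len <= MAX_BYTE_COUNT);
	return len;
}

int main(void)
{
	/* spot-check the boundary cases of the argument */
	assert(residual_len(1) == 1);
	assert(residual_len(MAX_BYTE_COUNT) == MAX_BYTE_COUNT);
	assert(residual_len(MAX_BYTE_COUNT + 1) == 1);
	assert(residual_len(3 * MAX_BYTE_COUNT + 7) == 7);
	return 0;
}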