From a74b74a5555c741ed3df896096e33b853995631e Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Wed, 15 Dec 2010 07:20:16 +0800 Subject: ARM: pxa: PXA_ESERIES depends on FB_W100. As arch/arm/mach-pxa/eseries.c references w100fb_gpio_{read,write}() directly. Signed-off-by: Lennert Buytenhek Signed-off-by: Eric Miao --- arch/arm/mach-pxa/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index dd235ecc9d6..c93e73d54dd 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -540,6 +540,7 @@ config MACH_ICONTROL config ARCH_PXA_ESERIES bool "PXA based Toshiba e-series PDAs" select PXA25x + select FB_W100 config MACH_E330 bool "Toshiba e330" -- cgit v1.2.3-70-g09d2 From 1ae1b5f053cf36bd0f913e83f3b136fec8152d4d Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 18 Dec 2010 13:57:00 +0000 Subject: ARM: smp: avoid incrementing mm_users on CPU startup We should not be incrementing mm_users when we startup a secondary CPU - doing so results in mm_users incrementing by one each time we hotplug a CPU, which will eventually wrap, and will cause problems. Other architectures such as x86 do not increment mm_users, but only mm_count, so we follow that pattern. Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 8c195959025..9066473c0eb 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -310,7 +310,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void) * All kernel threads share the same mm context; grab a * reference and switch to it. */ - atomic_inc(&mm->mm_users); atomic_inc(&mm->mm_count); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); -- cgit v1.2.3-70-g09d2 From 39af22a79232373764904576f31572f1db76af10 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 15 Dec 2010 15:14:45 -0500 Subject: ARM: get rid of kmap_high_l1_vipt() Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is no longer necessary to carry an ad hoc version of kmap_atomic() added in commit 7e5a69e83b "ARM: 6007/1: fix highmem with VIPT cache and DMA" to cope with reentrancy. In fact, it is now actively wrong to rely on fixed kmap type indices (namely KM_L1_CACHE) as kmap_atomic() totally ignores them now and a concurrent instance of it may reuse any slot for any purpose. Signed-off-by: Nicolas Pitre --- arch/arm/include/asm/highmem.h | 3 -- arch/arm/mm/dma-mapping.c | 7 ++-- arch/arm/mm/flush.c | 7 ++-- arch/arm/mm/highmem.c | 87 ------------------------------------------ 4 files changed, 8 insertions(+), 96 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 1fc684e70ab..7080e2c8fa6 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page); extern void *kmap_high_get(struct page *page); extern void kunmap_high(struct page *page); -extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte); -extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte); - /* * The following functions are already defined by * when CONFIG_HIGHMEM is not set. 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ac6a36142fc..809f1bf9fa2 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, op(vaddr, len, dir); kunmap_high(page); } else if (cache_is_vipt()) { - pte_t saved_pte; - vaddr = kmap_high_l1_vipt(page, &saved_pte); + /* unmapped pages might still be cached */ + vaddr = kmap_atomic(page); op(vaddr + offset, len, dir); - kunmap_high_l1_vipt(page, saved_pte); + kunmap_atomic(vaddr); } } else { vaddr = page_address(page) + offset; diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 391ffae7509..c29f2839f1d 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) __cpuc_flush_dcache_area(addr, PAGE_SIZE); kunmap_high(page); } else if (cache_is_vipt()) { - pte_t saved_pte; - addr = kmap_high_l1_vipt(page, &saved_pte); + /* unmapped pages might still be cached */ + addr = kmap_atomic(page); __cpuc_flush_dcache_area(addr, PAGE_SIZE); - kunmap_high_l1_vipt(page, saved_pte); + kunmap_atomic(addr); } } diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index c435fd9e1da..807c0573abb 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr) pte = TOP_PTE(vaddr); return pte_page(*pte); } - -#ifdef CONFIG_CPU_CACHE_VIPT - -#include - -/* - * The VIVT cache of a highmem page is always flushed before the page - * is unmapped. Hence unmapped highmem pages need no cache maintenance - * in that case. - * - * However unmapped pages may still be cached with a VIPT cache, and - * it is not possible to perform cache maintenance on them using physical - * addresses unfortunately. So we have no choice but to set up a temporary - * virtual mapping for that purpose. - * - * Yet this VIPT cache maintenance may be triggered from DMA support - * functions which are possibly called from interrupt context. As we don't - * want to keep interrupt disabled all the time when such maintenance is - * taking place, we therefore allow for some reentrancy by preserving and - * restoring the previous fixmap entry before the interrupted context is - * resumed. If the reentrancy depth is 0 then there is no need to restore - * the previous fixmap, and leaving the current one in place allow it to - * be reused the next time without a TLB flush (common with DMA). 
- */ - -static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); - -void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) -{ - unsigned int idx, cpu; - int *depth; - unsigned long vaddr, flags; - pte_t pte, *ptep; - - if (!in_interrupt()) - preempt_disable(); - - cpu = smp_processor_id(); - depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); - - idx = KM_L1_CACHE + KM_TYPE_NR * cpu; - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - ptep = TOP_PTE(vaddr); - pte = mk_pte(page, kmap_prot); - - raw_local_irq_save(flags); - (*depth)++; - if (pte_val(*ptep) == pte_val(pte)) { - *saved_pte = pte; - } else { - *saved_pte = *ptep; - set_pte_ext(ptep, pte, 0); - local_flush_tlb_kernel_page(vaddr); - } - raw_local_irq_restore(flags); - - return (void *)vaddr; -} - -void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte) -{ - unsigned int idx, cpu = smp_processor_id(); - int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); - unsigned long vaddr, flags; - pte_t pte, *ptep; - - idx = KM_L1_CACHE + KM_TYPE_NR * cpu; - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - ptep = TOP_PTE(vaddr); - pte = mk_pte(page, kmap_prot); - - BUG_ON(pte_val(*ptep) != pte_val(pte)); - BUG_ON(*depth <= 0); - - raw_local_irq_save(flags); - (*depth)--; - if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) { - set_pte_ext(ptep, saved_pte, 0); - local_flush_tlb_kernel_page(vaddr); - } - raw_local_irq_restore(flags); - - if (!in_interrupt()) - preempt_enable(); -} - -#endif /* CONFIG_CPU_CACHE_VIPT */ -- cgit v1.2.3-70-g09d2 From 25cbe45440ea89a3b0f6f7ed326d3d476d53068b Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 15 Dec 2010 23:29:04 -0500 Subject: ARM: fix cache-xsc3l2 after stack based kmap_atomic() Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is actively wrong to rely on fixed kmap type indices (namely KM_L2_CACHE) as kmap_atomic() totally ignores them and a concurrent instance of it may happily reuse any slot for any purpose. Because kmap_atomic() is now able to deal with reentrancy, we can get rid of the ad hoc mapping here, and we even don't have to disable IRQs anymore (highmem case). While the code is made much simpler, there is a needless cache flush introduced by the usage of __kunmap_atomic(). It is not clear if the performance difference to remove that is worth the cost in code maintenance (I don't think there are that many highmem users on that platform if at all anyway). 
Signed-off-by: Nicolas Pitre --- arch/arm/mm/cache-xsc3l2.c | 57 +++++++++++++++++----------------------------- 1 file changed, 21 insertions(+), 36 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index c3154928bcc..5a32020471e 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c @@ -17,14 +17,10 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include +#include #include #include #include -#include -#include -#include -#include -#include "mm.h" #define CR_L2 (1 << 26) @@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void) dsb(); } +static inline void l2_unmap_va(unsigned long va) +{ #ifdef CONFIG_HIGHMEM -#define l2_map_save_flags(x) raw_local_save_flags(x) -#define l2_map_restore_flags(x) raw_local_irq_restore(x) -#else -#define l2_map_save_flags(x) ((x) = 0) -#define l2_map_restore_flags(x) ((void)(x)) + if (va != -1) + kunmap_atomic((void *)va); #endif +} -static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, - unsigned long flags) +static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va) { #ifdef CONFIG_HIGHMEM unsigned long va = prev_va & PAGE_MASK; @@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, /* * Switching to a new page. Because cache ops are * using virtual addresses only, we must put a mapping - * in place for it. We also enable interrupts for a - * short while and disable them again to protect this - * mapping. + * in place for it. */ - unsigned long idx; - raw_local_irq_restore(flags); - idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); - va = __fix_to_virt(FIX_KMAP_BEGIN + idx); - raw_local_irq_restore(flags | PSR_I_BIT); - set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0); - local_flush_tlb_kernel_page(va); + l2_unmap_va(prev_va); + va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT); } return va + (pa_offset >> (32 - PAGE_SHIFT)); #else @@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, static void xsc3_l2_inv_range(unsigned long start, unsigned long end) { - unsigned long vaddr, flags; + unsigned long vaddr; if (start == 0 && end == -1ul) { xsc3_l2_inv_all(); @@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) } vaddr = -1; /* to force the first mapping */ - l2_map_save_flags(flags); /* * Clean and invalidate partial first cache line. */ if (start & (CACHE_LINE_SIZE - 1)) { - vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags); + vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); start = (start | (CACHE_LINE_SIZE - 1)) + 1; @@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) * Invalidate all full cache lines between 'start' and 'end'. */ while (start < (end & ~(CACHE_LINE_SIZE - 1))) { - vaddr = l2_map_va(start, vaddr, flags); + vaddr = l2_map_va(start, vaddr); xsc3_l2_inv_mva(vaddr); start += CACHE_LINE_SIZE; } @@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) * Clean and invalidate partial last cache line. 
*/ if (start < end) { - vaddr = l2_map_va(start, vaddr, flags); + vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); } - l2_map_restore_flags(flags); + l2_unmap_va(vaddr); dsb(); } static void xsc3_l2_clean_range(unsigned long start, unsigned long end) { - unsigned long vaddr, flags; + unsigned long vaddr; vaddr = -1; /* to force the first mapping */ - l2_map_save_flags(flags); start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { - vaddr = l2_map_va(start, vaddr, flags); + vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); start += CACHE_LINE_SIZE; } - l2_map_restore_flags(flags); + l2_unmap_va(vaddr); dsb(); } @@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void) static void xsc3_l2_flush_range(unsigned long start, unsigned long end) { - unsigned long vaddr, flags; + unsigned long vaddr; if (start == 0 && end == -1ul) { xsc3_l2_flush_all(); @@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end) } vaddr = -1; /* to force the first mapping */ - l2_map_save_flags(flags); start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { - vaddr = l2_map_va(start, vaddr, flags); + vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); start += CACHE_LINE_SIZE; } - l2_map_restore_flags(flags); + l2_unmap_va(vaddr); dsb(); } -- cgit v1.2.3-70-g09d2 From 6d3e6d3640052cac958d61c44597cc216f6ee09f Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 16 Dec 2010 14:56:34 -0500 Subject: ARM: fix cache-feroceon-l2 after stack based kmap_atomic() Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is actively wrong to rely on fixed kmap type indices (namely KM_L2_CACHE) as kmap_atomic() totally ignores them and a concurrent instance of it may happily reuse any slot for any purpose. Because kmap_atomic() is now able to deal with reentrancy, we can get rid of the ad hoc mapping here. While the code is made much simpler, there is a needless cache flush introduced by the usage of __kunmap_atomic(). It is not clear if the performance difference to remove that is worth the cost in code maintenance (I don't think there are that many highmem users on that platform anyway) but that should be reconsidered when/if someone cares enough to do some measurements. Signed-off-by: Nicolas Pitre --- arch/arm/mm/cache-feroceon-l2.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index 6e77c042d8e..e0b0e7a4ec6 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -13,13 +13,9 @@ */ #include +#include #include -#include -#include -#include -#include #include -#include "mm.h" /* * Low-level cache maintenance operations. @@ -39,27 +35,30 @@ * between which we don't want to be preempted. */ -static inline unsigned long l2_start_va(unsigned long paddr) +static inline unsigned long l2_get_va(unsigned long paddr) { #ifdef CONFIG_HIGHMEM /* - * Let's do our own fixmap stuff in a minimal way here. * Because range ops can't be done on physical addresses, * we simply install a virtual mapping for it only for the * TLB lookup to occur, hence no need to flush the untouched - * memory mapping. This is protected with the disabling of - * interrupts by the caller. + * memory mapping afterwards (note: a cache flush may happen + * in some circumstances depending on the path taken in kunmap_atomic). 
*/ - unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); - unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0); - local_flush_tlb_kernel_page(vaddr); - return vaddr + (paddr & ~PAGE_MASK); + void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT); + return (unsigned long)vaddr + (paddr & ~PAGE_MASK); #else return __phys_to_virt(paddr); #endif } +static inline void l2_put_va(unsigned long vaddr) +{ +#ifdef CONFIG_HIGHMEM + kunmap_atomic((void *)vaddr); +#endif +} + static inline void l2_clean_pa(unsigned long addr) { __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr)); @@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end) */ BUG_ON((start ^ end) >> PAGE_SHIFT); - raw_local_irq_save(flags); - va_start = l2_start_va(start); + va_start = l2_get_va(start); va_end = va_start + (end - start); + raw_local_irq_save(flags); __asm__("mcr p15, 1, %0, c15, c9, 4\n\t" "mcr p15, 1, %1, c15, c9, 5" : : "r" (va_start), "r" (va_end)); raw_local_irq_restore(flags); + l2_put_va(va_start); } static inline void l2_clean_inv_pa(unsigned long addr) @@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end) */ BUG_ON((start ^ end) >> PAGE_SHIFT); - raw_local_irq_save(flags); - va_start = l2_start_va(start); + va_start = l2_get_va(start); va_end = va_start + (end - start); + raw_local_irq_save(flags); __asm__("mcr p15, 1, %0, c15, c11, 4\n\t" "mcr p15, 1, %1, c15, c11, 5" : : "r" (va_start), "r" (va_end)); raw_local_irq_restore(flags); + l2_put_va(va_start); } static inline void l2_inv_all(void) -- cgit v1.2.3-70-g09d2 From 537de3a67c0c86586eacffde40673b727242dc3a Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Wed, 22 Dec 2010 04:52:05 +0100 Subject: ARM: 6536/1: Add missing SZ_{32,64,128} ... and also remove misleading comment stating that this header is auto-generated. Signed-off-by: Stephen Warren Acked-by: Uwe Kleine-Knig Signed-off-by: Russell King --- arch/arm/include/asm/sizes.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h index 4fc1565e4f9..316bb2b2be3 100644 --- a/arch/arm/include/asm/sizes.h +++ b/arch/arm/include/asm/sizes.h @@ -13,9 +13,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* DO NOT EDIT!! - this file automatically generated - * from .s file by awk -f s2h.awk - */ /* Size definitions * Copyright (C) ARM Limited 1998. All rights reserved. */ @@ -25,6 +22,9 @@ /* handy sizes */ #define SZ_16 0x00000010 +#define SZ_32 0x00000020 +#define SZ_64 0x00000040 +#define SZ_128 0x00000080 #define SZ_256 0x00000100 #define SZ_512 0x00000200 -- cgit v1.2.3-70-g09d2 From d13e5edd7284bedcf5952e1b6490e39ad843cb91 Mon Sep 17 00:00:00 2001 From: Todd Android Poynor Date: Thu, 23 Dec 2010 01:52:44 +0100 Subject: ARM: 6540/1: Stop irqsoff trace on return to user If the irqsoff tracer is in use, stop tracing the interrupt disable interval when returning to userspace. Tracing userspace execution time as interrupts disabled time is not helpful for kernel performance analysis purposes. Only do so if the irqsoff tracer is enabled, to avoid overhead for lockdep, which doesn't care. 
Signed-off-by: Todd Poynor Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 8bfa98757cd..80bf8cd88d7 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -29,6 +29,9 @@ ret_fast_syscall: ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK bne fast_work_pending +#if defined(CONFIG_IRQSOFF_TRACER) + asm_trace_hardirqs_on +#endif /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr @@ -65,6 +68,9 @@ ret_slow_syscall: tst r1, #_TIF_WORK_MASK bne work_pending no_work_pending: +#if defined(CONFIG_IRQSOFF_TRACER) + asm_trace_hardirqs_on +#endif /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr -- cgit v1.2.3-70-g09d2 From 7c0ab43e6ab09d72dc8dbac2521b2f819ccc4026 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Mon, 3 Jan 2011 02:26:53 +0100 Subject: ARM: 6605/1: Add missing include "asm/memory.h" This patch fixes below build error by adding the missing asm/memory.h, which is needed for arch_is_coherent(). $ make pxa3xx_defconfig; make CC init/do_mounts_rd.o In file included from include/linux/list_bl.h:5, from include/linux/rculist_bl.h:7, from include/linux/dcache.h:7, from include/linux/fs.h:381, from init/do_mounts_rd.c:3: include/linux/bit_spinlock.h: In function 'bit_spin_unlock': include/linux/bit_spinlock.h:61: error: implicit declaration of function 'arch_is_coherent' make[1]: *** [init/do_mounts_rd.o] Error 1 make: *** [init] Error 2 Signed-off-by: Axel Lin Acked-by: Peter Huewe Signed-off-by: Russell King --- arch/arm/include/asm/system.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 1120f18a6b1..80025948b8a 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -150,6 +150,7 @@ extern unsigned int user_debug; #define rmb() dmb() #define wmb() mb() #else +#include #define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) #define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) #define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -- cgit v1.2.3-70-g09d2 From 82427de2c7c39ee7bcaa4cb0260b4e9b9ab19eb8 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Wed, 15 Dec 2010 07:20:16 +0800 Subject: ARM: pxa: PXA_ESERIES depends on FB_W100. As arch/arm/mach-pxa/eseries.c references w100fb_gpio_{read,write}() directly. 
Signed-off-by: Lennert Buytenhek Signed-off-by: Eric Miao --- arch/arm/mach-pxa/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index dd235ecc9d6..c93e73d54dd 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -540,6 +540,7 @@ config MACH_ICONTROL config ARCH_PXA_ESERIES bool "PXA based Toshiba e-series PDAs" select PXA25x + select FB_W100 config MACH_E330 bool "Toshiba e330" -- cgit v1.2.3-70-g09d2 From 823a2df258627b80df2e75056b850424a8eb5fed Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Wed, 29 Dec 2010 09:06:26 +0200 Subject: ARM: it8152: add IT8152_LAST_IRQ definition to fix build error The commit 6ac6b817f3f4c23c5febd960d8deb343e13af5f3 (ARM: pxa: encode IRQ number into .nr_irqs) removed definition of ITE_LAST_IRQ which caused the following build error: CC arch/arm/common/it8152.o arch/arm/common/it8152.c: In function 'it8152_init_irq': arch/arm/common/it8152.c:86: error: 'IT8152_LAST_IRQ' undeclared (first use in this function) arch/arm/common/it8152.c:86: error: (Each undeclared identifier is reported only once arch/arm/common/it8152.c:86: error: for each function it appears in.) make[2]: *** [arch/arm/common/it8152.o] Error 1 Defining the IT8152_LAST_IRQ in the arch/arm/include/hardware/it8152.c fixes the build. Signed-off-by: Mike Rapoport Signed-off-by: Eric Miao --- arch/arm/include/asm/hardware/it8152.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index 21fa272301f..b2f95c72287 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -76,6 +76,7 @@ extern unsigned long it8152_base_address; IT8152_PD_IRQ(0) Audio controller (ACR) */ #define IT8152_IRQ(x) (IRQ_BOARD_START + (x)) +#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40) /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ #define IT8152_LD_IRQ_COUNT 9 -- cgit v1.2.3-70-g09d2 From 24c78557741395e038e83f25367cf2bfd7f582b8 Mon Sep 17 00:00:00 2001 From: "Aric D. Blumer" Date: Wed, 29 Dec 2010 11:18:29 -0500 Subject: ARM: pxa: fix page table corruption on resume Before this patch, the following error would sometimes occur after a resume on pxa3xx: /path/to/mm/memory.c:144: bad pmd 8040542e. The problem was that a temporary page table mapping was being improperly restored. The PXA3xx resume code creates a temporary mapping of resume_turn_on_mmu to avoid a prefetch abort. The pxa3xx_resume_after_mmu code requires that the r1 register holding the address of this mapping not be modified, however, resume_turn_on_mmu does modify it. It is mostly correct in that r1 receives the base table address, but it may also get other bits in 13:0. This results in pxa3xx_resume_after_mmu restoring the original mapping to the wrong place, corrupting memory and leaving the temporary mapping in place. Signed-off-by: Matt Reimer Signed-off-by: Eric Miao --- arch/arm/mach-pxa/sleep.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 52c30b01a67..ae008110db4 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S @@ -353,8 +353,8 @@ resume_turn_on_mmu: @ Let us ensure we jump to resume_after_mmu only when the mcr above @ actually took effect. They call it the "cpwait" operation. 
- mrc p15, 0, r1, c2, c0, 0 @ queue a dependency on CP15 - sub pc, r2, r1, lsr #32 @ jump to virtual addr + mrc p15, 0, r0, c2, c0, 0 @ queue a dependency on CP15 + sub pc, r2, r0, lsr #32 @ jump to virtual addr nop nop nop -- cgit v1.2.3-70-g09d2
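
The three highmem cache-maintenance patches in this series (kmap_high_l1_vipt removal, cache-xsc3l2, cache-feroceon-l2) all converge on the same pattern: map the highmem page with the stack-based kmap_atomic(), perform the cache operation on the temporary virtual address, then drop the mapping with kunmap_atomic(). The fragment below is an illustrative sketch of that pattern only — it is not taken from any patch above. It assumes CONFIG_HIGHMEM and an ARM build providing __cpuc_flush_dcache_area(), as used in the flush.c hunk earlier in the series.

/*
 * Illustrative sketch, not part of any patch in this series.
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

static void flush_one_highmem_page(struct page *page)
{
	/*
	 * Since commit 3e4d3af501 kmap_atomic() takes its slot from a
	 * per-CPU stack, so no fixed KM_* index, fixmap bookkeeping or
	 * IRQ masking is needed around the temporary mapping.
	 */
	void *vaddr = kmap_atomic(page);

	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);
}

The cost noted in the xsc3l2 and feroceon commit messages — a possibly needless cache flush in __kunmap_atomic() — is the price of reusing the generic helper instead of a hand-rolled fixmap mapping.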