Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/Kconfig              |  4
-rw-r--r--   arch/arm/mm/cache-v7.S           |  6
-rw-r--r--   arch/arm/mm/copypage-fa.c        | 12
-rw-r--r--   arch/arm/mm/copypage-feroceon.c  | 12
-rw-r--r--   arch/arm/mm/copypage-v3.c        | 12
-rw-r--r--   arch/arm/mm/copypage-v4mc.c      |  8
-rw-r--r--   arch/arm/mm/copypage-v4wb.c      | 12
-rw-r--r--   arch/arm/mm/copypage-v4wt.c      | 12
-rw-r--r--   arch/arm/mm/copypage-v6.c        | 12
-rw-r--r--   arch/arm/mm/copypage-xsc3.c      | 12
-rw-r--r--   arch/arm/mm/copypage-xscale.c    |  8
-rw-r--r--   arch/arm/mm/highmem.c            |  4
-rw-r--r--   arch/arm/mm/init.c               |  3
-rw-r--r--   arch/arm/mm/iomap.c              |  3
-rw-r--r--   arch/arm/mm/proc-v7.S            | 30
15 files changed, 73 insertions, 77 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4cefb57d9ed..7edef912163 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -631,7 +631,8 @@ comment "Processor Features"
 
 config ARM_LPAE
 	bool "Support for the Large Physical Address Extension"
-	depends on MMU && CPU_V7
+	depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
+		!CPU_32v4 && !CPU_32v3
 	help
 	  Say Y if you have an ARMv7 processor supporting the LPAE page
 	  table format and you would like to access memory beyond the
@@ -882,6 +883,7 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT_6
 	bool
+	default y if CPU_V7
 	help
 	  Setting ARM L1 cache line size to 64 Bytes.
 
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 07c4bc8ea0a..a655d3da386 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -54,9 +54,15 @@ loop1:
 	and	r1, r1, #7			@ mask of the bits for current cache only
 	cmp	r1, #2				@ see what cache we have at this level
 	blt	skip				@ skip if no cache, or just i-cache
+#ifdef CONFIG_PREEMPT
+	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
+#endif
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
 	isb					@ isb to sych the new cssr&csidr
 	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
+#ifdef CONFIG_PREEMPT
+	restore_irqs_notrace r9
+#endif
 	and	r2, r1, #7			@ extract the length of the cache lines
 	add	r2, r2, #4			@ add 4 (line length offset)
 	ldr	r4, =0x3ff
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d2852e1635b..d130a5ece5d 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	fa_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
  */
 void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns fa_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index ac163de7dc0..49ee0c1a720 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	feroceon_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns feroceon_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index f72303e1d80..3935bddd476 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v3_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\n\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c23034..ec8c3befb9c 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cb589cbb2b6..067d0fdd630 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	v4wb_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 30c7d048a32..b85c5da2e51 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v4wt_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a1552cef..8b03a5814d0 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -38,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 {
 	void *kto, *kfrom;
 
-	kfrom = kmap_atomic(from, KM_USER0);
-	kto = kmap_atomic(to, KM_USER1);
+	kfrom = kmap_atomic(from);
+	kto = kmap_atomic(to);
 	copy_page(kto, kfrom);
-	kunmap_atomic(kto, KM_USER1);
-	kunmap_atomic(kfrom, KM_USER0);
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 /*
@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  */
 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 	clear_page(kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 /*
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index f9cde0702f1..03a2042aced 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	xsc3_mc_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24ced31..439d106ae63 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 void
 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile(
 	"mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c0573abb..5a21505d755 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,7 +36,7 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
@@ -81,7 +81,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 6ec1226fc62..245a55a0a5b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,7 +32,6 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
-#include <asm/memblock.h>
 
 #include "mm.h"
 
@@ -310,7 +309,7 @@ static void arm_memory_present(void)
 
 static bool arm_memblock_steal_permitted = true;
 
-phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align)
+phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 {
 	phys_addr_t phys;
 
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index e62956e1203..4614208369f 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -32,9 +32,6 @@ EXPORT_SYMBOL(pcibios_min_io);
 unsigned long pcibios_min_mem = 0x01000000;
 EXPORT_SYMBOL(pcibios_min_mem);
 
-unsigned int pci_flags = PCI_REASSIGN_ALL_RSRC;
-EXPORT_SYMBOL(pci_flags);
-
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
 	if ((unsigned long)addr >= VMALLOC_START &&
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7e9b5bf910c..f1c8486f750 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -148,10 +148,6 @@ ENDPROC(cpu_v7_do_resume)
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
-*	We automatically detect if we have a Harvard cache, and use the
-*	Harvard cache control instructions insead of the unified cache
-*	control instructions.
-*
 *	This should be able to cover all ARMv7 cores.
 *
 *	It is assumed that:
@@ -234,9 +230,7 @@ __v7_setup:
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
 #endif
 #ifdef CONFIG_ARM_ERRATA_743622
-	teq	r6, #0x20			@ present in r2p0
-	teqne	r6, #0x21			@ present in r2p1
-	teqne	r6, #0x22			@ present in r2p2
+	teq	r5, #0x00200000			@ only present in r2p*
 	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
 	orreq	r10, r10, #1 << 6		@ set bit #6
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
@@ -251,9 +245,7 @@ __v7_setup:
 #endif
 
 3:	mov	r10, #0
-#ifdef HARVARD_CACHE
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
-#endif
 	dsb
#ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
@@ -330,16 +322,6 @@ __v7_ca5mp_proc_info:
 	.size	__v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 
 	/*
-	 * ARM Ltd. Cortex A7 processor.
-	 */
-	.type	__v7_ca7mp_proc_info, #object
-__v7_ca7mp_proc_info:
-	.long	0x410fc070
-	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
-	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
-
-	/*
 	 * ARM Ltd. Cortex A9 processor.
 	 */
 	.type	__v7_ca9mp_proc_info, #object
@@ -351,6 +333,16 @@ __v7_ca9mp_proc_info:
 #endif	/* CONFIG_ARM_LPAE */
 
 	/*
+	 * ARM Ltd. Cortex A7 processor.
+	 */
+	.type	__v7_ca7mp_proc_info, #object
+__v7_ca7mp_proc_info:
+	.long	0x410fc070
+	.long	0xff0ffff0
+	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+
+	/*
 	 * ARM Ltd. Cortex A15 processor.
 	 */
 	.type	__v7_ca15mp_proc_info, #object
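Nearly all of the copypage-*.c churn in this patch is one mechanical conversion: kmap_atomic()/kunmap_atomic() lost their explicit KM_USER0/KM_USER1 slot arguments when the fixed kmap types were replaced by an implicit per-CPU slot stack. A minimal sketch of the calling-convention change, using a hypothetical helper (the pattern is lifted from the copypage-v6.c hunks above):

	/* Hypothetical illustration, not part of the patch. */
	#include <linux/highmem.h>
	#include <linux/mm.h>

	static void example_copy_user_highpage(struct page *to, struct page *from)
	{
		void *kto, *kfrom;

		kfrom = kmap_atomic(from);	/* was: kmap_atomic(from, KM_USER0) */
		kto = kmap_atomic(to);		/* was: kmap_atomic(to, KM_USER1) */
		copy_page(kto, kfrom);
		kunmap_atomic(kto);		/* was: kunmap_atomic(kto, KM_USER1) */
		kunmap_atomic(kfrom);		/* was: kunmap_atomic(kfrom, KM_USER0) */
	}

Because a mapping now occupies the top of a per-CPU stack rather than a named KM_* slot, kunmap_atomic() calls must nest in reverse order of the corresponding kmap_atomic() calls; every converted function in this diff already unmaps in that order.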