Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/Kconfig            |  4
-rw-r--r--   arch/arm/mm/cache-l2x0.c       | 46
-rw-r--r--   arch/arm/mm/context.c          | 14
-rw-r--r--   arch/arm/mm/copypage-v4mc.c    |  6
-rw-r--r--   arch/arm/mm/copypage-v6.c      | 10
-rw-r--r--   arch/arm/mm/copypage-xscale.c  |  6
-rw-r--r--   arch/arm/mm/dma-mapping.c      | 44
-rw-r--r--   arch/arm/mm/fault-armv.c       |  1
-rw-r--r--   arch/arm/mm/init.c             | 10
-rw-r--r--   arch/arm/mm/mmu.c              |  8
-rw-r--r--   arch/arm/mm/proc-arm920.S      | 21
-rw-r--r--   arch/arm/mm/proc-arm926.S      | 21
-rw-r--r--   arch/arm/mm/proc-sa1100.S      | 25
-rw-r--r--   arch/arm/mm/proc-v6.S          | 44
-rw-r--r--   arch/arm/mm/proc-v7.S          | 50
-rw-r--r--   arch/arm/mm/proc-xsc3.S        | 28
-rw-r--r--   arch/arm/mm/proc-xscale.S      | 25
17 files changed, 173 insertions, 190 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 88633fe01a5..67f75a0b66d 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -819,10 +819,10 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || SOC_IMX35 || SOC_IMX31 || MACH_REALVIEW_PBX || \
+		   REALVIEW_EB_A9MP || ARCH_IMX_V6_V7 || MACH_REALVIEW_PBX || \
 		   ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
 		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \
-		   ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX
+		   ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX || ARCH_HIGHBANK
 	default y
 	select OUTER_CACHE
 	select OUTER_CACHE_SYNC
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 3f9b9980478..8ac9e9f8479 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,7 +29,7 @@
 #define CACHE_LINE_SIZE		32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
@@ -126,9 +126,9 @@ static void l2x0_cache_sync(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __l2x0_flush_all(void)
@@ -145,9 +145,9 @@ static void l2x0_flush_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_all(void)
@@ -155,11 +155,11 @@ static void l2x0_clean_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_all(void)
@@ -167,13 +167,13 @@ static void l2x0_inv_all(void)
 	unsigned long flags;
 
 	/* invalidate all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	/* Invalidating when L2 is enabled is a nono */
 	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -181,7 +181,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
 		debug_writel(0x03);
@@ -206,13 +206,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -225,7 +225,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -236,13 +236,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -255,7 +255,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -268,24 +268,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		debug_writel(0x00);
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_disable(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
 	dsb();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_unlock(__u32 cache_id)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfa..93aac068da9 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -16,7 +16,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-static DEFINE_SPINLOCK(cpu_asid_lock);
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 #ifdef CONFIG_SMP
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
-	spin_lock_init(&mm->context.id_lock);
+	raw_spin_lock_init(&mm->context.id_lock);
 }
 
 static void flush_context(void)
@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 	 * the broadcast. This function is also called via IPI so the
 	 * mm->context.id_lock has to be IRQ-safe.
 	 */
-	spin_lock_irqsave(&mm->context.id_lock, flags);
+	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
 	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
 		/*
 		 * Old version of ASID found. Set the new one and
@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 		mm->context.id = asid;
 		cpumask_clear(mm_cpumask(mm));
 	}
-	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
 	/*
 	 * Set the mm_cpumask(mm) bit for the current CPU.
@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
-	spin_lock(&cpu_asid_lock);
+	raw_spin_lock(&cpu_asid_lock);
 #ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from
@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
 	 */
 	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		spin_unlock(&cpu_asid_lock);
+		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
 #endif
@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
 	}
 
 	set_mm_context(mm, asid);
-	spin_unlock(&cpu_asid_lock);
+	raw_spin_unlock(&cpu_asid_lock);
 }
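The spin_lock to raw_spin_lock conversions above (and in the copypage-* files below) keep these locks as true spinning locks on preempt-rt kernels, where a plain spinlock_t can sleep; low-level cache, TLB and ASID maintenance cannot tolerate that. A minimal sketch of the resulting pattern — the my_hw_lock/my_hw_write names are illustrative, not from this patch:

	#include <linux/io.h>
	#include <linux/spinlock.h>

	/* A raw lock stays a spinning lock even on PREEMPT_RT. */
	static DEFINE_RAW_SPINLOCK(my_hw_lock);

	static void my_hw_write(void __iomem *reg, u32 val)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_hw_lock, flags);	/* IRQs off, no preemption */
		writel_relaxed(val, reg);			/* hardware register access */
		raw_spin_unlock_irqrestore(&my_hw_lock, flags);
	}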
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b8061519ce7..7d0a8c23034 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -30,7 +30,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * ARMv4 mini-dcache optimised copy_user_highpage
@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
-	spin_lock(&minicache_lock);
+	raw_spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
-	spin_unlock(&minicache_lock);
+	raw_spin_unlock(&minicache_lock);
 
 	kunmap_atomic(kto, KM_USER1);
 }
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 63cca009713..3d9a1552cef 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
 #define from_address	(0xffff8000)
 #define to_address	(0xffffc000)
 
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
  * Copy the user page.  No aliasing to deal with so we can just
@@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	 * Now copy the page using the same cache colour as the
 	 * pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 
 	copy_page((void *)kto, (void *)kfrom);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 /*
@@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.
	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 649bbcd325b..610c24ced31 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -32,7 +32,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * XScale mini-dcache optimised copy_user_highpage
@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
-	spin_lock(&minicache_lock);
+	raw_spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
-	spin_unlock(&minicache_lock);
+	raw_spin_unlock(&minicache_lock);
 
 	kunmap_atomic(kto, KM_USER1);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 235eb775fc7..e4e7f6cba1a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -18,12 +18,14 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/highmem.h>
+#include <linux/slab.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
+#include <asm/mach/arch.h>
 
 #include "mm.h"
 
@@ -117,26 +119,36 @@ static void __dma_free_buffer(struct page *page, size_t size)
 }
 
 #ifdef CONFIG_MMU
-/* Sanity check size */
-#if (CONSISTENT_DMA_SIZE % SZ_2M)
-#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
-#endif
-
-#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PMD_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PMD_SHIFT)
+#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
 
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
+static pte_t **consistent_pte;
+
+#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
+
+unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+
+void __init init_consistent_dma_size(unsigned long size)
+{
+	unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
+
+	BUG_ON(consistent_pte); /* Check we're called before DMA region init */
+	BUG_ON(base < VMALLOC_END);
+
+	/* Grow region to accommodate specified size */
+	if (base < consistent_base)
+		consistent_base = base;
+}
 
 #include "vmregion.h"
 
 static struct arm_vmregion_head consistent_head = {
 	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
-	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
@@ -155,7 +167,17 @@ static int __init consistent_init(void)
 	pmd_t *pmd;
 	pte_t *pte;
 	int i = 0;
-	u32 base = CONSISTENT_BASE;
+	unsigned long base = consistent_base;
+	unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;
+
+	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
+	if (!consistent_pte) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
+	consistent_head.vm_start = base;
 
 	do {
 		pgd = pgd_offset(&init_mm, base);
@@ -198,7 +220,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	size_t align;
 	int bit;
 
-	if (!consistent_pte[0]) {
+	if (!consistent_pte) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
 		dump_stack();
 		return NULL;
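The dma-mapping.c change replaces the compile-time CONSISTENT_DMA_SIZE with a boot-time value: the coherent region still ends at CONSISTENT_END, defaults to 2MB, and can only be grown. A sketch of how a platform might use the new hook — the board name is hypothetical, and the prototype is assumed to come from an arch header outside this diffstat:

	#include <asm/sizes.h>

	extern void __init init_consistent_dma_size(unsigned long size);

	static void __init myboard_map_io(void)
	{
		/* Must run before consistent_init() allocates consistent_pte,
		 * i.e. early in the machine's map_io path; the size is rounded
		 * up to 2MB and can only grow the default region. */
		init_consistent_dma_size(SZ_14M);
	}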
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 7cab7917942..7599e2625c7 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -8,7 +8,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f8037ba338a..fbdd12ea3a5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mman.h>
+#include <linux/export.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
@@ -660,9 +661,6 @@ void __init mem_init(void)
 			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
 			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-#ifdef CONFIG_MMU
-			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#endif
 			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #ifdef CONFIG_HIGHMEM
@@ -681,9 +679,6 @@ void __init mem_init(void)
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
 			MLK(FIXADDR_START, FIXADDR_TOP),
-#ifdef CONFIG_MMU
-			MLM(CONSISTENT_BASE, CONSISTENT_END),
-#endif
 			MLM(VMALLOC_START, VMALLOC_END),
 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -706,9 +701,6 @@ void __init mem_init(void)
 	 * be detected at build time already.
 	 */
 #ifdef CONFIG_MMU
-	BUILD_BUG_ON(VMALLOC_END	> CONSISTENT_BASE);
-	BUG_ON(VMALLOC_END		> CONSISTENT_BASE);
-
 	BUILD_BUG_ON(TASK_SIZE		> MODULES_VADDR);
 	BUG_ON(TASK_SIZE		> MODULES_VADDR);
 #endif
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 226f1804be1..dc8c550e6cb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -273,6 +273,14 @@ static struct mem_type mem_types[] = {
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_SO] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_MT_UNCACHED,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
+				PMD_SECT_UNCACHED | PMD_SECT_XN,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
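The new MT_MEMORY_SO type gives kernel mappings strongly-ordered, shared, non-executable semantics — useful for RAM shared with hardware that needs strict access ordering. A sketch of how a platform could map a region with it (the MT_MEMORY_SO enum value itself is added in a header outside this diffstat; the addresses and names below are made up):

	#include <linux/kernel.h>
	#include <asm/mach/map.h>

	static struct map_desc myboard_sram_desc[] __initdata = {
		{
			.virtual	= 0xfe000000,			/* example VA */
			.pfn		= __phys_to_pfn(0x40000000),	/* example PA */
			.length		= SZ_1M,
			.type		= MT_MEMORY_SO,	/* strongly-ordered, XN */
		},
	};

	static void __init myboard_map_io(void)
	{
		iotable_init(myboard_sram_desc, ARRAY_SIZE(myboard_sram_desc));
	}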
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2e6849b41f6..88fb3d9e064 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -379,31 +379,26 @@ ENTRY(cpu_arm920_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm920_suspend_size
-.equ	cpu_arm920_suspend_size, 4 * 4
+.equ	cpu_arm920_suspend_size, 4 * 3
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
-	stmfd	sp!, {r4 - r7, lr}
+	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
 	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
-	mrc	p15, 0, r6, c2, c0, 0	@ TTB address
-	mrc	p15, 0, r7, c1, c0, 0	@ Control register
-	stmia	r0, {r4 - r7}
-	ldmfd	sp!, {r4 - r7, pc}
+	mrc	p15, 0, r6, c1, c0, 0	@ Control register
+	stmia	r0, {r4 - r6}
+	ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(cpu_arm920_do_suspend)
 
 ENTRY(cpu_arm920_do_resume)
 	mov	ip, #0
 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
-	ldmia	r0, {r4 - r7}
+	ldmia	r0, {r4 - r6}
 	mcr	p15, 0, r4, c13, c0, 0	@ PID
 	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
-	mcr	p15, 0, r6, c2, c0, 0	@ TTB address
-	mov	r0, r7			@ control register
-	mov	r2, r6, lsr #14		@ get TTB0 base
-	mov	r2, r2, lsl #14
-	ldr	r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
-		     PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
+	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
+	mov	r0, r6			@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_arm920_do_resume)
 #endif
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index cd8f79c3a28..9f8fd91f918 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -394,31 +394,26 @@ ENTRY(cpu_arm926_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm926_suspend_size
-.equ	cpu_arm926_suspend_size, 4 * 4
+.equ	cpu_arm926_suspend_size, 4 * 3
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
-	stmfd	sp!, {r4 - r7, lr}
+	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
 	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
-	mrc	p15, 0, r6, c2, c0, 0	@ TTB address
-	mrc	p15, 0, r7, c1, c0, 0	@ Control register
-	stmia	r0, {r4 - r7}
-	ldmfd	sp!, {r4 - r7, pc}
+	mrc	p15, 0, r6, c1, c0, 0	@ Control register
+	stmia	r0, {r4 - r6}
+	ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(cpu_arm926_do_suspend)
 
 ENTRY(cpu_arm926_do_resume)
 	mov	ip, #0
 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
-	ldmia	r0, {r4 - r7}
+	ldmia	r0, {r4 - r6}
 	mcr	p15, 0, r4, c13, c0, 0	@ PID
 	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
-	mcr	p15, 0, r6, c2, c0, 0	@ TTB address
-	mov	r0, r7			@ control register
-	mov	r2, r6, lsr #14		@ get TTB0 base
-	mov	r2, r2, lsl #14
-	ldr	r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
-		     PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
+	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
+	mov	r0, r6			@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_arm926_do_resume)
 #endif
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 69e7f2ef738..7d91545d089 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -168,20 +168,19 @@ ENTRY(cpu_sa1100_set_pte_ext)
 	mov	pc, lr
 
 .globl	cpu_sa1100_suspend_size
-.equ	cpu_sa1100_suspend_size, 4*4
+.equ	cpu_sa1100_suspend_size, 4 * 3
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_sa1100_do_suspend)
-	stmfd	sp!, {r4 - r7, lr}
+	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c3, c0, 0		@ domain ID
-	mrc	p15, 0, r5, c2, c0, 0		@ translation table base addr
-	mrc	p15, 0, r6, c13, c0, 0		@ PID
-	mrc	p15, 0, r7, c1, c0, 0		@ control reg
-	stmia	r0, {r4 - r7}			@ store cp regs
-	ldmfd	sp!, {r4 - r7, pc}
+	mrc	p15, 0, r5, c13, c0, 0		@ PID
+	mrc	p15, 0, r6, c1, c0, 0		@ control reg
+	stmia	r0, {r4 - r6}			@ store cp regs
+	ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(cpu_sa1100_do_suspend)
 
 ENTRY(cpu_sa1100_do_resume)
-	ldmia	r0, {r4 - r7}			@ load cp regs
+	ldmia	r0, {r4 - r6}			@ load cp regs
 	mov	ip, #0
 	mcr	p15, 0, ip, c8, c7, 0		@ flush I+D TLBs
 	mcr	p15, 0, ip, c7, c7, 0		@ flush I&D cache
@@ -189,13 +188,9 @@ ENTRY(cpu_sa1100_do_resume)
 	mcr	p15, 0, ip, c9, c0, 5		@ allow user space to use RB
 
 	mcr	p15, 0, r4, c3, c0, 0		@ domain ID
-	mcr	p15, 0, r5, c2, c0, 0		@ translation table base addr
-	mcr	p15, 0, r6, c13, c0, 0		@ PID
-	mov	r0, r7				@ control register
-	mov	r2, r5, lsr #14			@ get TTB0 base
-	mov	r2, r2, lsl #14
-	ldr	r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
-		     PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
+	mcr	p15, 0, r1, c2, c0, 0		@ translation table base addr
+	mcr	p15, 0, r5, c13, c0, 0		@ PID
+	mov	r0, r6				@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_sa1100_do_resume)
 #endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index a923aa0fd00..d061d2fa550 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -128,20 +128,18 @@ ENTRY(cpu_v6_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl	cpu_v6_suspend_size
-.equ	cpu_v6_suspend_size, 4 * 8
+.equ	cpu_v6_suspend_size, 4 * 6
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v6_do_suspend)
-	stmfd	sp!, {r4 - r11, lr}
+	stmfd	sp!, {r4 - r9, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
-	mrc	p15, 0, r5, c13, c0, 1	@ Context ID
-	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
-	mrc	p15, 0, r7, c2, c0, 0	@ Translation table base 0
-	mrc	p15, 0, r8, c2, c0, 1	@ Translation table base 1
-	mrc	p15, 0, r9, c1, c0, 1	@ auxiliary control register
-	mrc	p15, 0, r10, c1, c0, 2	@ co-processor access control
-	mrc	p15, 0, r11, c1, c0, 0	@ control register
-	stmia	r0, {r4 - r11}
-	ldmfd	sp!, {r4 - r11, pc}
+	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
+	mrc	p15, 0, r6, c2, c0, 1	@ Translation table base 1
+	mrc	p15, 0, r7, c1, c0, 1	@ auxiliary control register
+	mrc	p15, 0, r8, c1, c0, 2	@ co-processor access control
+	mrc	p15, 0, r9, c1, c0, 0	@ control register
+	stmia	r0, {r4 - r9}
+	ldmfd	sp!, {r4 - r9, pc}
 ENDPROC(cpu_v6_do_suspend)
 
 ENTRY(cpu_v6_do_resume)
@@ -150,25 +148,21 @@ ENTRY(cpu_v6_do_resume)
 	mcr	p15, 0, ip, c7, c5, 0	@ invalidate I cache
 	mcr	p15, 0, ip, c7, c15, 0	@ clean+invalidate cache
 	mcr	p15, 0, ip, c7, c10, 4	@ drain write buffer
-	ldmia	r0, {r4 - r11}
+	mcr	p15, 0, ip, c13, c0, 1	@ set reserved context ID
+	ldmia	r0, {r4 - r9}
 	mcr	p15, 0, r4, c13, c0, 0	@ FCSE/PID
-	mcr	p15, 0, r5, c13, c0, 1	@ Context ID
-	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
-	mcr	p15, 0, r7, c2, c0, 0	@ Translation table base 0
-	mcr	p15, 0, r8, c2, c0, 1	@ Translation table base 1
-	mcr	p15, 0, r9, c1, c0, 1	@ auxiliary control register
-	mcr	p15, 0, r10, c1, c0, 2	@ co-processor access control
+	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
+	ALT_SMP(orr	r1, r1, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r1, r1, #TTB_FLAGS_UP)
+	mcr	p15, 0, r1, c2, c0, 0	@ Translation table base 0
+	mcr	p15, 0, r6, c2, c0, 1	@ Translation table base 1
+	mcr	p15, 0, r7, c1, c0, 1	@ auxiliary control register
+	mcr	p15, 0, r8, c1, c0, 2	@ co-processor access control
 	mcr	p15, 0, ip, c2, c0, 2	@ TTB control register
 	mcr	p15, 0, ip, c7, c5, 4	@ ISB
-	mov	r0, r11			@ control register
-	mov	r2, r7, lsr #14		@ get TTB0 base
-	mov	r2, r2, lsl #14
-	ldr	r3, cpu_resume_l1_flags
+	mov	r0, r9			@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_v6_do_resume)
-cpu_resume_l1_flags:
-	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
-	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
 #endif
 
 	string	cpu_v6_name, "ARMv6-compatible processor"
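A pattern shared by every cpu_*_do_resume in this series: the routine no longer restores a TTB value saved at suspend time. Instead, the generic resume path (outside this diffstat) is expected to pass the physical address of the page directory in r1, which v6 and v7 OR with the SMP/UP TTB flags before writing TTB0; this is what lets the per-CPU cpu_resume_l1_flags literals go away. Viewed from C, the contract is roughly the following — an illustrative prototype, not a declaration from this patch:

	/* r0: pointer to the save area written by cpu_*_do_suspend
	 * r1: physical address of the resume page directory; CPU-specific
	 *     code ORs in its own TTB flags, so a bare address is passed. */
	typedef void (*cpu_do_resume_fn)(void *save_area, u32 pgd_phys);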
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9591c8e9fb8..2c559ac3814 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -217,56 +217,50 @@ ENDPROC(cpu_v7_set_pte_ext)
 
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl	cpu_v7_suspend_size
-.equ	cpu_v7_suspend_size, 4 * 9
+.equ	cpu_v7_suspend_size, 4 * 7
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
-	stmfd	sp!, {r4 - r11, lr}
+	stmfd	sp!, {r4 - r10, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
-	mrc	p15, 0, r5, c13, c0, 1	@ Context ID
-	mrc	p15, 0, r6, c13, c0, 3	@ User r/o thread ID
-	stmia	r0!, {r4 - r6}
+	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
+	stmia	r0!, {r4 - r5}
 	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
-	mrc	p15, 0, r7, c2, c0, 0	@ TTB 0
-	mrc	p15, 0, r8, c2, c0, 1	@ TTB 1
-	mrc	p15, 0, r9, c1, c0, 0	@ Control register
-	mrc	p15, 0, r10, c1, c0, 1	@ Auxiliary control register
-	mrc	p15, 0, r11, c1, c0, 2	@ Co-processor access control
-	stmia	r0, {r6 - r11}
-	ldmfd	sp!, {r4 - r11, pc}
+	mrc	p15, 0, r7, c2, c0, 1	@ TTB 1
+	mrc	p15, 0, r8, c1, c0, 0	@ Control register
+	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
+	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
+	stmia	r0, {r6 - r10}
+	ldmfd	sp!, {r4 - r10, pc}
 ENDPROC(cpu_v7_do_suspend)
 
 ENTRY(cpu_v7_do_resume)
 	mov	ip, #0
 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate TLBs
 	mcr	p15, 0, ip, c7, c5, 0	@ invalidate I cache
-	ldmia	r0!, {r4 - r6}
+	mcr	p15, 0, ip, c13, c0, 1	@ set reserved context ID
+	ldmia	r0!, {r4 - r5}
 	mcr	p15, 0, r4, c13, c0, 0	@ FCSE/PID
-	mcr	p15, 0, r5, c13, c0, 1	@ Context ID
-	mcr	p15, 0, r6, c13, c0, 3	@ User r/o thread ID
-	ldmia	r0, {r6 - r11}
+	mcr	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
+	ldmia	r0, {r6 - r10}
 	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
-	mcr	p15, 0, r7, c2, c0, 0	@ TTB 0
-	mcr	p15, 0, r8, c2, c0, 1	@ TTB 1
+	ALT_SMP(orr	r1, r1, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r1, r1, #TTB_FLAGS_UP)
+	mcr	p15, 0, r1, c2, c0, 0	@ TTB 0
+	mcr	p15, 0, r7, c2, c0, 1	@ TTB 1
 	mcr	p15, 0, ip, c2, c0, 2	@ TTB control register
 	mrc	p15, 0, r4, c1, c0, 1	@ Read Auxiliary control register
-	teq	r4, r10			@ Is it already set?
-	mcrne	p15, 0, r10, c1, c0, 1	@ No, so write it
-	mcr	p15, 0, r11, c1, c0, 2	@ Co-processor access control
+	teq	r4, r9			@ Is it already set?
+	mcrne	p15, 0, r9, c1, c0, 1	@ No, so write it
+	mcr	p15, 0, r10, c1, c0, 2	@ Co-processor access control
 	ldr	r4, =PRRR		@ PRRR
 	ldr	r5, =NMRR		@ NMRR
 	mcr	p15, 0, r4, c10, c2, 0	@ write PRRR
 	mcr	p15, 0, r5, c10, c2, 1	@ write NMRR
 	isb
 	dsb
-	mov	r0, r9			@ control register
-	mov	r2, r7, lsr #14		@ get TTB0 base
-	mov	r2, r2, lsl #14
-	ldr	r3, cpu_resume_l1_flags
+	mov	r0, r8			@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_v7_do_resume)
-cpu_resume_l1_flags:
-	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
-	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
 #endif
 
 	__CPUINIT
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 755e1bf2268..abf0507a08a 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -406,24 +406,23 @@ ENTRY(cpu_xsc3_set_pte_ext)
 	.align
 
 .globl	cpu_xsc3_suspend_size
-.equ	cpu_xsc3_suspend_size, 4 * 7
+.equ	cpu_xsc3_suspend_size, 4 * 6
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
-	stmfd	sp!, {r4 - r10, lr}
+	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
 	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
 	mrc	p15, 0, r6, c13, c0, 0	@ PID
 	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
-	mrc	p15, 0, r8, c2, c0, 0	@ translation table base addr
-	mrc	p15, 0, r9, c1, c0, 1	@ auxiliary control reg
-	mrc	p15, 0, r10, c1, c0, 0	@ control reg
+	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
+	mrc	p15, 0, r9, c1, c0, 0	@ control reg
 	bic	r4, r4, #2		@ clear frequency change bit
-	stmia	r0, {r4 - r10}		@ store cp regs
-	ldmia	sp!, {r4 - r10, pc}
+	stmia	r0, {r4 - r9}		@ store cp regs
+	ldmia	sp!, {r4 - r9, pc}
 ENDPROC(cpu_xsc3_do_suspend)
 
 ENTRY(cpu_xsc3_do_resume)
-	ldmia	r0, {r4 - r10}		@ load cp regs
+	ldmia	r0, {r4 - r9}		@ load cp regs
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
 	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
@@ -433,15 +432,10 @@ ENTRY(cpu_xsc3_do_resume)
 	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
 	mcr	p15, 0, r6, c13, c0, 0	@ PID
 	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
-	mcr	p15, 0, r8, c2, c0, 0	@ translation table base addr
-	mcr	p15, 0, r9, c1, c0, 1	@ auxiliary control reg
-
-	@ temporarily map resume_turn_on_mmu into the page table,
-	@ otherwise prefetch abort occurs after MMU is turned on
-	mov	r0, r10			@ control register
-	mov	r2, r8, lsr #14		@ get TTB0 base
-	mov	r2, r2, lsl #14
-	ldr	r3, =0x542e		@ section flags
+	orr	r1, r1, #0x18		@ cache the page table in L2
+	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
+	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
+	mov	r0, r9			@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_xsc3_do_resume)
 #endif
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index fbc06e55b87..3277904beba 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -520,24 +520,23 @@ ENTRY(cpu_xscale_set_pte_ext)
 	.align
 
 .globl	cpu_xscale_suspend_size
-.equ	cpu_xscale_suspend_size, 4 * 7
+.equ	cpu_xscale_suspend_size, 4 * 6
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xscale_do_suspend)
-	stmfd	sp!, {r4 - r10, lr}
+	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
 	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
 	mrc	p15, 0, r6, c13, c0, 0	@ PID
 	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
-	mrc	p15, 0, r8, c2, c0, 0	@ translation table base addr
-	mrc	p15, 0, r9, c1, c1, 0	@ auxiliary control reg
-	mrc	p15, 0, r10, c1, c0, 0	@ control reg
+	mrc	p15, 0, r8, c1, c1, 0	@ auxiliary control reg
+	mrc	p15, 0, r9, c1, c0, 0	@ control reg
 	bic	r4, r4, #2		@ clear frequency change bit
-	stmia	r0, {r4 - r10}		@ store cp regs
-	ldmfd	sp!, {r4 - r10, pc}
+	stmia	r0, {r4 - r9}		@ store cp regs
+	ldmfd	sp!, {r4 - r9, pc}
ENDPROC(cpu_xscale_do_suspend)
 
 ENTRY(cpu_xscale_do_resume)
-	ldmia	r0, {r4 - r10}		@ load cp regs
+	ldmia	r0, {r4 - r9}		@ load cp regs
 	mov	ip, #0
 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
@@ -545,13 +544,9 @@ ENTRY(cpu_xscale_do_resume)
 	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
 	mcr	p15, 0, r6, c13, c0, 0	@ PID
 	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
-	mcr	p15, 0, r8, c2, c0, 0	@ translation table base addr
-	mcr	p15, 0, r9, c1, c1, 0	@ auxiliary control reg
-	mov	r0, r10			@ control register
-	mov	r2, r8, lsr #14		@ get TTB0 base
-	mov	r2, r2, lsl #14
-	ldr	r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
-		     PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
+	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
+	mcr	p15, 0, r8, c1, c1, 0	@ auxiliary control reg
+	mov	r0, r9			@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_xscale_do_resume)
 #endif