Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig               5
-rw-r--r--  arch/arm/mm/Makefile              1
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c   1
-rw-r--r--  arch/arm/mm/cache-l2x0.c         11
-rw-r--r--  arch/arm/mm/cache-v3.S          137
-rw-r--r--  arch/arm/mm/cache-v4.S            2
-rw-r--r--  arch/arm/mm/context.c             3
-rw-r--r--  arch/arm/mm/dma-mapping.c         5
-rw-r--r--  arch/arm/mm/mmu.c                75
-rw-r--r--  arch/arm/mm/proc-arm740.S        30
-rw-r--r--  arch/arm/mm/proc-arm920.S         2
-rw-r--r--  arch/arm/mm/proc-arm926.S         2
-rw-r--r--  arch/arm/mm/proc-mohawk.S         2
-rw-r--r--  arch/arm/mm/proc-sa1100.S         2
-rw-r--r--  arch/arm/mm/proc-syms.c           2
-rw-r--r--  arch/arm/mm/proc-v6.S             2
-rw-r--r--  arch/arm/mm/proc-v7.S            19
-rw-r--r--  arch/arm/mm/proc-xsc3.S           2
-rw-r--r--  arch/arm/mm/proc-xscale.S         2
-rw-r--r--  arch/arm/mm/tcm.h                17
20 files changed, 121 insertions(+), 201 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 025d1732873..4045c4931a3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
- select CPU_CACHE_V3 # although the core is v4t
+ select CPU_CACHE_V4
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
help
@@ -469,9 +469,6 @@ config CPU_PABRT_V7
bool
# The cache model
-config CPU_CACHE_V3
- bool
-
config CPU_CACHE_V4
bool
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 4e333fa2756..9e51be96f63 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
-obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc..48bc3c0a87c 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
outer_cache.inv_range = feroceon_l2_inv_range;
outer_cache.clean_range = feroceon_l2_clean_range;
outer_cache.flush_range = feroceon_l2_flush_range;
+ outer_cache.inv_all = l2_inv_all;
enable_l2();
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308..c465faca51b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
int lockregs;
int i;
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
lockregs = 8;
break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
- & L2X0_CACHE_ID_PART_MASK;
+ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
.flush_all = l2x0_flush_all,
.inv_all = l2x0_inv_all,
.disable = l2x0_disable,
- .set_debug = pl310_set_debug,
},
};
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
data->save();
of_init = true;
- l2x0_init(l2x0_base, aux_val, aux_mask);
-
memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+ l2x0_init(l2x0_base, aux_val, aux_mask);
return 0;
}
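
The two cache-l2x0.c hunks above share one idea: the CACHE_ID register is now kept unmasked (so the revision bits stay available), and the part-number mask is applied only at the point of comparison; the l2x0_of_init() reorder additionally installs the DT-provided outer_cache callbacks before l2x0_init() runs. A minimal C sketch of the masking pattern, illustrative rather than kernel code; the mask and part values are the ones defined in cache-l2x0.h:

#include <stdint.h>

#define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
#define L2X0_CACHE_ID_PART_L310		(3 << 6)

/* cache_id is the raw CACHE_ID register value, revision bits included */
static int is_pl310(uint32_t cache_id)
{
	return (cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310;
}
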
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d..00000000000
--- a/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/mm/cache-v3.S
- *
- * Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- * flush_icache_all()
- *
- * Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
- mov pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- * flush_user_cache_all()
- *
- * Invalidate all cache entries in a particular address
- * space.
- *
- * - mm - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
- /* FALLTHROUGH */
-/*
- * flush_kern_cache_all()
- *
- * Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
- /* FALLTHROUGH */
-
-/*
- * flush_user_cache_range(start, end, flags)
- *
- * Invalidate a range of cache entries in the specified
- * address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
- mov ip, #0
- mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * coherent_kern_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
- /* FALLTHROUGH */
-
-/*
- * coherent_user_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_user_range)
- mov r0, #0
- mov pc, lr
-
-/*
- * flush_kern_dcache_area(void *page, size_t size)
- *
- * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
- *
- * - addr - kernel address
- * - size - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
- /* FALLTHROUGH */
-
-/*
- * dma_flush_range(start, end)
- *
- * Clean and invalidate the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_flush_range)
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
- teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
- /* FALLTHROUGH */
-
-/*
- * dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_map_area)
- mov pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
- .globl v3_flush_kern_cache_louis
- .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 43e5d77be67..a7ba68f59f0 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
- mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
+ mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
#else
/* FALLTHROUGH */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a5a4b2bc42b..2ac37372ef5 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
+ dummy_flush_tlb_a15_erratum();
}
atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c7e3759f16d..e9db6b4bf65 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -342,6 +342,7 @@ static int __init atomic_pool_init(void)
{
struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ gfp_t gfp = GFP_KERNEL | GFP_DMA;
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
@@ -361,8 +362,8 @@ static int __init atomic_pool_init(void)
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
- ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
- &page, atomic_pool_init);
+ ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+ atomic_pool_init);
if (ptr) {
int i;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996ab78..a84ff763ac3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
#include <asm/mach/pci.h>
#include "mm.h"
+#include "tcm.h"
/*
* empty_zero_page is a special page that is used for
@@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
{
- pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
/*
- * Try a section mapping - end, addr and phys must all be aligned
- * to a section boundary. Note that PMDs refer to the individual
- * L1 entries, whereas PGDs refer to a group of L1 entries making
- * up one logical pointer to an L2 table.
+ * In classic MMU format, puds and pmds are folded in to
+ * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+ * group of L1 entries making up one logical pointer to
+ * an L2 table (2MB), where as PMDs refer to the individual
+ * L1 entries (1MB). Hence increment to get the correct
+ * offset for odd 1MB sections.
+ * (See arch/arm/include/asm/pgtable-2level.h)
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
- pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
- if (addr & SECTION_SIZE)
- pmd++;
+ if (addr & SECTION_SIZE)
+ pmd++;
#endif
+ do {
+ *pmd = __pmd(phys | type->prot_sect);
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
- do {
- *pmd = __pmd(phys | type->prot_sect);
- phys += SECTION_SIZE;
- } while (pmd++, addr += SECTION_SIZE, addr != end);
+ flush_pmd_entry(pmd);
+}
- flush_pmd_entry(p);
- } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
/*
- * No need to loop; pte's aren't interested in the
- * individual L1 entries.
+ * With LPAE, we must loop over to map
+ * all the pmds for the given range.
*/
- alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
- }
+ next = pmd_addr_end(addr, end);
+
+ /*
+ * Try a section mapping - addr, next and phys must all be
+ * aligned to a section boundary.
+ */
+ if (type->prot_sect &&
+ ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ map_init_section(pmd, addr, next, phys, type);
+ } else {
+ alloc_init_pte(pmd, addr, next,
+ __phys_to_pfn(phys), type);
+ }
+
+ phys += next - addr;
+
+ } while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
+ tcm_init();
top_pmd = pmd_off_k(0xffff0000);
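
The mmu.c rework above splits the old alloc_init_section() into map_init_section() plus an alloc_init_pmd() loop driven by pmd_addr_end(), so that with LPAE every pmd covered by [addr, end) is visited instead of only the first. As an illustrative sketch (classic 2-level constants assumed; not the kernel macro), pmd_addr_end() just clamps each step at the next pmd boundary:

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Clamp the step at the next pmd boundary, or at end if that comes first.
 * The "- 1" keeps the comparison safe if the boundary wraps to 0. */
static unsigned long pmd_addr_end_sketch(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;
	return (boundary - 1 < end - 1) ? boundary : end;
}
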
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index dc5de5d53f2..fde2d2a794c 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
mcr p15, 0, r0, c6, c0 @ set area 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
+ cmp r3, #0
+ moveq r0, #0
+ beq 2f
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
+2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
mov r0, #0x06
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
+ .long 0
b __arm740_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
.long cpu_arm740_name
.long arm740_processor_functions
.long 0
.long 0
- .long v3_cache_fns @ cache model
+ .long v4_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
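
Two things change in __arm740_setup: the size loop now uses movs, so the bne actually tests the value being shifted (with plain mov the flags never changed inside the loop), and a zero CONFIG_FLASH_SIZE now disables area 2 instead of programming a bogus minimum-size region. In C, the register value being built is roughly the following, an assumed rendering of the ARM740T MPU encoding where size field n means 2^(n+1) bytes and 11 is the 4KB minimum:

#include <stdint.h>

static uint32_t mpu_region(uint32_t base, uint32_t size_bytes)
{
	uint32_t pages = size_bytes >> 12;	/* size in 4KB units */
	uint32_t field = 10;			/* 11 encodes 4KB */

	if (!pages)
		return 0;		/* region disabled, as the new "beq 2f" path writes */
	while (pages) {
		field++;		/* field ends up as log2(size) - 1 */
		pages >>= 1;
	}
	return (base & 0xFFFFF000) | (field << 1) | 1;	/* bit 0 = enable */
}
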
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5..2556cf1c2da 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e297..344c8a548cc 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d..0b60dd3d742 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd8..d92dfd08142 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 3e6210b4d6d..054b491ff76 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
#else
EXPORT_SYMBOL(processor);
#endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bcaaa8de932..5c07ee4fe3e 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5..f584d3f5b37 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
- __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca15mp_setup
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
+ * Qualcomm Inc. Krait processors.
+ */
+ .type __krait_proc_info, #object
+__krait_proc_info:
+ .long 0x510f0400 @ Required ID value
+ .long 0xff0ffc00 @ Mask for ID
+ /*
+ * Some Krait processors don't indicate support for SDIV and UDIV
+ * instructions in the ARM instruction set, even though they actually
+ * do support them.
+ */
+ __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+ .size __krait_proc_info, . - __krait_proc_info
+
+ /*
* Match any ARMv7 processor core.
*/
.type __v7_proc_info, #object
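
The new __krait_proc_info entry forces HWCAP_IDIV because Krait implements SDIV/UDIV without advertising them, while the Cortex-A7/A15 entries drop the same override now that the hwcap is detected from ID_ISAR0 at boot. Userspace sees the result through the ELF auxiliary vector; a small check, assuming glibc's getauxval() and the uapi HWCAP_IDIVA/HWCAP_IDIVT bits that the kernel-side HWCAP_IDIV shorthand expands to:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("idiv (ARM): %s, idiv (Thumb): %s\n",
	       (hwcap & HWCAP_IDIVA) ? "yes" : "no",
	       (hwcap & HWCAP_IDIVT) ? "yes" : "no");
	return 0;
}
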
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f3..e8efd83b6f2 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa1..e766f889bfd 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
new file mode 100644
index 00000000000..8015ad434a4
--- /dev/null
+++ b/arch/arm/mm/tcm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif