Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig         |  10
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c  | 182
-rw-r--r--  arch/arm/mm/init.c          |  32
-rw-r--r--  arch/arm/mm/ioremap.c       |   2
-rw-r--r--  arch/arm/mm/proc-arm720.S   |   2
-rw-r--r--  arch/arm/mm/proc-xsc3.S     |  22
6 files changed, 213 insertions(+), 37 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3a6c8ec34cd..ed15f876c72 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -187,7 +187,7 @@ config CPU_ARM926T
ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || \
ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || \
ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || \
- ARCH_NS9XXX || ARCH_DAVINCI
+ ARCH_NS9XXX || ARCH_DAVINCI || ARCH_MX2
default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || \
ARCH_OMAP730 || ARCH_OMAP16XX || \
ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || \
@@ -742,3 +742,11 @@ config CACHE_L2X0
select OUTER_CACHE
help
This option enables the L2x0 PrimeCell.
+
+config CACHE_XSC3L2
+ bool "Enable the L2 cache on XScale3"
+ depends on CPU_XSC3
+ default y
+ select OUTER_CACHE
+ help
+ This option enables the L2 cache on XScale3.
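
Selecting OUTER_CACHE is what lets the new driver below hook into the generic ARM outer-cache layer. For orientation, a minimal sketch of that interface, modelled on <asm/cacheflush.h> of this kernel generation (treat the exact field set and header location as assumptions, not a quotation of this tree):

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

extern struct outer_cache_fns outer_cache;

/*
 * Generic code goes through wrappers like this one, which quietly
 * do nothing when no outer-cache driver has registered itself.
 */
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}

xsc3_l2_init() at the end of the new file fills in these three pointers.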
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
new file mode 100644
index 00000000000..158bd96763d
--- /dev/null
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -0,0 +1,182 @@
+/*
+ * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
+ *
+ * Copyright (C) 2007 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/spinlock.h>
+
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+#define CR_L2 (1 << 26)
+
+#define CACHE_LINE_SIZE 32
+#define CACHE_LINE_SHIFT 5
+#define CACHE_WAY_PER_SET 8
+
+#define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf))
+#define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
+
+static inline int xsc3_l2_present(void)
+{
+ unsigned long l2ctype;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
+
+ return !!(l2ctype & 0xf8);
+}
+
+static inline void xsc3_l2_clean_mva(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
+}
+
+static inline void xsc3_l2_clean_pa(unsigned long addr)
+{
+ xsc3_l2_clean_mva(__phys_to_virt(addr));
+}
+
+static inline void xsc3_l2_inv_mva(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
+}
+
+static inline void xsc3_l2_inv_pa(unsigned long addr)
+{
+ xsc3_l2_inv_mva(__phys_to_virt(addr));
+}
+
+static inline void xsc3_l2_inv_all(void)
+{
+ unsigned long l2ctype, set_way;
+ int set, way;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
+
+ for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
+ for (way = 0; way < CACHE_WAY_PER_SET; way++) {
+ set_way = (way << 29) | (set << 5);
+ __asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
+ }
+ }
+
+ dsb();
+}
+
+static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
+{
+ if (start == 0 && end == -1ul) {
+ xsc3_l2_inv_all();
+ return;
+ }
+
+ /*
+ * Clean and invalidate partial first cache line.
+ */
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ xsc3_l2_clean_pa(start & ~(CACHE_LINE_SIZE - 1));
+ xsc3_l2_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+ start = (start | (CACHE_LINE_SIZE - 1)) + 1;
+ }
+
+ /*
+ * Clean and invalidate partial last cache line.
+ */
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
+ xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
+ end &= ~(CACHE_LINE_SIZE - 1);
+ }
+
+ /*
+ * Invalidate all full cache lines between 'start' and 'end'.
+ */
+ while (start != end) {
+ xsc3_l2_inv_pa(start);
+ start += CACHE_LINE_SIZE;
+ }
+
+ dsb();
+}
+
+static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
+{
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ xsc3_l2_clean_pa(start);
+ start += CACHE_LINE_SIZE;
+ }
+
+ dsb();
+}
+
+/*
+ * Clean and invalidate the entire L2 cache by set/way.
+ */
+static inline void xsc3_l2_flush_all(void)
+{
+ unsigned long l2ctype, set_way;
+ int set, way;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
+
+ for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
+ for (way = 0; way < CACHE_WAY_PER_SET; way++) {
+ set_way = (way << 29) | (set << 5);
+ __asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
+ }
+ }
+
+ dsb();
+}
+
+static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
+{
+ if (start == 0 && end == -1ul) {
+ xsc3_l2_flush_all();
+ return;
+ }
+
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ xsc3_l2_clean_pa(start);
+ xsc3_l2_inv_pa(start);
+ start += CACHE_LINE_SIZE;
+ }
+
+ dsb();
+}
+
+static int __init xsc3_l2_init(void)
+{
+ if (!cpu_is_xsc3() || !xsc3_l2_present())
+ return 0;
+
+ if (!(get_cr() & CR_L2)) {
+ pr_info("XScale3 L2 cache enabled.\n");
+ adjust_cr(CR_L2, CR_L2);
+ xsc3_l2_inv_all();
+ }
+
+ outer_cache.inv_range = xsc3_l2_inv_range;
+ outer_cache.clean_range = xsc3_l2_clean_range;
+ outer_cache.flush_range = xsc3_l2_flush_range;
+
+ return 0;
+}
+core_initcall(xsc3_l2_init);
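
The geometry macros above decode the L2CTYPE register (read via mrc p15, 1, ..., c0, c0, 1): bits [11:8] give the way size as 8 KiB shifted left by the field value, the cache is 8-way, and lines are 32 bytes, so the number of lines per way falls out of a shift. A standalone worked example of that arithmetic and of the set/way operand built in the inv_all/flush_all loops (the sample L2CTYPE value is hypothetical):

#include <stdio.h>

#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8
#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

int main(void)
{
	unsigned long l2ctype = 2 << 8;	/* hypothetical: way-size field = 2 */
	int set = 3, way = 5;		/* arbitrary line for the operand demo */

	/* 8192 << 2 = 32 KiB per way; 8 ways -> 256 KiB of L2 */
	printf("way size %d KiB, lines/way %d, total %d KiB\n",
	       CACHE_WAY_SIZE(l2ctype) / 1024,
	       CACHE_SET_SIZE(l2ctype),
	       CACHE_WAY_PER_SET * CACHE_WAY_SIZE(l2ctype) / 1024);

	/* The way index lives in bits [31:29] (3 bits for 8 ways); the set
	 * index starts at bit 5, the line-offset shift. */
	printf("set/way operand: 0x%08lx\n",
	       ((unsigned long)way << 29) | ((unsigned long)set << CACHE_LINE_SHIFT));
	return 0;
}

This prints "way size 32 KiB, lines/way 1024, total 256 KiB" and "set/way operand: 0xa0000060".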
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e6352946dde..30a69d67d67 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -156,9 +156,9 @@ static int __init check_initrd(struct meminfo *mi)
}
if (initrd_node == -1) {
- printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
+ printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
"physical memory - disabling initrd\n",
- phys_initrd_start, end);
+ phys_initrd_start, phys_initrd_size);
phys_initrd_start = phys_initrd_size = 0;
}
#endif
@@ -239,25 +239,33 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
+ /*
+ * Reserve any special node zero regions.
+ */
+ if (node == 0)
+ reserve_node_zero(pgdat);
+
#ifdef CONFIG_BLK_DEV_INITRD
/*
* If the initrd is in this node, reserve its memory.
*/
if (node == initrd_node) {
- reserve_bootmem_node(pgdat, phys_initrd_start,
- phys_initrd_size, BOOTMEM_DEFAULT);
- initrd_start = __phys_to_virt(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
+ int res = reserve_bootmem_node(pgdat, phys_initrd_start,
+ phys_initrd_size, BOOTMEM_EXCLUSIVE);
+
+ if (res == 0) {
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ } else {
+ printk(KERN_ERR
+ "INITRD: 0x%08lx+0x%08lx overlaps in-use "
+ "memory region - disabling initrd\n",
+ phys_initrd_start, phys_initrd_size);
+ }
}
#endif
/*
- * Finally, reserve any node zero regions.
- */
- if (node == 0)
- reserve_node_zero(pgdat);
-
- /*
* initialise the zones within this node.
*/
memset(zone_size, 0, sizeof(zone_size));
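
Two things change in bootmem_init_node(): the node-zero reservations now happen before the initrd is reserved, and the initrd reservation uses BOOTMEM_EXCLUSIVE instead of BOOTMEM_DEFAULT. The ordering matters because BOOTMEM_EXCLUSIVE makes reserve_bootmem_node() report a conflict instead of silently stacking a second reservation, so the kernel's own regions must already be marked reserved for an overlapping initrd to be caught. A condensed sketch of the resulting contract, assuming the bootmem API of this era (the return-value semantics are the assumption here):

/*
 * BOOTMEM_DEFAULT: always succeeds, overlaps go unnoticed.
 * BOOTMEM_EXCLUSIVE: returns nonzero (-EBUSY) if any page in the
 * range is already reserved.
 */
if (reserve_bootmem_node(pgdat, phys_initrd_start,
			 phys_initrd_size, BOOTMEM_EXCLUSIVE) == 0) {
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
}
/* else: initrd_start stays 0, which is what "disabling initrd" means */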
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 303a7ff6bfd..b81dbf9ffb7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -259,7 +259,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
* caller shouldn't need to know that small detail.
*
* 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
+ * mapping. See <asm/pgtable.h> for more information.
*/
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index d64f8e6f75a..eda733d3045 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -231,7 +231,7 @@ cpu_arm720_name:
.align
/*
- * See linux/include/asm-arm/procinfo.h for a definition of this structure.
+ * See <asm/procinfo.h> for a definition of this structure.
*/
.section ".proc.info.init", #alloc, #execinstr
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 3533741a76f..6ff53c24510 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -52,11 +52,6 @@
#define CACHESIZE 32768
/*
- * Run with L2 enabled.
- */
-#define L2_CACHE_ENABLE 1
-
-/*
* This macro is used to wait for a CP15 write and is needed when we
* have to ensure that the last operation to the coprocessor was
* completed before continuing with operation.
@@ -265,12 +260,9 @@ ENTRY(xsc3_dma_inv_range)
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
- mcrne p15, 1, r0, c7, c11, 1 @ clean L2 line
tst r1, #CACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean L1 D line
- mcrne p15, 1, r1, c7, c11, 1 @ clean L2 line
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate L1 D line
- mcr p15, 1, r0, c7, c7, 1 @ invalidate L2 line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
@@ -288,7 +280,6 @@ ENTRY(xsc3_dma_inv_range)
ENTRY(xsc3_dma_clean_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
- mcr p15, 1, r0, c7, c11, 1 @ clean L2 line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
@@ -306,8 +297,6 @@ ENTRY(xsc3_dma_clean_range)
ENTRY(xsc3_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
- mcr p15, 1, r0, c7, c11, 1 @ clean L2 line
- mcr p15, 1, r0, c7, c7, 1 @ invalidate L2 line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
@@ -347,9 +336,7 @@ ENTRY(cpu_xsc3_switch_mm)
mcr p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
mcr p15, 0, ip, c7, c5, 4 @ prefetch flush
-#ifdef L2_CACHE_ENABLE
orr r0, r0, #0x18 @ cache the page table in L2
-#endif
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
cpwait_ret lr, ip
@@ -378,12 +365,10 @@ ENTRY(cpu_xsc3_set_pte_ext)
orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
@ combined with user -> user r/w
-#if L2_CACHE_ENABLE
@ If it's cacheable, it needs to be in L2 also.
eor ip, r1, #L_PTE_CACHEABLE
tst ip, #L_PTE_CACHEABLE
orreq r2, r2, #PTE_EXT_TEX(0x5)
-#endif
tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
movne r2, #0 @ no -> fault
@@ -408,9 +393,7 @@ __xsc3_setup:
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
mcr p15, 0, ip, c7, c5, 4 @ prefetch flush
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
-#if L2_CACHE_ENABLE
orr r4, r4, #0x18 @ cache the page table in L2
-#endif
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
mov r0, #0 @ don't allow CP access
@@ -418,9 +401,7 @@ __xsc3_setup:
mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg
and r0, r0, #2 @ preserve bit P bit setting
-#if L2_CACHE_ENABLE
orr r0, r0, #(1 << 10) @ enable L2 for LLR cache
-#endif
mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg
adr r5, xsc3_crval
@@ -429,9 +410,6 @@ __xsc3_setup:
bic r0, r0, r5 @ ..V. ..R. .... ..A.
orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)
@ ...I Z..S .... .... (uc)
-#if L2_CACHE_ENABLE
- orr r0, r0, #0x04000000 @ L2 enable
-#endif
mov pc, lr
.size __xsc3_setup, . - __xsc3_setup
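
With the per-line L2 operations deleted, xsc3_dma_inv_range() and friends maintain only L1; the L2 half of each DMA operation is expected to come from the outer-cache hooks that cache-xsc3l2.c registers. A sketch of how the two halves pair up in a dma_cache_maint()-style path, modelled on the ARM DMA-mapping code of this period (the exact call site is an assumption; note the outer ops take physical addresses, which is why xsc3_l2_*_pa() convert back with __phys_to_virt()):

void dma_cache_maint(const void *start, size_t size, int direction)
{
	const void *end = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);		 /* L1: proc-xsc3.S   */
		outer_inv_range(__pa(start), __pa(end)); /* L2: cache-xsc3l2.c */
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		outer_clean_range(__pa(start), __pa(end));
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		outer_flush_range(__pa(start), __pa(end));
		break;
	}
}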