Diffstat (limited to 'arch/m68k/mm')
-rw-r--r--  arch/m68k/mm/Makefile   |   8 +-
-rw-r--r--  arch/m68k/mm/cache.c    |  30 ++++-
-rw-r--r--  arch/m68k/mm/init_mm.c  |  36 +++++-
-rw-r--r--  arch/m68k/mm/kmap.c     |   3 +-
-rw-r--r--  arch/m68k/mm/mcfmmu.c   | 199 ++++++++++++++++
-rw-r--r--  arch/m68k/mm/memory.c   |   8 ++-
6 files changed, 271 insertions(+), 13 deletions(-)
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 09cadf1058d..cfbf3205724 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -4,6 +4,8 @@
obj-y := init.o
-obj-$(CONFIG_MMU) += cache.o fault.o hwtest.o
-obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
-obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
+obj-$(CONFIG_MMU) += cache.o fault.o
+obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
+obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o hwtest.o
+obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
+
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 5437fff5fe0..3d84c1f2ffb 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -52,9 +52,9 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
unsigned long *descaddr;
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
- "pmove %%psr,%1@"
- : "=a&" (descaddr)
- : "a" (&mmusr), "a" (vaddr), "d" (get_fs().seg));
+ "pmove %%psr,%1"
+ : "=a&" (descaddr), "=m" (mmusr)
+ : "a" (vaddr), "d" (get_fs().seg));
if (mmusr & (MMU_I|MMU_B|MMU_L))
return 0;
descaddr = phys_to_virt((unsigned long)descaddr);
@@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
-
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ unsigned long start, end;
+ start = address & ICACHE_SET_MASK;
+ end = endaddr & ICACHE_SET_MASK;
+ if (start > end) {
+ flush_cf_icache(0, end);
+ end = ICACHE_MAX_ADDR;
+ }
+ flush_cf_icache(start, end);
+ } else if (CPU_IS_040_OR_060) {
address &= PAGE_MASK;
do {
@@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ unsigned long start, end;
+ start = addr & ICACHE_SET_MASK;
+ end = (addr + len) & ICACHE_SET_MASK;
+ if (start > end) {
+ flush_cf_icache(0, end);
+ end = ICACHE_MAX_ADDR;
+ }
+ flush_cf_icache(start, end);
+
+ } else if (CPU_IS_040_OR_060) {
asm volatile ("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index bbe525434cc..89f3b203814 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -24,6 +24,7 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
+#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
@@ -75,6 +76,38 @@ extern void init_pointer_table(unsigned long ptable);
extern pmd_t *zero_pgtable;
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+#define VECTORS &vectors[0]
+#else
+#define VECTORS _ramvec
+#endif
+
+void __init print_memmap(void)
+{
+#define UL(x) ((unsigned long) (x))
+#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
+#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
+
+ pr_notice("Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld KiB)\n"
+ " kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d KiB)\n",
+ MLK(VECTORS, VECTORS + 256),
+ MLM(KMAP_START, KMAP_END),
+ MLM(VMALLOC_START, VMALLOC_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_stext, _etext),
+ MLK_ROUNDUP(_sdata, _edata),
+ MLK_ROUNDUP(_sbss, _ebss));
+}
+
void __init mem_init(void)
{
pg_data_t *pgdat;
@@ -106,7 +139,7 @@ void __init mem_init(void)
}
}
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -125,6 +158,7 @@ void __init mem_init(void)
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10));
+ print_memmap();
}
#ifdef CONFIG_BLK_DEV_INITRD
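
The MLK()/MLM()/MLK_ROUNDUP() helpers added in print_memmap() each expand
to the three arguments one line of the format string consumes: base, top,
and the size shifted down to KiB (>> 10) or MiB (>> 20). A stand-alone
illustration of one expansion, with made-up addresses:

#include <stdio.h>

#define UL(x)		((unsigned long) (x))
#define MLK(b, t)	UL(b), UL(t), (UL(t) - UL(b)) >> 10	/* KiB */
#define MLM(b, t)	UL(b), UL(t), (UL(t) - UL(b)) >> 20	/* MiB */

int main(void)
{
	/* hypothetical vmalloc window, 48 MiB wide */
	unsigned long start = 0xc0000000UL, end = 0xc3000000UL;

	/* the macro supplies all three conversion arguments at once */
	printf("vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n", MLM(start, end));
	return 0;
}
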
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 69345849454..1cc2bed4c3d 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -171,7 +171,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
break;
}
} else {
- physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
+ _PAGE_DIRTY | _PAGE_READWRITE);
switch (cacheflag) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
new file mode 100644
index 00000000000..875b800ef0d
--- /dev/null
+++ b/arch/m68k/mm/mcfmmu.c
@@ -0,0 +1,199 @@
+/*
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ *
+ * Implementations of mm routines specific to the Coldfire MMU.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/mcf_pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern unsigned long num_pages;
+
+void free_initmem(void)
+{
+}
+
+/*
+ * ColdFire paging_init derived from sun3.
+ */
+void __init paging_init(void)
+{
+ pgd_t *pg_dir;
+ pte_t *pg_table;
+ unsigned long address, size;
+ unsigned long next_pgtable, bootmem_end;
+ unsigned long zones_size[MAX_NR_ZONES];
+ enum zone_type zone;
+ int i;
+
+ empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+ memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+ pg_dir = swapper_pg_dir;
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+ size = num_pages * sizeof(pte_t);
+ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+ next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+
+ bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+ pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+ address = PAGE_OFFSET;
+ while (address < (unsigned long)high_memory) {
+ pg_table = (pte_t *) next_pgtable;
+ next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+ pgd_val(*pg_dir) = (unsigned long) pg_table;
+ pg_dir++;
+
+ /* now change pg_table to kernel virtual addresses */
+ for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
+ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ if (address >= (unsigned long) high_memory)
+ pte_val(pte) = 0;
+
+ set_pte(pg_table, pte);
+ address += PAGE_SIZE;
+ }
+ }
+
+ current->mm = NULL;
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ zones_size[zone] = 0x0;
+ zones_size[ZONE_DMA] = num_pages;
+ free_area_init(zones_size);
+}
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+{
+ unsigned long flags, mmuar, mmutr;
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int asid;
+
+ local_irq_save(flags);
+
+ mmuar = (dtlb) ? mmu_read(MMUAR) :
+ regs->pc + (extension_word * sizeof(long));
+
+ mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
+ if (!mm) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pgd = pgd_offset(mm, mmuar);
+ if (pgd_none(*pgd)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pmd = pmd_offset(pgd, mmuar);
+ if (pmd_none(*pmd)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
+ : pte_offset_map(pmd, mmuar);
+ if (pte_none(*pte) || !pte_present(*pte)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ if (write) {
+ if (!pte_write(*pte)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+ set_pte(pte, pte_mkdirty(*pte));
+ }
+
+ set_pte(pte, pte_mkyoung(*pte));
+ asid = mm->context & 0xff;
+ if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
+ set_pte(pte, pte_wrprotect(*pte));
+
+ mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
+ if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
+ mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
+ mmu_write(MMUTR, mmutr);
+
+ mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+ ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+ if (dtlb)
+ mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+ else
+ mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * Initialize the context management stuff.
+ * The following was taken from arch/ppc/mmu_context.c
+ */
+void __init mmu_context_init(void)
+{
+ /*
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes FIRST_CONTEXT < 32.
+ */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+ atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP.  If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ * -- paulus
+ */
+void steal_context(void)
+{
+ struct mm_struct *mm;
+ /*
+ * free up context `next_mmu_context'
+ * if we shouldn't free context 0, don't...
+ */
+ if (next_mmu_context < FIRST_CONTEXT)
+ next_mmu_context = FIRST_CONTEXT;
+ mm = context_mm[next_mmu_context];
+ flush_tlb_mm(mm);
+ destroy_context(mm);
+}
+
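
The pieces above are only half of the ppc-style context scheme:
mmu_context_init() seeds the bitmap and steal_context() recycles a live
context when none are free, but the allocation path itself lives in a
header elsewhere in this series. For orientation, here is a sketch of the
classic allocator these globals imply, following the arch/ppc code the
comments cite; the NO_CONTEXT guard and the function placement are
illustrative, not something this file adds:

/*
 * Sketch of the ppc-style context allocator that next_mmu_context,
 * context_map, nr_free_contexts and context_mm above support.
 * Kernel context assumed (linux/mm_types.h, linux/bitops.h etc.).
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)		/* already has an ASID */
		return;
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();		/* recycle one in round-robin order */

	/* find a free bit, scanning upward from the last context handed out */
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;	/* assumes 2^n contexts */
	mm->context = ctx;
	context_mm[ctx] = mm;		/* owner record for steal_context() */
}
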
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 34c77ce24fb..a5dbb74fe1d 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -203,7 +203,9 @@ static inline void pushcl040(unsigned long paddr)
void cache_clear (unsigned long paddr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ flush_cf_bcache(0, DCACHE_MAX_ADDR);
+ } else if (CPU_IS_040_OR_060) {
int tmp;
/*
@@ -250,7 +252,9 @@ EXPORT_SYMBOL(cache_clear);
void cache_push (unsigned long paddr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ flush_cf_bcache(0, DCACHE_MAX_ADDR);
+ } else if (CPU_IS_040_OR_060) {
int tmp = PAGE_SIZE;
/*
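
In both hunks the ColdFire case ignores paddr and len and flushes the whole
cache: ColdFire's cpushl instruction addresses cache lines by set index and
way rather than by physical address, so the ranged 040/060-style push loop
has no direct equivalent. A sketch of the shape of such a whole-cache push;
the sizes and the per-line primitive are made-up names for illustration,
not this patch's flush_cf_bcache():

/* assumed geometry, purely for the sketch */
#define LINE_SIZE	16	/* bytes per cache line */
#define NR_SETS		512	/* sets per way */

static void push_line(unsigned long set_addr);	/* e.g. cpushl %bc,(%a0) */

static void push_whole_cache(void)
{
	unsigned long set;

	/* walk every set index (real code also steps through the ways) */
	for (set = 0; set < NR_SETS * LINE_SIZE; set += LINE_SIZE)
		push_line(set);
}
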