Diffstat (limited to 'arch/mn10300/mm')
-rw-r--r--  arch/mn10300/mm/Makefile                 14
-rw-r--r--  arch/mn10300/mm/cache-flush-mn10300.S   192
-rw-r--r--  arch/mn10300/mm/cache-mn10300.S         289
-rw-r--r--  arch/mn10300/mm/cache.c                 121
-rw-r--r--  arch/mn10300/mm/dma-alloc.c              56
-rw-r--r--  arch/mn10300/mm/extable.c                26
-rw-r--r--  arch/mn10300/mm/fault.c                 405
-rw-r--r--  arch/mn10300/mm/init.c                  160
-rw-r--r--  arch/mn10300/mm/misalignment.c          661
-rw-r--r--  arch/mn10300/mm/mmu-context.c            80
-rw-r--r--  arch/mn10300/mm/pgtable.c               197
-rw-r--r--  arch/mn10300/mm/tlb-mn10300.S           207
12 files changed, 2408 insertions(+), 0 deletions(-)
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
new file mode 100644
index 00000000000..28b9d983db0
--- /dev/null
+++ b/arch/mn10300/mm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the MN10300-specific memory management code
+#
+
+obj-y := \
+ init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
+ misalignment.o dma-alloc.o
+
+ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+obj-y += cache.o cache-mn10300.o
+ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
+obj-y += cache-flush-mn10300.o
+endif
+endif
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
new file mode 100644
index 00000000000..c8ed1cbac10
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-mn10300.S
@@ -0,0 +1,192 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+ .am33_2
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+###############################################################################
+#
+# void mn10300_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_flush_end
+
+ # read the addresses tagged in the cache's tag RAM and attempt to flush
+ # those addresses specifically
+ # - we rely on the hardware to filter out invalid tag entry addresses
+ mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_dcache_flush_loop:
+ mov (a0),d0
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
+ # cache
+ mov d0,(a1) # conditional purge
+
+mn10300_dcache_flush_skip:
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_dcache_flush_loop
+
+mn10300_dcache_flush_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_flush_page(unsigned start)
+# void mn10300_dcache_flush_range(unsigned start, unsigned end)
+# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush_page:
+ mov PAGE_SIZE,d1
+mn10300_dcache_flush_range2:
+ add d0,d1
+mn10300_dcache_flush_range:
+ movm [d2,d3],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_dcache_flush_range_end
+
+ # round start addr down
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush all instances of an address from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
+ # cache
+
+mn10300_dcache_flush_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # in all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_dcache_flush_range_loop
+
+mn10300_dcache_flush_range_end:
+ ret [d2,d3],8
+
+###############################################################################
+#
+# void mn10300_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_flush_inv_end
+
+ # hit each line in the dcache with an unconditional purge
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_dcache_flush_inv_loop:
+ mov (a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_flush_inv_page(unsigned start)
+# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
+# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush_inv_page:
+ mov PAGE_SIZE,d1
+mn10300_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_dcache_flush_inv_range:
+ movm [d2,d3],(sp)
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_dcache_flush_inv_range_end
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush and invalidate all instances of an address
+ # from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+mn10300_dcache_flush_inv_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # in all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_dcache_flush_inv_range_loop
+
+mn10300_dcache_flush_inv_range_end:
+ ret [d2,d3],8
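These writeback routines are reached from C through prototypes carried in
<asm/cacheflush.h>. The following is a minimal sketch of that C-level view;
the flush_buffer() wrapper is a hypothetical illustration, not part of the
patch:

	/* Hedged sketch: C view of the assembly entry points above.  The
	 * extern prototypes mirror the comment blocks in the .S file; the
	 * flush_buffer() helper is hypothetical. */
	extern void mn10300_dcache_flush(void);
	extern void mn10300_dcache_flush_page(unsigned long start);
	extern void mn10300_dcache_flush_range(unsigned long start,
					       unsigned long end);
	extern void mn10300_dcache_flush_range2(unsigned long start,
						unsigned long size);

	/* write back every dcache line covering an arbitrary buffer */
	static inline void flush_buffer(const void *buf, unsigned long len)
	{
		unsigned long start = (unsigned long) buf;

		/* the assembly rounds start down and end up to
		 * L1_CACHE_BYTES, so callers need not align the range */
		mn10300_dcache_flush_range(start, start + len);
	}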
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
new file mode 100644
index 00000000000..e839d0aedd6
--- /dev/null
+++ b/arch/mn10300/mm/cache-mn10300.S
@@ -0,0 +1,289 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+#define mn10300_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+ .globl mn10300_icache_inv
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+ .globl mn10300_dcache_inv_page
+
+###############################################################################
+#
+# void mn10300_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+mn10300_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_icache_inv_end
+
+ mov epsw,d1
+ and ~EPSW_IE,epsw
+ nop
+ nop
+
+ # disable the icache
+ and ~CHCTR_ICEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_ICINV,d0
+ or CHCTR_ICEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ mov d1,epsw
+
+mn10300_icache_inv_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_inv_end
+
+ mov epsw,d1
+ and ~EPSW_IE,epsw
+ nop
+ nop
+
+ # disable the dcache
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_DCINV,d0
+ or CHCTR_DCEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ mov d1,epsw
+
+mn10300_dcache_inv_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_inv_range(unsigned start, unsigned end)
+# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
+# void mn10300_dcache_inv_page(unsigned start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_inv_page:
+ mov PAGE_SIZE,d1
+mn10300_dcache_inv_range2:
+ add d0,d1
+mn10300_dcache_inv_range:
+ movm [d2,d3,a2],(sp)
+ mov CHCTR,a2
+
+ movhu (a2),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_dcache_inv_range_end
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ clr d2 # we're going to clear tag ram
+ # entries
+
+ # read the tags from the tag RAM, and if they indicate a valid dirty
+ # cache line then invalidate that line
+ mov DCACHE_TAG(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache tag RAM
+ # access address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
+
+mn10300_dcache_inv_range_outer_loop:
+ # disable interrupts
+ mov epsw,d3
+ and ~EPSW_IE,epsw
+ nop # note that reading CHCTR and
+ # AND'ing D0 occupy two delay
+ # slots after disabling
+ # interrupts
+
+ # disable the dcache
+ movhu (a2),d0
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a2)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+mn10300_dcache_inv_range_loop:
+
+ # process the way 0 slot
+ mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_0:
+
+ # process the way 1 slot
+ mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_1:
+
+ # process the way 2 slot
+ mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_2:
+
+ # process the way 3 slot
+ mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_3:
+
+ # approx every N steps we re-enable the cache and see if there are any
+ # interrupts to be processed
+ # we also break out if we've reached the end of the loop
+ # (the bottom nibble of the count is zero in both cases)
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ btst mn10300_dcache_inv_range_intr_interval,d1
+ bne mn10300_dcache_inv_range_loop
+
+ # wait for the cache to finish what it's doing
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ or CHCTR_DCEN,d0
+ movhu d0,(a2)
+ movhu (a2),d0
+
+ # re-enable interrupts
+ # - we don't bother with delay NOPs as we'll have enough instructions
+ # before we disable interrupts again to give the interrupts a chance
+ # to happen
+ mov d3,epsw
+
+ # go around again if the counter hasn't yet reached zero
+ add 0,d1
+ bne mn10300_dcache_inv_range_outer_loop
+
+mn10300_dcache_inv_range_end:
+ ret [d2,d3,a2],12
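The range-invalidation loop above trades latency for interrupt
responsiveness: the dcache is disabled only for bursts of up to
2^MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL entries, then re-enabled so
pending interrupts can run. A rough C rendering of that control flow, as a
sketch only (the helper names are stand-ins for the CHCTR register
sequences done in assembly):

	/* Rough C sketch of the chunked invalidation above; the helpers
	 * stand in for the CHCTR twiddling and DCBUSY polling, and
	 * invalidate_one_entry() for the four per-way tag probes. */
	extern void disable_dcache_and_wait(void);  /* CHCTR_DCEN off, poll DCBUSY */
	extern void enable_dcache_and_wait(void);   /* poll DCBUSY, CHCTR_DCEN on */
	extern void invalidate_one_entry(unsigned long addr);

	static void dcache_inv_range_sketch(unsigned long start, unsigned long end)
	{
		unsigned long n = (end - start) >> L1_CACHE_SHIFT;
		unsigned long flags;

		while (n > 0) {
			local_irq_save(flags);
			disable_dcache_and_wait();

			/* burst through up to 2^LOG2_INTERVAL entries */
			do {
				invalidate_one_entry(start);
				start += L1_CACHE_BYTES;
			} while (--n & mn10300_dcache_inv_range_intr_interval);

			enable_dcache_and_wait();
			local_irq_restore(flags);  /* window for interrupts */
		}
	}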
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
new file mode 100644
index 00000000000..1b76719ec1c
--- /dev/null
+++ b/arch/mn10300/mm/cache.c
@@ -0,0 +1,121 @@
+/* MN10300 Cache flushing routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_dcache_inv);
+EXPORT_SYMBOL(mn10300_dcache_inv_range);
+EXPORT_SYMBOL(mn10300_dcache_inv_range2);
+EXPORT_SYMBOL(mn10300_dcache_inv_page);
+
+#ifdef CONFIG_MN10300_CACHE_WBACK
+EXPORT_SYMBOL(mn10300_dcache_flush);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv_range);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv_page);
+EXPORT_SYMBOL(mn10300_dcache_flush_range);
+EXPORT_SYMBOL(mn10300_dcache_flush_range2);
+EXPORT_SYMBOL(mn10300_dcache_flush_page);
+#endif
+
+/*
+ * write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ mn10300_dcache_flush_page(page_to_phys(page));
+ mn10300_icache_inv();
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/*
+ * write some code we've just written back from the dcache and invalidate the
+ * icache so that we can run that code
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ for (; start < end; start += size) {
+ /* work out how much of the page to flush */
+ off = start & (PAGE_SIZE - 1);
+
+ size = end - start;
+ if (size > PAGE_SIZE - off)
+ size = PAGE_SIZE - off;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ continue;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ continue;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ continue;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ continue;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ continue;
+
+ page = pte_page(pte);
+ if (!page)
+ continue;
+
+ addr = page_to_phys(page);
+
+ /* flush the dcache and invalidate the icache coverage on that
+ * region */
+ mn10300_dcache_flush_range2(addr + off, size);
+ }
+#endif
+
+ mn10300_icache_inv();
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+ if (end < start)
+ return -EINVAL;
+
+ flush_icache_range(start, end);
+ return 0;
+}
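sys_cacheflush() gives userland (a JIT, for instance) a way to make freshly
written code visible to the icache. A hedged user-space sketch; the syscall
number here is an assumption and should be taken from <asm/unistd.h> on a
real system:

	/* Hedged user-space usage sketch for sys_cacheflush().
	 * __NR_cacheflush is hypothetical shorthand for the arch's
	 * actual syscall number. */
	#include <unistd.h>
	#include <sys/syscall.h>

	static int cacheflush(unsigned long start, unsigned long end)
	{
		return syscall(__NR_cacheflush, start, end);
	}

	/* e.g. after writing generated code into buf:
	 *	cacheflush((unsigned long) buf, (unsigned long) buf + len);
	 */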
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
new file mode 100644
index 00000000000..f3649d8f50e
--- /dev/null
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -0,0 +1,56 @@
+/* MN10300 Dynamic DMA mapping support
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * Derived from: arch/i386/kernel/pci-dma.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int gfp)
+{
+ unsigned long addr;
+ void *ret;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
+ gfp |= GFP_DMA;
+
+ addr = __get_free_pages(gfp, get_order(size));
+ if (!addr)
+ return NULL;
+
+ /* map the coherent memory through the uncached memory window */
+ ret = (void *) (addr | 0x20000000);
+
+ /* fill the memory with obvious rubbish */
+ memset((void *) addr, 0xfb, size);
+
+ /* write back and evict all cache lines covering this region */
+ mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), size);
+
+ *dma_handle = virt_to_bus((void *) addr);
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ unsigned long addr = (unsigned long) vaddr & ~0x20000000;
+
+ free_pages(addr, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
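The allocator above relies on the CPU's fixed uncached alias: setting bit
0x20000000 in a kernel virtual address yields an uncached view of the same
RAM, so the returned pointer can be used without further flushing once the
cached lines have been written back and evicted. Typical driver usage,
shown as a hedged sketch (dev and the ring are placeholders):

	/* Hedged usage sketch: one page of coherent memory for a device
	 * descriptor ring; dev is a placeholder struct device pointer. */
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ring:     uncached CPU alias, safe to touch without flushes
	 * ring_dma: bus address to program into the device */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);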
diff --git a/arch/mn10300/mm/extable.c b/arch/mn10300/mm/extable.c
new file mode 100644
index 00000000000..25e5485ab87
--- /dev/null
+++ b/arch/mn10300/mm/extable.c
@@ -0,0 +1,26 @@
+/* MN10300 In-kernel exception handling
+ *
+ * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return 1;
+ }
+
+ return 0;
+}
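fixup_exception() resolves the faulting PC through the kernel's sorted
__ex_table section. For reference, a sketch of the entry layout it depends
on (this mirrors the usual <asm/uaccess.h> definition for this port):

	/* sketch of the table entries fixup_exception() consults */
	struct exception_table_entry {
		unsigned long insn;	/* address of the faulting instruction */
		unsigned long fixup;	/* address to resume at after the fault */
	};

	/* search_exception_tables(pc) searches the sorted __ex_table
	 * section and returns the entry whose ->insn matches pc, or
	 * NULL if none does */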
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
new file mode 100644
index 00000000000..78f092ca031
--- /dev/null
+++ b/arch/mn10300/mm/fault.c
@@ -0,0 +1,405 @@
+/* MN10300 MMU Fault handler
+ *
+ * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Modified by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/hardirq.h>
+#include <asm/gdb-stub.h>
+#include <asm/cpu-regs.h>
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out
+ */
+void bust_spinlocks(int yes)
+{
+ if (yes) {
+ oops_in_progress = 1;
+#ifdef CONFIG_SMP
+ /* Many serial drivers do __global_cli() */
+ global_irq_lock = 0;
+#endif
+ } else {
+ int loglevel_save = console_loglevel;
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console
+ * up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
+}
+
+void do_BUG(const char *file, int line)
+{
+ bust_spinlocks(1);
+ printk(KERN_EMERG "------------[ cut here ]------------\n");
+ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
+}
+
+#if 0
+static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgdir + __pgd_offset(address);
+ printk(KERN_DEBUG "pgd entry %p: %016Lx\n",
+ pgd, (long long) pgd_val(*pgd));
+
+ if (!pgd_present(*pgd)) {
+ printk(KERN_DEBUG "... pgd not present!\n");
+ return;
+ }
+ pmd = pmd_offset(pgd, address);
+ printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
+ pmd, (long long)pmd_val(*pmd));
+
+ if (!pmd_present(*pmd)) {
+ printk(KERN_DEBUG "... pmd not present!\n");
+ return;
+ }
+ pte = pte_offset(pmd, address);
+ printk(KERN_DEBUG "pte entry %p: %016Lx\n",
+ pte, (long long) pte_val(*pte));
+
+ if (!pte_present(*pte))
+ printk(KERN_DEBUG "... pte not present!\n");
+}
+#endif
+
+asmlinkage void monitor_signal(struct pt_regs *);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * fault_code:
+ * - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate
+ * - MSW: 0 if data access, 1 if instruction access
+ * - bit 0: TLB miss flag
+ * - bit 1: initial write
+ * - bit 2: page invalid
+ * - bit 3: protection violation
+ * - bit 4: accessor (0=user 1=kernel)
+ * - bit 5: 0=read 1=write
+ * - bit 6-8: page protection spec
+ * - bit 9: illegal address
+ * - bit 16: 0=data 1=ins
+ *
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
+ unsigned long address)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ unsigned long page;
+ siginfo_t info;
+ int write, fault;
+
+#ifdef CONFIG_GDBSTUB
+ /* handle GDB stub causing a fault */
+ if (gdbstub_busy) {
+ gdbstub_exception(regs, TBR & TBR_INT_CODE);
+ return;
+ }
+#endif
+
+#if 0
+ printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n",
+ regs,
+ fault_code & 0x10000 ? "ins" : "data",
+ fault_code & 0xffff, address);
+#endif
+
+ tsk = current;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * and that the fault was a page not present (invalid) error
+ */
+ if (address >= VMALLOC_START && address < VMALLOC_END &&
+ (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR &&
+ (fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL
+ )
+ goto vmalloc_fault;
+
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
+ /* accessing the stack below the stack pointer is always a
+ * bug */
+ if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) {
+#if 0
+ printk(KERN_WARNING
+ "[%d] ### Access below stack @%lx (sp=%lx)\n",
+ current->pid, address, regs->sp);
+ printk(KERN_WARNING
+ "vma [%08x - %08x]\n",
+ vma->vm_start, vma->vm_end);
+ show_registers(regs);
+ printk(KERN_WARNING
+ "[%d] ### Code: [%08lx]"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ current->pid,
+ regs->pc,
+ ((u8 *) regs->pc)[0],
+ ((u8 *) regs->pc)[1],
+ ((u8 *) regs->pc)[2],
+ ((u8 *) regs->pc)[3],
+ ((u8 *) regs->pc)[4],
+ ((u8 *) regs->pc)[5],
+ ((u8 *) regs->pc)[6],
+ ((u8 *) regs->pc)[7]
+ );
+#endif
+ goto bad_area;
+ }
+ }
+
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
+ default: /* 3: write, present */
+ case MMUFCR_xFC_TYPE_WRITE:
+#ifdef TEST_VERIFY_AREA
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
+ printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc);
+#endif
+ /* write to absent page */
+ case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+
+ /* read from protected page */
+ case MMUFCR_xFC_TYPE_READ:
+ goto bad_area;
+
+ /* read from absent page */
+ case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ:
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ break;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, write);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+ monitor_signal(regs);
+
+ /* User mode accesses just cause a SIGSEGV */
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ monitor_signal(regs);
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT
+ "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT
+ "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n", address);
+ printk(" printing pc:\n");
+ printk(KERN_ALERT "%08lx\n", regs->pc);
+
+#ifdef CONFIG_GDBSTUB
+ gdbstub_intercept(
+ regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR);
+#endif
+
+ page = PTBR;
+ page = ((unsigned long *) __va(page))[address >> 22];
+ printk(KERN_ALERT "*pde = %08lx\n", page);
+ if (page & 1) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+ printk(KERN_ALERT "*pte = %08lx\n", page);
+ }
+
+ die("Oops", regs, fault_code);
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ monitor_signal(regs);
+ printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+ monitor_signal(regs);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
+ goto no_context;
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+
+ pgd = (pgd_t *) PTBR + index;
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+}
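The fault_code bit layout documented at the top of do_page_fault() drives
all of the branching above. Two hedged helper sketches make the common
tests explicit (the MMUFCR_* constants come from <asm/cpu-regs.h>):

	/* Hedged sketches of the fault_code tests used in
	 * do_page_fault(). */
	static inline int fault_from_user_mode(unsigned long fault_code)
	{
		return (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR;
	}

	static inline int fault_was_write(unsigned long fault_code)
	{
		return (fault_code & MMUFCR_xFC_TYPE) == MMUFCR_xFC_TYPE_WRITE;
	}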
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
new file mode 100644
index 00000000000..8c5d88c7b90
--- /dev/null
+++ b/arch/mn10300/mm/init.c
@@ -0,0 +1,160 @@
+/* MN10300 Memory management initialisation
+ *
+ * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Modified by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/tlb.h>
+#include <asm/sections.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+unsigned long highstart_pfn, highend_pfn;
+
+/*
+ * set up paging
+ */
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0,};
+ pte_t *ppte;
+ int loop;
+
+ /* main kernel space -> RAM mapping is handled as 1:1 transparent by
+ * the MMU */
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+ memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));
+
+ /* load the VMALLOC area PTE table addresses into the kernel PGD */
+ ppte = kernel_vmalloc_ptes;
+ for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
+ loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
+ loop++
+ ) {
+ set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
+ ppte += PAGE_SIZE / sizeof(pte_t);
+ }
+
+ /* declare the sizes of the RAM zones (only use the normal zone) */
+ zones_size[ZONE_NORMAL] =
+ (contig_page_data.bdata->node_low_pfn) -
+ (contig_page_data.bdata->node_boot_start >> PAGE_SHIFT);
+
+ /* pass the memory from the bootmem allocator to the main allocator */
+ free_area_init(zones_size);
+
+ __flush_tlb_all();
+}
+
+/*
+ * transfer all the memory from the bootmem allocator to the runtime allocator
+ */
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+ int tmp;
+
+ if (!mem_map)
+ BUG();
+
+#define START_PFN (contig_page_data.bdata->node_boot_start >> PAGE_SHIFT)
+#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
+
+ max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
+ high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages += free_all_bootmem();
+
+ reservedpages = 0;
+ for (tmp = 0; tmp < num_physpages; tmp++)
+ if (PageReserved(&mem_map[tmp]))
+ reservedpages++;
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_stext;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk(KERN_INFO
+ "Memory: %luk/%luk available"
+ " (%dk kernel code, %dk reserved, %dk data, %dk init,"
+ " %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
+ max_mapnr << (PAGE_SHIFT - 10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT - 10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT - 10))
+ );
+}
+
+/*
+ * free a range of pages that were only needed during initialisation
+ */
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ memset((void *) addr, 0xcc, PAGE_SIZE);
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+/*
+ * recycle memory containing stuff only required for initialisation
+ */
+void free_initmem(void)
+{
+ free_init_pages("unused kernel memory",
+ (unsigned long) &__init_begin,
+ (unsigned long) &__init_end);
+}
+
+/*
+ * dispose of the memory on which the initial ramdisk resided
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory", start, end);
+}
+#endif
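For the paging_init() loop above: each PGD slot maps PAGE_SIZE *
PTRS_PER_PTE bytes of virtual space, so the loop installs one page of
kernel_vmalloc_ptes per slot across the VMALLOC area. A worked example,
assuming 4 KiB pages and 1024 PTEs per table (check <asm/pgtable.h> for
the real values):

	/* Hedged arithmetic sketch for the paging_init() loop bounds,
	 * assuming PAGE_SIZE = 4096 and PTRS_PER_PTE = 1024. */
	#define PGD_SLOT_SPAN	(4096UL * 1024UL)	/* 4 MiB per PGD slot */

	static unsigned long vmalloc_pgd_slots(unsigned long vm_start,
					       unsigned long vm_end)
	{
		/* one PTE page (and one set_pgd() call) per 4 MiB of
		 * VMALLOC space */
		return (vm_end - vm_start) / PGD_SLOT_SPAN;
	}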
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
new file mode 100644
index 00000000000..32aa89dc384
--- /dev/null
+++ b/arch/mn10300/mm/misalignment.c
@@ -0,0 +1,661 @@
+/* MN10300 Misalignment fixup handler
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/cpu-regs.h>
+#include <asm/busctl-regs.h>
+#include <asm/fpu.h>
+#include <asm/gdb-stub.h>
+#include <asm/asm-offsets.h>
+
+#if 0
+#define kdebug(FMT, ...) printk(KERN_DEBUG FMT, ##__VA_ARGS__)
+#else
+#define kdebug(FMT, ...) do {} while (0)
+#endif
+
+static int misalignment_addr(unsigned long *registers, unsigned params,
+ unsigned opcode, unsigned disp,
+ void **_address, unsigned long **_postinc);
+
+static int misalignment_reg(unsigned long *registers, unsigned params,
+ unsigned opcode, unsigned disp,
+ unsigned long **_register);
+
+static inline unsigned int_log2(unsigned x)
+{
+ unsigned y;
+ asm("bsch %1,%0" : "=r"(y) : "r"(x), "0"(0));
+ return y;
+}
+#define log2(x) int_log2(x)
+
+static const unsigned Dreg_index[] = {
+ REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
+};
+
+static const unsigned Areg_index[] = {
+ REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2
+};
+
+static const unsigned Rreg_index[] = {
+ REG_E0 >> 2, REG_E1 >> 2, REG_E2 >> 2, REG_E3 >> 2,
+ REG_E4 >> 2, REG_E5 >> 2, REG_E6 >> 2, REG_E7 >> 2,
+ REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2,
+ REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
+};
+
+enum format_id {
+ FMT_S0,
+ FMT_S1,
+ FMT_S2,
+ FMT_S4,
+ FMT_D0,
+ FMT_D1,
+ FMT_D2,
+ FMT_D4,
+ FMT_D6,
+ FMT_D7,
+ FMT_D8,
+ FMT_D9,
+};
+
+struct {
+ u_int8_t opsz, dispsz;
+} format_tbl[16] = {
+ [FMT_S0] = { 8, 0 },
+ [FMT_S1] = { 8, 8 },
+ [FMT_S2] = { 8, 16 },
+ [FMT_S4] = { 8, 32 },
+ [FMT_D0] = { 16, 0 },
+ [FMT_D1] = { 16, 8 },
+ [FMT_D2] = { 16, 16 },
+ [FMT_D4] = { 16, 32 },
+ [FMT_D6] = { 24, 0 },
+ [FMT_D7] = { 24, 8 },
+ [FMT_D8] = { 24, 24 },
+ [FMT_D9] = { 24, 32 },
+};
+
+enum value_id {
+ DM0, /* data reg in opcode in bits 0-1 */
+ DM1, /* data reg in opcode in bits 2-3 */
+ DM2, /* data reg in opcode in bits 4-5 */
+ AM0, /* addr reg in opcode in bits 0-1 */
+ AM1, /* addr reg in opcode in bits 2-3 */
+ AM2, /* addr reg in opcode in bits 4-5 */
+ RM0, /* reg in opcode in bits 0-3 */
+ RM1, /* reg in opcode in bits 2-5 */
+ RM2, /* reg in opcode in bits 4-7 */
+ RM4, /* reg in opcode in bits 8-11 */
+ RM6, /* reg in opcode in bits 12-15 */
+
+ RD0, /* reg in displacement in bits 0-3 */
+ RD2, /* reg in displacement in bits 4-7 */
+
+ SP, /* stack pointer */
+
+ SD8, /* 8-bit signed displacement */
+ SD16, /* 16-bit signed displacement */
+ SD24, /* 24-bit signed displacement */
+ SIMM4_2, /* 4-bit signed displacement in opcode bits 4-7 */
+ SIMM8, /* 8-bit signed immediate */
+ IMM24, /* 24-bit unsigned immediate */
+ IMM32, /* 32-bit unsigned immediate */
+ IMM32_HIGH8, /* 32-bit unsigned immediate, high 8-bits in opcode */
+
+ DN0 = DM0,
+ DN1 = DM1,
+ DN2 = DM2,
+ AN0 = AM0,
+ AN1 = AM1,
+ AN2 = AM2,
+ RN0 = RM0,
+ RN1 = RM1,
+ RN2 = RM2,
+ RN4 = RM4,
+ RN6 = RM6,
+ DI = DM1,
+ RI = RM2,
+
+};
+
+struct mn10300_opcode {
+ const char *name;
+ u_int32_t opcode;
+ u_int32_t opmask;
+ unsigned exclusion;
+
+ enum format_id format;
+
+ unsigned cpu_mask;
+#define AM33 330
+
+ unsigned params[2];
+#define MEM(ADDR) (0x80000000 | (ADDR))
+#define MEM2(ADDR1, ADDR2) (0x80000000 | (ADDR1) << 8 | (ADDR2))
+#define MEMINC(ADDR) (0x81000000 | (ADDR))
+#define MEMINC2(ADDR, INC) (0x81000000 | (ADDR) << 8 | (INC))
+};
+
+/* LIBOPCODES EXCERPT
+ Assemble Matsushita MN10300 instructions.
+ Copyright 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public Licence as published by
+ the Free Software Foundation; either version 2 of the Licence, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public Licence for more details.
+
+ You should have received a copy of the GNU General Public Licence
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*/
+static const struct mn10300_opcode mn10300_opcodes[] = {
+{ "mov", 0x60, 0xf0, 0, FMT_S0, 0, {DM1, MEM(AN0)}},
+{ "mov", 0x70, 0xf0, 0, FMT_S0, 0, {MEM(AM0), DN1}},
+{ "mov", 0xf000, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), AN1}},
+{ "mov", 0xf010, 0xfff0, 0, FMT_D0, 0, {AM1, MEM(AN0)}},
+{ "mov", 0xf300, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}},
+{ "mov", 0xf340, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}},
+{ "mov", 0xf380, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), AN2}},
+{ "mov", 0xf3c0, 0xffc0, 0, FMT_D0, 0, {AM2, MEM2(DI, AN0)}},
+{ "mov", 0xf80000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}},
+{ "mov", 0xf81000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}},
+{ "mov", 0xf82000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8,AM0), AN1}},
+{ "mov", 0xf83000, 0xfff000, 0, FMT_D1, 0, {AM1, MEM2(SD8, AN0)}},
+{ "mov", 0xf8f000, 0xfffc00, 0, FMT_D1, AM33, {MEM2(SD8, AM0), SP}},
+{ "mov", 0xf8f400, 0xfffc00, 0, FMT_D1, AM33, {SP, MEM2(SD8, AN0)}},
+{ "mov", 0xf90a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}},
+{ "mov", 0xf91a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}},
+{ "mov", 0xf96a00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
+{ "mov", 0xf97a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
+{ "mov", 0xfa000000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}},
+{ "mov", 0xfa100000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}},
+{ "mov", 0xfa200000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), AN1}},
+{ "mov", 0xfa300000, 0xfff00000, 0, FMT_D2, 0, {AM1, MEM2(SD16, AN0)}},
+{ "mov", 0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
+{ "mov", 0xfb1a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
+{ "mov", 0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
+{ "mov", 0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
+{ "mov", 0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
+{ "mov", 0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
+{ "mov", 0xfc000000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}},
+{ "mov", 0xfc100000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}},
+{ "mov", 0xfc200000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), AN1}},
+{ "mov", 0xfc300000, 0xfff00000, 0, FMT_D4, 0, {AM1, MEM2(IMM32,AN0)}},
+{ "mov", 0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
+{ "mov", 0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
+{ "mov", 0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
+{ "mov", 0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
+{ "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
+{ "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
+{ "mov", 0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
+{ "mov", 0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
+
+{ "movhu", 0xf060, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), DN1}},
+{ "movhu", 0xf070, 0xfff0, 0, FMT_D0, 0, {DM1, MEM(AN0)}},
+{ "movhu", 0xf480, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}},
+{ "movhu", 0xf4c0, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}},
+{ "movhu", 0xf86000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}},
+{ "movhu", 0xf87000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}},
+{ "movhu", 0xf94a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}},
+{ "movhu", 0xf95a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}},
+{ "movhu", 0xf9ea00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
+{ "movhu", 0xf9fa00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
+{ "movhu", 0xfa600000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}},
+{ "movhu", 0xfa700000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}},
+{ "movhu", 0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
+{ "movhu", 0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
+{ "movhu", 0xfbce0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
+{ "movhu", 0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
+{ "movhu", 0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
+{ "movhu", 0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
+{ "movhu", 0xfc600000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}},
+{ "movhu", 0xfc700000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}},
+{ "movhu", 0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
+{ "movhu", 0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
+{ "movhu", 0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
+{ "movhu", 0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
+{ "movhu", 0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
+{ "movhu", 0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
+{ "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
+{ "movhu", 0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
+{ 0, 0, 0, 0, 0, 0, {0}},
+};
+
+/*
+ * fix up misalignment problems where possible
+ */
+asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)
+{
+ const struct exception_table_entry *fixup;
+ const struct mn10300_opcode *pop;
+ unsigned long *registers = (unsigned long *) regs;
+ unsigned long data, *store, *postinc;
+ mm_segment_t seg;
+ siginfo_t info;
+ uint32_t opcode, disp, noc, xo, xm;
+ uint8_t *pc, byte;
+ void *address;
+ unsigned tmp, npop;
+
+ kdebug("MISALIGN at %lx\n", regs->pc);
+
+ if (in_interrupt())
+ die("Misalignment trap in interrupt context", regs, code);
+
+ if (regs->epsw & EPSW_IE)
+ asm volatile("or %0,epsw" : : "i"(EPSW_IE));
+
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+
+ fixup = search_exception_tables(regs->pc);
+
+ /* first thing to do is to match the opcode */
+ pc = (u_int8_t *) regs->pc;
+
+ if (__get_user(byte, pc) != 0)
+ goto fetch_error;
+ opcode = byte;
+ noc = 8;
+
+ for (pop = mn10300_opcodes; pop->name; pop++) {
+ npop = log2(pop->opcode | pop->opmask);
+ if (npop <= 0 || npop > 31)
+ continue;
+ npop = (npop + 8) & ~7;
+
+ got_more_bits:
+ if (npop == noc) {
+ if ((opcode & pop->opmask) == pop->opcode)
+ goto found_opcode;
+ } else if (npop > noc) {
+ xo = pop->opcode >> (npop - noc);
+ xm = pop->opmask >> (npop - noc);
+
+ if ((opcode & xm) != xo)
+ continue;
+
+ /* we've got a partial match (an exact match on the
+ * first N bytes), so we need to get some more data */
+ pc++;
+ if (__get_user(byte, pc) != 0)
+ goto fetch_error;
+ opcode = opcode << 8 | byte;
+ noc += 8;
+ goto got_more_bits;
+ } else {
+ /* there's already been a partial match as long as the
+ * complete match we're now considering, so this one
+ * shouldn't match */
+ continue;
+ }
+ }
+
+ /* didn't manage to find a fixup */
+ if (!user_mode(regs))
+ printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n",
+ regs->pc, opcode);
+
+failed:
+ set_fs(seg);
+ if (die_if_no_fixup("misalignment error", regs, code))
+ return;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void *) regs->pc;
+ force_sig_info(SIGBUS, &info, current);
+ return;
+
+ /* error reading opcodes */
+fetch_error:
+ if (!user_mode(regs))
+ printk(KERN_CRIT
+ "MISALIGN: %p: fault whilst reading instruction data\n",
+ pc);
+ goto failed;
+
+bad_addr_mode:
+ if (!user_mode(regs))
+ printk(KERN_CRIT
+ "MISALIGN: %lx: unsupported addressing mode %x\n",
+ regs->pc, opcode);
+ goto failed;
+
+bad_reg_mode:
+ if (!user_mode(regs))
+ printk(KERN_CRIT
+ "MISALIGN: %lx: unsupported register mode %x\n",
+ regs->pc, opcode);
+ goto failed;
+
+unsupported_instruction:
+ if (!user_mode(regs))
+ printk(KERN_CRIT
+ "MISALIGN: %lx: unsupported instruction %x (%s)\n",
+ regs->pc, opcode, pop->name);
+ goto failed;
+
+transfer_failed:
+ set_fs(seg);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return;
+ }
+ if (die_if_no_fixup("misalignment fixup", regs, code))
+ return;
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = 0;
+ info.si_addr = (void *) regs->pc;
+ force_sig_info(SIGSEGV, &info, current);
+ return;
+
+ /* we matched the opcode */
+found_opcode:
+ kdebug("MISALIGN: %lx: %x==%x { %x, %x }\n",
+ regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
+
+ tmp = format_tbl[pop->format].opsz;
+ if (tmp > noc)
+ BUG(); /* match was less complete than it ought to have been */
+
+ if (tmp < noc) {
+ tmp = noc - tmp;
+ opcode >>= tmp;
+ pc -= tmp >> 3;
+ }
+
+ /* grab the extra displacement (note it's LSB first) */
+ disp = 0;
+ tmp = format_tbl[pop->format].dispsz >> 3;
+ while (tmp > 0) {
+ tmp--;
+ disp <<= 8;
+
+ pc++;
+ if (__get_user(byte, pc) != 0)
+ goto fetch_error;
+ disp |= byte;
+ }
+
+ set_fs(KERNEL_XDS);
+ if (fixup || regs->epsw & EPSW_nSL)
+ set_fs(seg);
+
+ tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000;
+ if (!tmp) {
+ if (!user_mode(regs))
+ printk(KERN_CRIT
+ "MISALIGN: %lx:"
+ " insn not move to/from memory %x\n",
+ regs->pc, opcode);
+ goto failed;
+ }
+
+ if (pop->params[0] & 0x80000000) {
+ /* move memory to register */
+ if (!misalignment_addr(registers, pop->params[0], opcode, disp,
+ &address, &postinc))
+ goto bad_addr_mode;
+
+ if (!misalignment_reg(registers, pop->params[1], opcode, disp,
+ &store))
+ goto bad_reg_mode;
+
+ if (strcmp(pop->name, "mov") == 0) {
+ kdebug("FIXUP: mov (%p),DARn\n", address);
+ if (copy_from_user(&data, (void *) address, 4) != 0)
+ goto transfer_failed;
+ if (pop->params[0] & 0x1000000)
+ *postinc += 4;
+ } else if (strcmp(pop->name, "movhu") == 0) {
+ kdebug("FIXUP: movhu (%p),DARn\n", address);
+ data = 0;
+ if (copy_from_user(&data, (void *) address, 2) != 0)
+ goto transfer_failed;
+ if (pop->params[0] & 0x1000000)
+ *postinc += 2;
+ } else {
+ goto unsupported_instruction;
+ }
+
+ *store = data;
+ } else {
+ /* move register to memory */
+ if (!misalignment_reg(registers, pop->params[0], opcode, disp,
+ &store))
+ goto bad_reg_mode;
+
+ if (!misalignment_addr(registers, pop->params[1], opcode, disp,
+ &address, &postinc))
+ goto bad_addr_mode;
+
+ data = *store;
+
+ if (strcmp(pop->name, "mov") == 0) {
+ kdebug("FIXUP: mov %lx,(%p)\n", data, address);
+ if (copy_to_user((void *) address, &data, 4) != 0)
+ goto transfer_failed;
+ if (pop->params[1] & 0x1000000)
+ *postinc += 4;
+ } else if (strcmp(pop->name, "movhu") == 0) {
+ kdebug("FIXUP: movhu %hx,(%p)\n",
+ (uint16_t) data, address);
+ if (copy_to_user((void *) address, &data, 2) != 0)
+ goto transfer_failed;
+ if (pop->params[1] & 0x1000000)
+ *postinc += 2;
+ } else {
+ goto unsupported_instruction;
+ }
+ }
+
+ tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz;
+ regs->pc += tmp >> 3;
+
+ set_fs(seg);
+ return;
+}
+
+/*
+ * determine the address that was being accessed
+ */
+static int misalignment_addr(unsigned long *registers, unsigned params,
+ unsigned opcode, unsigned disp,
+ void **_address, unsigned long **_postinc)
+{
+ unsigned long *postinc = NULL, address = 0, tmp;
+
+ params &= 0x7fffffff;
+
+ do {
+ switch (params & 0xff) {
+ case DM0:
+ postinc = &registers[Dreg_index[opcode & 0x03]];
+ address += *postinc;
+ break;
+ case DM1:
+ postinc = &registers[Dreg_index[opcode >> 2 & 0x03]];
+ address += *postinc;
+ break;
+ case DM2:
+ postinc = &registers[Dreg_index[opcode >> 4 & 0x03]];
+ address += *postinc;
+ break;
+ case AM0:
+ postinc = &registers[Areg_index[opcode & 0x03]];
+ address += *postinc;
+ break;
+ case AM1:
+ postinc = &registers[Areg_index[opcode >> 2 & 0x03]];
+ address += *postinc;
+ break;
+ case AM2:
+ postinc = &registers[Areg_index[opcode >> 4 & 0x03]];
+ address += *postinc;
+ break;
+ case RM0:
+ postinc = &registers[Rreg_index[opcode & 0x0f]];
+ address += *postinc;
+ break;
+ case RM1:
+ postinc = &registers[Rreg_index[opcode >> 2 & 0x0f]];
+ address += *postinc;
+ break;
+ case RM2:
+ postinc = &registers[Rreg_index[opcode >> 4 & 0x0f]];
+ address += *postinc;
+ break;
+ case RM4:
+ postinc = &registers[Rreg_index[opcode >> 8 & 0x0f]];
+ address += *postinc;
+ break;
+ case RM6:
+ postinc = &registers[Rreg_index[opcode >> 12 & 0x0f]];
+ address += *postinc;
+ break;
+ case RD0:
+ postinc = &registers[Rreg_index[disp & 0x0f]];
+ address += *postinc;
+ break;
+ case RD2:
+ postinc = &registers[Rreg_index[disp >> 4 & 0x0f]];
+ address += *postinc;
+ break;
+
+ case SD8:
+ case SIMM8:
+ address += (int32_t) (int8_t) (disp & 0xff);
+ break;
+ case SD16:
+ address += (int32_t) (int16_t) (disp & 0xffff);
+ break;
+ case SD24:
+ tmp = disp << 8;
+ asm("asr 8,%0" : "=r"(tmp) : "0"(tmp));
+ address += tmp;
+ break;
+ case SIMM4_2:
+ tmp = opcode >> 4 & 0x0f;
+ tmp <<= 28;
+ asm("asr 28,%0" : "=r"(tmp) : "0"(tmp));
+ address += tmp;
+ break;
+ case IMM24:
+ address += disp & 0x00ffffff;
+ break;
+ case IMM32:
+ case IMM32_HIGH8:
+ address += disp;
+ break;
+ default:
+ return 0;
+ }
+ } while ((params >>= 8));
+
+ *_address = (void *) address;
+ *_postinc = postinc;
+ return 1;
+}
+
+/*
+ * determine the register that is acting as source/dest
+ */
+static int misalignment_reg(unsigned long *registers, unsigned params,
+ unsigned opcode, unsigned disp,
+ unsigned long **_register)
+{
+ params &= 0x7fffffff;
+
+ if (params & 0xffffff00)
+ return 0;
+
+ switch (params & 0xff) {
+ case DM0:
+ *_register = &registers[Dreg_index[opcode & 0x03]];
+ break;
+ case DM1:
+ *_register = &registers[Dreg_index[opcode >> 2 & 0x03]];
+ break;
+ case DM2:
+ *_register = &registers[Dreg_index[opcode >> 4 & 0x03]];
+ break;
+ case AM0:
+ *_register = &registers[Areg_index[opcode & 0x03]];
+ break;
+ case AM1:
+ *_register = &registers[Areg_index[opcode >> 2 & 0x03]];
+ break;
+ case AM2:
+ *_register = &registers[Areg_index[opcode >> 4 & 0x03]];
+ break;
+ case RM0:
+ *_register = &registers[Rreg_index[opcode & 0x0f]];
+ break;
+ case RM1:
+ *_register = &registers[Rreg_index[opcode >> 2 & 0x0f]];
+ break;
+ case RM2:
+ *_register = &registers[Rreg_index[opcode >> 4 & 0x0f]];
+ break;
+ case RM4:
+ *_register = &registers[Rreg_index[opcode >> 8 & 0x0f]];
+ break;
+ case RM6:
+ *_register = &registers[Rreg_index[opcode >> 12 & 0x0f]];
+ break;
+ case RD0:
+ *_register = &registers[Rreg_index[disp & 0x0f]];
+ break;
+ case RD2:
+ *_register = &registers[Rreg_index[disp >> 4 & 0x0f]];
+ break;
+ case SP:
+ *_register = &registers[REG_SP >> 2];
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
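The matcher in misalignment() widens its opcode window one byte at a time:
for each table entry it computes the significant bit count of
opcode|opmask with bsch (a floor-log2), rounds up to whole bytes, and
fetches more instruction bytes until the widths line up. An equivalent C
sketch using the kernel's fls():

	/* Hedged sketch of the width computation in misalignment(): how
	 * many opcode bits must be fetched before a table entry can be
	 * compared.  fls(x) - 1 matches the bsch-based int_log2() above
	 * for x != 0. */
	static unsigned opcode_bits(u_int32_t opcode, u_int32_t opmask)
	{
		unsigned npop = fls(opcode | opmask) - 1;

		return (npop + 8) & ~7;	/* round up to whole bytes */
	}

	/* e.g. the FMT_D0 entry 0xf300/0xffc0 yields fls(0xffc0) - 1 = 15,
	 * rounded to 16 bits, so two opcode bytes are fetched before the
	 * compare */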
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
new file mode 100644
index 00000000000..31c9d27a75a
--- /dev/null
+++ b/arch/mn10300/mm/mmu-context.c
@@ -0,0 +1,80 @@
+/* MN10300 MMU context allocation and management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+/*
+ * list of the MMU contexts last allocated on each CPU
+ */
+unsigned long mmu_context_cache[NR_CPUS] = {
+ [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+};
+
+/*
+ * flush the specified TLB entry
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long pteu, cnx, flags;
+
+ addr &= PAGE_MASK;
+
+ /* make sure the context doesn't migrate and defend against
+ * interference from vmalloc'd regions */
+ local_irq_save(flags);
+
+ cnx = mm_context(vma->vm_mm);
+
+ if (cnx != MMU_NO_CONTEXT) {
+ pteu = addr | (cnx & 0x000000ffUL);
+ IPTEU = pteu;
+ DPTEU = pteu;
+ if (IPTEL & xPTEL_V)
+ IPTEL = 0;
+ if (DPTEL & xPTEL_V)
+ DPTEL = 0;
+ }
+
+ local_irq_restore(flags);
+}
+
+/*
+ * preemptively set a TLB entry
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+ unsigned long pteu, ptel, cnx, flags;
+
+ addr &= PAGE_MASK;
+ ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
+
+ /* make sure the context doesn't migrate and defend against
+ * interference from vmalloc'd regions */
+ local_irq_save(flags);
+
+ cnx = mm_context(vma->vm_mm);
+
+ if (cnx != MMU_NO_CONTEXT) {
+ pteu = addr | (cnx & 0x000000ffUL);
+ if (!(pte_val(pte) & _PAGE_NX)) {
+ IPTEU = pteu;
+ if (IPTEL & xPTEL_V)
+ IPTEL = ptel;
+ }
+ DPTEU = pteu;
+ if (DPTEL & xPTEL_V)
+ DPTEL = ptel;
+ }
+
+ local_irq_restore(flags);
+}
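Both routines above write the same "PTE upper" encoding into IPTEU/DPTEU:
the virtual page number in the high bits, with the low byte of the context
number packed below it as an address-space ID. As a standalone sketch:

	/* Hedged sketch of the PTEU encoding used by flush_tlb_page()
	 * and update_mmu_cache() above. */
	static inline unsigned long make_pteu(unsigned long addr,
					      unsigned long cnx)
	{
		return (addr & PAGE_MASK) | (cnx & 0x000000ffUL);	/* VPN | ASID */
	}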
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
new file mode 100644
index 00000000000..a477038752b
--- /dev/null
+++ b/arch/mn10300/mm/pgtable.c
@@ -0,0 +1,197 @@
+/* MN10300 Page table management
+ *
+ * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Modified by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+#include <linux/quicklist.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+void show_mem(void)
+{
+ unsigned long i;
+ int free = 0, total = 0, reserved = 0, shared = 0;
+ int cached = 0;
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas();
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map + i))
+ reserved++;
+ else if (PageSwapCache(mem_map + i))
+ cached++;
+ else if (!page_count(mem_map + i))
+ free++;
+ else
+ shared += page_count(mem_map + i) - 1;
+ }
+ printk(KERN_INFO "%d pages of RAM\n", total);
+ printk(KERN_INFO "%d free pages\n", free);
+ printk(KERN_INFO "%d reserved pages\n", reserved);
+ printk(KERN_INFO "%d pages shared\n", shared);
+ printk(KERN_INFO "%d pages swap cached\n", cached);
+}
+
+/*
+ * Associate a large virtual page frame with a given physical page frame
+ * and protection flags for that frame. pfn is for the base of the page,
+ * vaddr is what the page gets mapped to - both must be properly aligned.
+ * The pmd must already be instantiated.
+ */
+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
+ printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
+ return; /* BUG(); */
+ }
+ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
+ printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
+ return; /* BUG(); */
+ }
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ if (pgd_none(*pgd)) {
+ printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
+ return; /* BUG(); */
+ }
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ set_pmd(pmd, pfn_pmd(pfn, flags));
+ /*
+ * It's enough to flush this one mapping.
+ */
+ __flush_tlb_one(vaddr);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ if (pte)
+ clear_page(pte);
+ return pte;
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *pte;
+
+#ifdef CONFIG_HIGHPTE
+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+#else
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+#endif
+ if (pte)
+ clear_highpage(pte);
+ return pte;
+}
+
+/*
+ * A list of all pgds is needed for the non-PAE case so that entries can be
+ * invalidated in both cached and uncached pgds; it is not needed for PAE
+ * since the kernel pmd is shared. If PAE did not share the pmd, a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change of
+ * kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed. If the
+ * locking proves to be non-performant, a ticketing scheme with checks at
+ * dup_mmap(), exec(), and other mmlist addition points could be used. The
+ * locking scheme was chosen on the basis of manfred's recommendations and
+ * having no core impact whatsoever.
+ * -- wli
+ */
+DEFINE_SPINLOCK(pgd_lock);
+struct page *pgd_list;
+
+static inline void pgd_list_add(pgd_t *pgd)
+{
+ struct page *page = virt_to_page(pgd);
+ page->index = (unsigned long) pgd_list;
+ if (pgd_list)
+ set_page_private(pgd_list, (unsigned long) &page->index);
+ pgd_list = page;
+ set_page_private(page, (unsigned long) &pgd_list);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+ struct page *next, **pprev, *page = virt_to_page(pgd);
+ next = (struct page *) page->index;
+ pprev = (struct page **) page_private(page);
+ *pprev = next;
+ if (next)
+ set_page_private(next, (unsigned long) pprev);
+}
+
+void pgd_ctor(void *pgd)
+{
+ unsigned long flags;
+
+ if (PTRS_PER_PMD == 1)
+ spin_lock_irqsave(&pgd_lock, flags);
+
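+ /* the kernel portion of the address space is common to all processes:
+ * mirror those entries from swapper_pg_dir */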
+ memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ if (PTRS_PER_PMD > 1)
+ return;
+
+ pgd_list_add(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
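+ /* the user portion of the new pgd starts out empty */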
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+}
+
+/* never called when PTRS_PER_PMD > 1 */
+void pgd_dtor(void *pgd)
+{
+ unsigned long flags; /* can be called from interrupt context */
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ quicklist_free(0, pgd_dtor, pgd);
+}
+
+void __init pgtable_cache_init(void)
+{
+}
+
+void check_pgt_cache(void)
+{
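+ /* let the quicklist retain a small pool of ready-made pgds; the
+ * trailing parameters to quicklist_trim() bound the pool size and how
+ * many pages are freed in one call */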
+ quicklist_trim(0, pgd_dtor, 25, 16);
+}
diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S
new file mode 100644
index 00000000000..789208094e9
--- /dev/null
+++ b/arch/mn10300/mm/tlb-mn10300.S
@@ -0,0 +1,207 @@
+###############################################################################
+#
+# TLB loading functions
+#
+# Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+# Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+# Modified by David Howells (dhowells@redhat.com)
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public Licence
+# as published by the Free Software Foundation; either version
+# 2 of the Licence, or (at your option) any later version.
+#
+###############################################################################
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/frame.inc>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+###############################################################################
+#
+# Instruction TLB Miss handler entry point
+#
+###############################################################################
+ .type itlb_miss,@function
+ENTRY(itlb_miss)
+ and ~EPSW_NMID,epsw
+#ifdef CONFIG_GDBSTUB
+ movm [d2,d3,a2],(sp)
+#else
+ or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
+ # register bank
+ nop
+ nop
+ nop
+#endif
+
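+ # walk the two-level page table rooted at PTBR by hand, using the
+ # faulting virtual address held in IPTEU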
+ mov (IPTEU),d3
+ mov (PTBR),a2
+ mov d3,d2
+ and 0xffc00000,d2
+ lsr 20,d2
+ mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
+ btst _PAGE_VALID,a2
+ beq itlb_miss_fault # jump if it doesn't point anywhere
+
+ and ~(PAGE_SIZE-1),a2
+ mov d3,d2
+ and 0x003ff000,d2
+ lsr 10,d2
+ add d2,a2
+ mov (a2),d2 # get pte from PTD[addr 21..12]
+ btst _PAGE_VALID,d2
+ beq itlb_miss_fault # jump if it doesn't point to a page
+ # (might be a swap id)
+ bset _PAGE_ACCESSED,(0,a2)
+ and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+itlb_miss_set:
+ mov d2,(IPTEL) # change the TLB
+#ifdef CONFIG_GDBSTUB
+ movm (sp),[d2,d3,a2]
+#endif
+ rti
+
+itlb_miss_fault:
+ mov _PAGE_VALID,d2 # force address error handler to be
+ # invoked
+ bra itlb_miss_set
+
+ .size itlb_miss, . - itlb_miss
+
+###############################################################################
+#
+# Data TLB Miss handler entry point
+#
+###############################################################################
+ .type dtlb_miss,@function
+ENTRY(dtlb_miss)
+ and ~EPSW_NMID,epsw
+#ifdef CONFIG_GDBSTUB
+ movm [d2,d3,a2],(sp)
+#else
+ or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
+ # register bank
+ nop
+ nop
+ nop
+#endif
+
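+ # walk the two-level page table rooted at PTBR by hand, using the
+ # faulting virtual address held in DPTEU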
+ mov (DPTEU),d3
+ mov (PTBR),a2
+ mov d3,d2
+ and 0xffc00000,d2
+ lsr 20,d2
+ mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
+ btst _PAGE_VALID,a2
+ beq dtlb_miss_fault # jump if it doesn't point anywhere
+
+ and ~(PAGE_SIZE-1),a2
+ mov d3,d2
+ and 0x003ff000,d2
+ lsr 10,d2
+ add d2,a2
+ mov (a2),d2 # get pte from PTD[addr 21..12]
+ btst _PAGE_VALID,d2
+ beq dtlb_miss_fault # jump if it doesn't point to a page
+ # (might be a swap id)
+ bset _PAGE_ACCESSED,(0,a2)
+ and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+dtlb_miss_set:
+ mov d2,(DPTEL) # change the TLB
+#ifdef CONFIG_GDBSTUB
+ movm (sp),[d2,d3,a2]
+#endif
+ rti
+
+dtlb_miss_fault:
+ mov _PAGE_VALID,d2 # force address error handler to be
+ # invoked
+ bra dtlb_miss_set
+ .size dtlb_miss, . - dtlb_miss
+
+###############################################################################
+#
+# Instruction TLB Address Error handler entry point
+#
+###############################################################################
+ .type itlb_aerror,@function
+ENTRY(itlb_aerror)
+ and ~EPSW_NMID,epsw
+ add -4,sp
+ SAVE_ALL
+ add -4,sp # need to pass three params
+
+ # calculate the fault code
+ movhu (MMUFCR_IFC),d1
+ or 0x00010000,d1 # it's an instruction fetch
+
+ # determine the page address
+ mov (IPTEU),a2
+ mov a2,d0
+ and PAGE_MASK,d0
+ mov d0,(12,sp)
+
+ clr d0
+ mov d0,(IPTEL)
+
+ and ~EPSW_NMID,epsw
+ or EPSW_IE,epsw
+ mov fp,d0
+ call do_page_fault[],0 # do_page_fault(regs, code, addr)
+
+ jmp ret_from_exception
+ .size itlb_aerror, . - itlb_aerror
+
+###############################################################################
+#
+# Data TLB Address Error handler entry point
+#
+###############################################################################
+ .type dtlb_aerror,@function
+ENTRY(dtlb_aerror)
+ and ~EPSW_NMID,epsw
+ add -4,sp
+ mov d1,(sp)
+
+ movhu (MMUFCR_DFC),d1 # is it the initial valid write
+ # to this page?
+ and MMUFCR_xFC_INITWR,d1
+ beq dtlb_pagefault # jump if not
+
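+ # the first write to a clean page only requires the dirty bit to be
+ # set in the cached TLB entry before restarting the write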
+ mov (DPTEL),d1 # set the dirty bit
+ # (don't replace with BSET!)
+ or _PAGE_DIRTY,d1
+ mov d1,(DPTEL)
+ mov (sp),d1
+ add 4,sp
+ rti
+
+ ALIGN
+dtlb_pagefault:
+ mov (sp),d1
+ SAVE_ALL
+ add -4,sp # need to pass three params
+
+ # calculate the fault code
+ movhu (MMUFCR_DFC),d1
+
+ # determine the page address
+ mov (DPTEU),a2
+ mov a2,d0
+ and PAGE_MASK,d0
+ mov d0,(12,sp)
+
+ clr d0
+ mov d0,(DPTEL)
+
+ and ~EPSW_NMID,epsw
+ or EPSW_IE,epsw
+ mov fp,d0
+ call do_page_fault[],0 # do_page_fault(regs, code, addr)
+
+ jmp ret_from_exception
+ .size dtlb_aerror, . - dtlb_aerror