Diffstat (limited to 'arch/microblaze/mm')
-rw-r--r--  arch/microblaze/mm/Makefile        2
-rw-r--r--  arch/microblaze/mm/consistent.c  246
-rw-r--r--  arch/microblaze/mm/fault.c        24
-rw-r--r--  arch/microblaze/mm/init.c         44
-rw-r--r--  arch/microblaze/mm/pgtable.c       4
5 files changed, 289 insertions, 31 deletions
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 6c8a924d9e2..09c49ed8723 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -2,6 +2,6 @@
# Makefile
#
-obj-y := init.o
+obj-y := consistent.o init.o
obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
new file mode 100644
index 00000000000..a9b443e3fb9
--- /dev/null
+++ b/arch/microblaze/mm/consistent.c
@@ -0,0 +1,246 @@
+/*
+ * Microblaze support for cache consistent memory.
+ * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2010 PetaLogix
+ * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
+ *
+ * Based on PowerPC version derived from arch/arm/mm/consistent.c
+ * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/cpuinfo.h>
+
+#ifndef CONFIG_MMU
+
+/* I have to use dcache values because I can't rely on RAM size */
+#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
+
+/*
+ * Consistent memory allocators. Used for DMA devices that want to
+ * share uncached memory with the processor core.
+ * My crufty no-MMU approach is simple. In the HW platform we can optionally
+ * mirror the DDR up above the processor cacheable region. So, memory accessed
+ * in this mirror region will not be cached. It's allocated from the same
+ * pool as normal memory, but the handle we return is shifted up into the
+ * uncached region. This will no doubt cause big problems if memory allocated
+ * here is not also freed properly. -- JW
+ */
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+{
+ struct page *page, *end, *free;
+ unsigned long order;
+ void *ret, *virt;
+
+ if (in_interrupt())
+ BUG();
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+ goto no_page;
+
+ /* We could do with a page_to_phys and page_to_bus here. */
+ virt = page_address(page);
+ ret = ioremap(virt_to_phys(virt), size);
+ if (!ret)
+ goto no_remap;
+
+ /*
+ * Here's the magic! Note that if the uncached shadow is not implemented,
+ * it's up to the calling code to also test that condition and make
+ * other arrangements, such as manually flushing the cache and so on.
+ */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+ ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
+#endif
+ /* dma_handle is same as physical (shadowed) address */
+ *dma_handle = (dma_addr_t)ret;
+
+ /*
+ * free wasted pages. We skip the first page since we know
+ * that it will have count = 1 and won't require freeing.
+ * We also mark the pages in use as reserved so that
+ * remap_page_range works.
+ */
+ page = virt_to_page(virt);
+ free = page + (size >> PAGE_SHIFT);
+ end = page + (1 << order);
+
+ for (; page < end; page++) {
+ init_page_count(page);
+ if (page >= free)
+ __free_page(page);
+ else
+ SetPageReserved(page);
+ }
+
+ return ret;
+no_remap:
+ __free_pages(page, order);
+no_page:
+ return NULL;
+}
+
+#else
+
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+{
+ int order, err, i;
+ unsigned long page, va, flags;
+ phys_addr_t pa;
+ struct vm_struct *area;
+ void *ret;
+
+ if (in_interrupt())
+ BUG();
+
+ /* Only allocate page size areas. */
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = __get_free_pages(gfp, order);
+ if (!page) {
+ BUG();
+ return NULL;
+ }
+
+ /*
+ * We need to ensure that there are no cachelines in use,
+ * or, worse, dirty in this area.
+ */
+ flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);
+
+ /* Allocate some common virtual space to map the new pages. */
+ area = get_vm_area(size, VM_ALLOC);
+ if (area == NULL) {
+ free_pages(page, order);
+ return NULL;
+ }
+ va = (unsigned long) area->addr;
+ ret = (void *)va;
+
+ /* This gives us the real physical address of the first page. */
+ *dma_handle = pa = virt_to_bus((void *)page);
+
+ /* MS: This is the whole magic - use cache inhibit pages */
+ flags = _PAGE_KERNEL | _PAGE_NO_CACHE;
+
+ /*
+ * Set refcount=1 on all pages in an order>0
+ * allocation so that vfree() will actually
+ * free all pages that were allocated.
+ */
+ if (order > 0) {
+ struct page *rpage = virt_to_page(page);
+ for (i = 1; i < (1 << order); i++)
+ init_page_count(rpage+i);
+ }
+
+ err = 0;
+ for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+ err = map_page(va+i, pa+i, flags);
+
+ if (err) {
+ vfree((void *)va);
+ return NULL;
+ }
+
+ return ret;
+}
+#endif /* CONFIG_MMU */
+EXPORT_SYMBOL(consistent_alloc);
+
+/*
+ * free page(s) as defined by the above mapping.
+ */
+void consistent_free(void *vaddr)
+{
+ if (in_interrupt())
+ BUG();
+
+ /* Clear SHADOW_MASK bit in address, and free as per usual */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+ vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
+#endif
+ vfree(vaddr);
+}
+EXPORT_SYMBOL(consistent_free);
+
+/*
+ * make an area consistent.
+ */
+void consistent_sync(void *vaddr, size_t size, int direction)
+{
+ unsigned long start;
+ unsigned long end;
+
+ start = (unsigned long)vaddr;
+
+ /* Convert start address back down to unshadowed memory region */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+ start &= ~UNCACHED_SHADOW_MASK;
+#endif
+ end = start + size;
+
+ switch (direction) {
+ case PCI_DMA_NONE:
+ BUG();
+ case PCI_DMA_FROMDEVICE: /* invalidate only */
+ flush_dcache_range(start, end);
+ break;
+ case PCI_DMA_TODEVICE: /* writeback only */
+ flush_dcache_range(start, end);
+ break;
+ case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ flush_dcache_range(start, end);
+ break;
+ }
+}
+EXPORT_SYMBOL(consistent_sync);
+
+/*
+ * consistent_sync_page makes memory consistent; it is identical to
+ * consistent_sync, but takes a struct page instead of a virtual
+ * address.
+ */
+void consistent_sync_page(struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ unsigned long start = (unsigned long)page_address(page) + offset;
+ consistent_sync((void *)start, size, direction);
+}
+EXPORT_SYMBOL(consistent_sync_page);
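
For context, here is a minimal usage sketch (not part of this patch; the
buffer size and all names are made up) showing how a driver would pair
the allocator with the free routine above:

/* Hypothetical caller, for illustration only. */
static void *desc_ring;			/* CPU view of the buffer */
static dma_addr_t desc_ring_dma;	/* device view (bus address) */

static int example_ring_setup(void)
{
	/* Uncached via the shadow region on noMMU, _PAGE_NO_CACHE on MMU. */
	desc_ring = consistent_alloc(GFP_KERNEL, 4096, &desc_ring_dma);
	if (!desc_ring)
		return -ENOMEM;
	/* Program desc_ring_dma into the device; the CPU uses desc_ring. */
	return 0;
}

static void example_ring_teardown(void)
{
	consistent_free(desc_ring);	/* strips the shadow bit, then frees */
}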
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff..7af87f4b2c2 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
regs->esr = error_code;
/* On a kernel SLB miss we can only check for a valid exception entry */
- if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+ if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
printk(KERN_WARNING "kernel task_size exceed");
_exception(SIGSEGV, regs, code, address);
}
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
#endif /* CONFIG_KGDB */
- if (in_atomic() || !mm) {
+ if (unlikely(in_atomic() || !mm)) {
if (kernel_mode(regs))
goto bad_area_nosemaphore;
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
vma = find_vma(mm, address);
- if (!vma)
+ if (unlikely(!vma))
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
goto bad_area;
- if (!is_write)
+ if (unlikely(!is_write))
goto bad_area;
/*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
- if (address + 0x100000 < vma->vm_end) {
+ if (unlikely(address + 0x100000 < vma->vm_end)) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
code = SEGV_ACCERR;
/* a write */
- if (is_write) {
- if (!(vma->vm_flags & VM_WRITE))
+ if (unlikely(is_write)) {
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
goto bad_area;
/* a read */
} else {
/* protection fault */
- if (error_code & 0x08000000)
+ if (unlikely(error_code & 0x08000000))
goto bad_area;
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
goto bad_area;
}
@@ -235,7 +235,7 @@ survive:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (unlikely(fault & VM_FAULT_MAJOR))
current->maj_flt++;
else
current->min_flt++;
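
The fault.c hunks above are pure branch annotation: every rarely-taken
test is wrapped in unlikely() so the compiler lays out the common case as
straight-line code. For reference, these macros are the standard kernel
wrappers around GCC's __builtin_expect(), roughly:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int demo(int err)
{
	if (unlikely(err))
		return -1;	/* cold path, moved out of line */
	return 0;		/* hot path falls through */
}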
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a57cedf3671..40bc10ede09 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -23,6 +23,9 @@
#include <asm/sections.h>
#include <asm/tlb.h>
+/* Used for both MMU and noMMU because of generic PCI code */
+int mem_init_done;
+
#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
@@ -30,7 +33,6 @@ EXPORT_SYMBOL(__page_offset);
#else
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-int mem_init_done;
static int init_bootmem_done;
#endif /* CONFIG_MMU */
@@ -163,7 +165,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)addr, 0xcc, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
@@ -193,12 +194,6 @@ void free_initmem(void)
(unsigned long)(&__init_end));
}
-/* FIXME from arch/powerpc/mm/mem.c*/
-void show_mem(void)
-{
- printk(KERN_NOTICE "%s\n", __func__);
-}
-
void __init mem_init(void)
{
high_memory = (void *)__va(memory_end);
@@ -208,20 +203,14 @@ void __init mem_init(void)
printk(KERN_INFO "Memory: %luk/%luk available\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10));
-#ifdef CONFIG_MMU
mem_init_done = 1;
-#endif
}
#ifndef CONFIG_MMU
-/* Check against bounds of physical memory */
-int ___range_ok(unsigned long addr, unsigned long size)
+int page_is_ram(unsigned long pfn)
{
- return ((addr < memory_start) ||
- ((addr + size) > memory_end));
+ return __range_ok(pfn, 0);
}
-EXPORT_SYMBOL(___range_ok);
-
#else
int page_is_ram(unsigned long pfn)
{
@@ -349,4 +338,27 @@ void __init *early_get_page(void)
}
return p;
}
+
#endif /* CONFIG_MMU */
+
+void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
+{
+ if (mem_init_done)
+ return kmalloc(size, mask);
+ else
+ return alloc_bootmem(size);
+}
+
+void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
+{
+ void *p;
+
+ if (mem_init_done)
+ p = kzalloc(size, mask);
+ else {
+ p = alloc_bootmem(size);
+ if (p)
+ memset(p, 0, size);
+ }
+ return p;
+}
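
The two helpers added above mirror the PowerPC functions of the same
name: generic PCI setup code runs both before and after mem_init(), and
mem_init_done picks bootmem or the slab allocator accordingly. A hedged
usage sketch (the structure and caller are hypothetical):

struct example_hose {
	int index;
	void __iomem *cfg;
};

static struct example_hose *example_alloc_hose(void)
{
	/* Zeroed either way: bootmem early, kzalloc() once mem_init_done. */
	return zalloc_maybe_bootmem(sizeof(struct example_hose), GFP_KERNEL);
}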
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 2820081b21a..d31312cde6e 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -103,7 +103,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
area = get_vm_area(size, VM_IOREMAP);
if (area == NULL)
return NULL;
- v = VMALLOC_VMADDR(area->addr);
+ v = (unsigned long) area->addr;
} else {
v = (ioremap_bot -= size);
}
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
err = 0;
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
- if (mem_init_done)
+ if (unlikely(mem_init_done))
flush_HPTE(0, va, pmd_val(*pd));
/* flush_HPTE(0, va, pg); */
}
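
map_page(), touched in the last hunk, installs a single kernel PTE and,
once the page tables are live, flushes the corresponding TLB entry. A
sketch of mapping one uncached page by hand (the addresses here are
invented for illustration):

unsigned long va = 0xf8000000UL;	/* chosen kernel virtual address */
phys_addr_t pa = 0x84000000;		/* device register page */

if (map_page(va, pa, _PAGE_KERNEL | _PAGE_NO_CACHE))
	printk(KERN_ERR "map_page() failed\n");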