Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig       |  31
-rw-r--r--  arch/sh/mm/cache-sh5.c   |   8
-rw-r--r--  arch/sh/mm/init.c        |   3
-rw-r--r--  arch/sh/mm/ioremap_32.c  |  14
-rw-r--r--  arch/sh/mm/ioremap_64.c  | 266
-rw-r--r--  arch/sh/mm/mmap.c        | 136
6 files changed, 261 insertions(+), 197 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index d4079cab2d5..2795618e4f0 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -21,6 +21,29 @@ config PAGE_OFFSET
 	default "0x20000000" if MMU && SUPERH64
 	default "0x00000000"
 
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	range 9 64 if PAGE_SIZE_16KB
+	default "9" if PAGE_SIZE_16KB
+	range 7 64 if PAGE_SIZE_64KB
+	default "7" if PAGE_SIZE_64KB
+	range 11 64
+	default "14" if !MMU
+	default "11"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages.  This option selects the largest power of two that the kernel
+	  keeps in the memory allocator.  If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 11 means that the largest free memory block is 2^10 pages.
+
+	  The page size is not necessarily 4KB. Keep this in mind when
+	  choosing a value for this option.
+
 config MEMORY_START
 	hex "Physical memory start address"
 	default "0x08000000"
@@ -201,14 +224,6 @@ config PAGE_SIZE_64KB
 
 endchoice
 
-config ENTRY_OFFSET
-	hex
-	default "0x00001000" if PAGE_SIZE_4KB
-	default "0x00002000" if PAGE_SIZE_8KB
-	default "0x00004000" if PAGE_SIZE_16KB
-	default "0x00010000" if PAGE_SIZE_64KB
-	default "0x00000000"
-
 choice
	prompt "HugeTLB page size"
	depends on HUGETLB_PAGE && (CPU_SH4 || CPU_SH5) && MMU
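The FORCE_MAX_ZONEORDER help text above is worth making concrete: the option is "maximum order plus one", so the largest buddy block is 2^(value - 1) pages, and its byte size scales with the configured page size. A minimal standalone sketch of that arithmetic in plain C; the 16KB page size and the order value 9 are simply the defaults from the Kconfig entry above:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 16 * 1024;	/* PAGE_SIZE_16KB case */
	int force_max_zoneorder = 9;		/* default for 16KB pages above */

	/* "maximum order plus one": largest block is 2^(value - 1) pages */
	unsigned long pages = 1UL << (force_max_zoneorder - 1);

	printf("largest free block: %lu pages = %lu KiB\n",
	       pages, pages * page_size / 1024);	/* 256 pages = 4096 KiB */
	return 0;
}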
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 9e277ec7d53..86762092508 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -60,7 +60,7 @@ static inline void sh64_teardown_dtlb_cache_slot(void)
 static inline void sh64_icache_inv_all(void)
 {
 	unsigned long long addr, flag, data;
-	unsigned int flags;
+	unsigned long flags;
 
 	addr = ICCR0;
 	flag = ICCR0_ICI;
@@ -172,7 +172,7 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 	unsigned long eaddr;
 	unsigned long after_last_page_start;
 	unsigned long mm_asid, current_asid;
-	unsigned long long flags = 0ULL;
+	unsigned long flags = 0;
 
 	mm_asid = cpu_asid(smp_processor_id(), mm);
 	current_asid = get_asid();
@@ -236,7 +236,7 @@ static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
 	unsigned long long eaddr = start;
 	unsigned long long eaddr_end = start + len;
 	unsigned long current_asid, mm_asid;
-	unsigned long long flags;
+	unsigned long flags;
 	unsigned long long epage_start;
 
 	/*
@@ -342,7 +342,7 @@ static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
 		 * alloco is a NOP if the cache is write-through.
 		 */
 		if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
-			ctrl_inb(eaddr);
+			__raw_readb((unsigned long)eaddr);
 	}
 }
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 3edf297c829..ee8e6bbe882 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -184,7 +184,6 @@ void __init paging_init(void)
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
-int after_bootmem = 0;
 
 void __init mem_init(void)
 {
@@ -217,8 +216,6 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	after_bootmem = 1;
-
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 60cc486d2c2..da2f4186f2c 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -46,17 +46,15 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 
 	/*
-	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
-	 * mapped at the end of the address space (typically 0xfd000000)
-	 * in a non-translatable area, so mapping through page tables for
-	 * this area is not only pointless, but also fundamentally
-	 * broken. Just return the physical address instead.
+	 * If we're in the fixed PCI memory range, mapping through page
+	 * tables is not only pointless, but also fundamentally broken.
+	 * Just return the physical address instead.
 	 *
 	 * For boards that map a small PCI memory aperture somewhere in
 	 * P1/P2 space, ioremap() will already do the right thing,
 	 * and we'll never get this far.
 	 */
-	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
+	if (is_pci_memory_fixed_range(phys_addr, size))
 		return (void __iomem *)phys_addr;
 
 #if !defined(CONFIG_PMB_FIXED)
@@ -121,7 +119,9 @@ void __iounmap(void __iomem *addr)
 	unsigned long seg = PXSEG(vaddr);
 	struct vm_struct *p;
 
-	if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
+	if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
+		return;
+	if (is_pci_memory_fixed_range(vaddr, 0))
 		return;
 
 #ifdef CONFIG_PMB
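The ioremap_32.c change above replaces two per-endpoint is_pci_memaddr() tests with a single is_pci_memory_fixed_range(addr, size) query, with size 0 standing in for a single-address test in the __iounmap() path. A rough, hypothetical sketch of what such a range predicate does; the real window bounds live in the SH PCI code, and the start/size constants here are made up for illustration:

#include <stdio.h>

#define FIXED_PCI_MEM_START	0xfd000000UL	/* assumed window base */
#define FIXED_PCI_MEM_SIZE	0x01000000UL	/* assumed window length */

/* Treats size == 0 as a single-address query, as the __iounmap call does. */
static int fixed_pci_range_check(unsigned long start, unsigned long size)
{
	unsigned long last = start + (size ? size - 1 : 0);

	return start >= FIXED_PCI_MEM_START &&
	       last < FIXED_PCI_MEM_START + FIXED_PCI_MEM_SIZE;
}

int main(void)
{
	printf("%d %d\n", fixed_pci_range_check(0xfd000000UL, 0x1000),
	       fixed_pci_range_check(0x0c000000UL, 0x1000));	/* 1 0 */
	return 0;
}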
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
index 31e1bb5effb..828c8597219 100644
--- a/arch/sh/mm/ioremap_64.c
+++ b/arch/sh/mm/ioremap_64.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
+#include <linux/slab.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/addrspace.h>
@@ -27,88 +28,17 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
-static void shmedia_mapioaddr(unsigned long, unsigned long);
-static unsigned long shmedia_ioremap(struct resource *, u32, int);
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void *__ioremap(unsigned long phys_addr, unsigned long size,
-		unsigned long flags)
-{
-	void * addr;
-	struct vm_struct * area;
-	unsigned long offset, last_addr;
-	pgprot_t pgprot;
-
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
-		return NULL;
-
-	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ |
-			  _PAGE_WRITE | _PAGE_DIRTY |
-			  _PAGE_ACCESSED | _PAGE_SHARED | flags);
-
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-	pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
-	area->phys_addr = phys_addr;
-	addr = area->addr;
-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-			       phys_addr, pgprot)) {
-		vunmap(addr);
-		return NULL;
-	}
-	return (void *) (offset + (char *)addr);
-}
-EXPORT_SYMBOL(__ioremap);
-
-void __iounmap(void *addr)
-{
-	struct vm_struct *area;
-
-	vfree((void *) (PAGE_MASK & (unsigned long) addr));
-	area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
-	if (!area) {
-		printk(KERN_ERR "iounmap: bad address %p\n", addr);
-		return;
-	}
-
-	kfree(area);
-}
-EXPORT_SYMBOL(__iounmap);
-
 static struct resource shmedia_iomap = {
 	.name  = "shmedia_iomap",
 	.start = IOBASE_VADDR + PAGE_SIZE,
 	.end   = IOBASE_END - 1,
 };
 
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
+static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
+			      unsigned long flags);
 static void shmedia_unmapioaddr(unsigned long vaddr);
-static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
+static void __iomem *shmedia_ioremap(struct resource *res, u32 pa,
+				     int sz, unsigned long flags);
 
 /*
  * We have the same problem as the SPARC, so lets have the same comment:
@@ -130,18 +60,18 @@ static struct xresource xresv[XNRES];
 
 static struct xresource *xres_alloc(void)
 {
-        struct xresource *xrp;
-        int n;
-
-        xrp = xresv;
-        for (n = 0; n < XNRES; n++) {
-                if (xrp->xflag == 0) {
-                        xrp->xflag = 1;
-                        return xrp;
-                }
-                xrp++;
-        }
-        return NULL;
+	struct xresource *xrp;
+	int n;
+
+	xrp = xresv;
+	for (n = 0; n < XNRES; n++) {
+		if (xrp->xflag == 0) {
+			xrp->xflag = 1;
+			return xrp;
+		}
+		xrp++;
+	}
+	return NULL;
 }
 
 static void xres_free(struct xresource *xrp)
@@ -161,76 +91,71 @@ static struct resource *shmedia_find_resource(struct resource *root,
 	return NULL;
 }
 
-static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
-				      const char *name)
+static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size,
+				      const char *name, unsigned long flags)
 {
-	static int printed_full = 0;
-	struct xresource *xres;
-	struct resource *res;
-	char *tack;
-	int tlen;
-
-	if (name == NULL) name = "???";
-
-	if ((xres = xres_alloc()) != 0) {
-		tack = xres->xname;
-		res = &xres->xres;
-	} else {
-		if (!printed_full) {
-			printk("%s: done with statics, switching to kmalloc\n",
-			       __func__);
-			printed_full = 1;
-		}
-		tlen = strlen(name);
-		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
-		if (!tack)
-			return -ENOMEM;
-		memset(tack, 0, sizeof(struct resource));
-		res = (struct resource *) tack;
-		tack += sizeof (struct resource);
-	}
-
-	strncpy(tack, name, XNMLN);
-	tack[XNMLN] = 0;
-	res->name = tack;
-
-	return shmedia_ioremap(res, phys, size);
+	static int printed_full;
+	struct xresource *xres;
+	struct resource *res;
+	char *tack;
+	int tlen;
+
+	if (name == NULL)
+		name = "???";
+
+	xres = xres_alloc();
+	if (xres != 0) {
+		tack = xres->xname;
+		res = &xres->xres;
+	} else {
+		if (!printed_full) {
+			printk(KERN_NOTICE "%s: done with statics, "
+			       "switching to kmalloc\n", __func__);
+			printed_full = 1;
+		}
+		tlen = strlen(name);
+		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
+		if (!tack)
+			return NULL;
+		memset(tack, 0, sizeof(struct resource));
+		res = (struct resource *) tack;
+		tack += sizeof(struct resource);
+	}
+
+	strncpy(tack, name, XNMLN);
+	tack[XNMLN] = 0;
+	res->name = tack;
+
+	return shmedia_ioremap(res, phys, size, flags);
 }
 
-static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
+static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz,
+				     unsigned long flags)
 {
-        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
+	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
 	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
-        unsigned long va;
-        unsigned int psz;
+	unsigned long va;
+	unsigned int psz;
 
-	if (allocate_resource(&shmedia_iomap, res, round_sz,
+	if (allocate_resource(&shmedia_iomap, res, round_sz,
 			      shmedia_iomap.start, shmedia_iomap.end,
 			      PAGE_SIZE, NULL, NULL) != 0) {
-                panic("alloc_io_res(%s): cannot occupy\n",
-                    (res->name != NULL)? res->name: "???");
-        }
+		panic("alloc_io_res(%s): cannot occupy\n",
+		      (res->name != NULL) ? res->name : "???");
+	}
 
-        va = res->start;
-        pa &= PAGE_MASK;
+	va = res->start;
+	pa &= PAGE_MASK;
 
 	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
 
-	/* log at boot time ... */
-	printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
-	       ((res->name != NULL) ? res->name : "???"),
-	       psz, psz == 1 ? " " : "s", va, pa);
-
-	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
-		shmedia_mapioaddr(pa, va);
-		va += PAGE_SIZE;
-		pa += PAGE_SIZE;
-	}
-
-	res->start += offset;
-	res->end = res->start + sz - 1;		/* not strictly necessary.. */
+	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
+		shmedia_mapioaddr(pa, va, flags);
+		va += PAGE_SIZE;
+		pa += PAGE_SIZE;
+	}
 
-	return res->start;
+	return (void __iomem *)(unsigned long)(res->start + offset);
 }
 
 static void shmedia_free_io(struct resource *res)
@@ -249,14 +174,12 @@ static void shmedia_free_io(struct resource *res)
 
 static __init_refok void *sh64_get_page(void)
 {
-	extern int after_bootmem;
 	void *page;
 
-	if (after_bootmem) {
-		page = (void *)get_zeroed_page(GFP_ATOMIC);
-	} else {
+	if (slab_is_available())
+		page = (void *)get_zeroed_page(GFP_KERNEL);
+	else
 		page = alloc_bootmem_pages(PAGE_SIZE);
-	}
 
 	if (!page || ((unsigned long)page & ~PAGE_MASK))
 		panic("sh64_get_page: Out of memory already?\n");
@@ -264,17 +187,20 @@ static __init_refok void *sh64_get_page(void)
 	return page;
 }
 
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
+static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
+			      unsigned long flags)
 {
 	pgd_t *pgdp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep, pte;
 	pgprot_t prot;
-	unsigned long flags = 1; /* 1 = CB0-1 device */
 
 	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);
 
+	if (!flags)
+		flags = 1; /* 1 = CB0-1 device */
+
 	pgdp = pgd_offset_k(va);
 	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
 		pudp = (pud_t *)sh64_get_page();
@@ -288,7 +214,7 @@ static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
 	}
 
 	pmdp = pmd_offset(pudp, va);
-	if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
+	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
 		ptep = (pte_t *)sh64_get_page();
 		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
 	}
@@ -336,17 +262,19 @@ static void shmedia_unmapioaddr(unsigned long vaddr)
 	pte_clear(&init_mm, vaddr, ptep);
 }
 
-unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
+void __iomem *__ioremap(unsigned long offset, unsigned long size,
+			unsigned long flags)
 {
-	if (size < PAGE_SIZE)
-		size = PAGE_SIZE;
+	char name[14];
 
-	return shmedia_alloc_io(phys, size, name);
+	sprintf(name, "phys_%08x", (u32)offset);
+	return shmedia_alloc_io(offset, size, name, flags);
 }
-EXPORT_SYMBOL(onchip_remap);
+EXPORT_SYMBOL(__ioremap);
 
-void onchip_unmap(unsigned long vaddr)
+void __iounmap(void __iomem *virtual)
 {
+	unsigned long vaddr = (unsigned long)virtual & PAGE_MASK;
 	struct resource *res;
 	unsigned int psz;
 
@@ -357,10 +285,7 @@ void onchip_unmap(unsigned long vaddr)
 		return;
 	}
 
-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-	printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
-	       res->name, psz, psz == 1 ? " " : "s");
+	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
 
 	shmedia_free_io(res);
 
@@ -371,9 +296,8 @@ void onchip_unmap(unsigned long vaddr)
 		kfree(res);
 	}
 }
-EXPORT_SYMBOL(onchip_unmap);
+EXPORT_SYMBOL(__iounmap);
 
-#ifdef CONFIG_PROC_FS
 static int
 ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
 		  void *data)
@@ -385,7 +309,10 @@ ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
 	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
 		if (p + 32 >= e)        /* Better than nothing */
 			break;
-		if ((nm = r->name) == 0) nm = "???";
+		nm = r->name;
+		if (nm == NULL)
+			nm = "???";
+
 		p += sprintf(p, "%08lx-%08lx: %s\n",
 			     (unsigned long)r->start,
 			     (unsigned long)r->end, nm);
@@ -393,14 +320,11 @@ ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
 
 	return p-buf;
 }
-#endif /* CONFIG_PROC_FS */
 
 static int __init register_proc_onchip(void)
 {
-#ifdef CONFIG_PROC_FS
-	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
-#endif
+	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
+			       &shmedia_iomap);
 	return 0;
 }
-
-__initcall(register_proc_onchip);
+late_initcall(register_proc_onchip);
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 931f4d003fa..1b5fdfb4e0c 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/mm/mmap.c
  *
- * Copyright (C) 2008 Paul Mundt
+ * Copyright (C) 2008 - 2009 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -21,9 +21,26 @@ EXPORT_SYMBOL(shm_align_mask);
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr, pgoff)				\
-	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+					 unsigned long pgoff)
+{
+	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -103,6 +120,117 @@ full_search:
 			addr = COLOUR_ALIGN(addr, pgoff);
 	}
 }
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_colour_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (unlikely(len > TASK_SIZE))
+		return -ENOMEM;
+
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_colour_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (likely(addr > len)) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+		}
+	}
+
+	if (unlikely(mm->mmap_base < len))
+		goto bottomup;
+
+	addr = mm->mmap_base-len;
+	if (do_colour_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (likely(!vma || addr+len <= vma->vm_start)) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		}
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+		if (do_colour_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (likely(len < vma->vm_start));
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
 #endif /* CONFIG_MMU */
 
 /*
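The point of the COLOUR_ALIGN()/COLOUR_ALIGN_DOWN() helpers above is that the returned address and the file offset share the same cache colour (the same bits under shm_align_mask), so shared mappings of the same page never alias in the cache. A small userspace check of the upward-aligning case; the mask value is an assumed example, since on real hardware it comes from the cache geometry:

#include <assert.h>

#define PAGE_SHIFT	12
static unsigned long shm_align_mask = 0x3fff;	/* assumed 16KB aliasing */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

int main(void)
{
	unsigned long addr = colour_align(0x10001000UL, 3);

	/* address colour matches the offset colour: 3 << 12 = 0x3000 */
	assert((addr & shm_align_mask) ==
	       ((3UL << PAGE_SHIFT) & shm_align_mask));
	return 0;
}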