Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/Kconfig          |  4
-rw-r--r--   arch/sh/mm/Makefile         |  3
-rw-r--r--   arch/sh/mm/asids-debugfs.c  |  2
-rw-r--r--   arch/sh/mm/cache-debugfs.c  | 10
-rw-r--r--   arch/sh/mm/consistent.c     |  3
-rw-r--r--   arch/sh/mm/init.c           | 69
-rw-r--r--   arch/sh/mm/nommu.c          |  4
-rw-r--r--   arch/sh/mm/pmb.c            | 35
-rw-r--r--   arch/sh/mm/sram.c           | 34
-rw-r--r--   arch/sh/mm/tlb-debugfs.c    | 11
-rw-r--r--   arch/sh/mm/tlbflush_32.c    | 16
-rw-r--r--   arch/sh/mm/tlbflush_64.c    |  5
12 files changed, 135 insertions, 61 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 1445ca6257d..09370392aff 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -168,6 +168,10 @@ config IOREMAP_FIXED
 config UNCACHED_MAPPING
 	bool
 
+config HAVE_SRAM_POOL
+	bool
+	select GENERIC_ALLOCATOR
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 53f7c684afb..ab89ea4f941 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
 obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
+obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
 
 # Special flags for fault_64.o.  This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
@@ -66,4 +67,4 @@ CFLAGS_fault_64.o += -ffixed-r7 \
 		     -ffixed-r60 -ffixed-r61 -ffixed-r62 \
 		     -fomit-frame-pointer
 
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index cd8c3bf39b5..74c03ecc487 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -63,7 +63,7 @@ static int __init asids_debugfs_init(void)
 {
 	struct dentry *asids_dentry;
 
-	asids_dentry = debugfs_create_file("asids", S_IRUSR, sh_debugfs_root,
+	asids_dentry = debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir,
 					   NULL, &asids_debugfs_fops);
 	if (!asids_dentry)
 		return -ENOMEM;
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 690ed010d00..52411462c40 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -126,25 +126,19 @@ static int __init cache_debugfs_init(void)
 {
 	struct dentry *dcache_dentry, *icache_dentry;
 
-	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
+	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_DCACHE,
 					    &cache_debugfs_fops);
 	if (!dcache_dentry)
 		return -ENOMEM;
-	if (IS_ERR(dcache_dentry))
-		return PTR_ERR(dcache_dentry);
 
-	icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
+	icache_dentry = debugfs_create_file("icache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_ICACHE,
 					    &cache_debugfs_fops);
 	if (!icache_dentry) {
 		debugfs_remove(dcache_dentry);
 		return -ENOMEM;
 	}
-	if (IS_ERR(icache_dentry)) {
-		debugfs_remove(dcache_dentry);
-		return PTR_ERR(icache_dentry);
-	}
 
 	return 0;
 }
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index c86a0854025..03879328699 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -38,11 +38,12 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	void *ret, *ret_nocache;
 	int order = get_order(size);
 
+	gfp |= __GFP_ZERO;
+
 	ret = (void *)__get_free_pages(gfp, order);
 	if (!ret)
 		return NULL;
 
-	memset(ret, 0, size);
 	/*
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
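Note on the consistent.c hunk above: setting __GFP_ZERO in the gfp mask asks the page allocator to hand back pages that are already zeroed, which is what makes the explicit memset() of the fresh buffer removable. A minimal illustrative sketch of the resulting allocation path (not part of the patch; the cache flushing and uncached remapping that follow in dma_generic_alloc_coherent() are unchanged):

	gfp |= __GFP_ZERO;
	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;
	/* ret now points at (PAGE_SIZE << order) bytes of zeroed, cacheable memory. */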
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d0e249100e9..3385b28acaa 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -47,7 +47,6 @@ static pte_t *__get_pte_phys(unsigned long addr)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
 
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
@@ -67,8 +66,7 @@ static pte_t *__get_pte_phys(unsigned long addr)
 		return NULL;
 	}
 
-	pte = pte_offset_kernel(pmd, addr);
-	return pte;
+	return pte_offset_kernel(pmd, addr);
 }
 
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -125,13 +123,45 @@ void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
 	clear_pte_phys(address, prot);
 }
 
+static pmd_t * __init one_md_table_init(pud_t *pud)
+{
+	if (pud_none(*pud)) {
+		pmd_t *pmd;
+
+		pmd = alloc_bootmem_pages(PAGE_SIZE);
+		pud_populate(&init_mm, pud, pmd);
+		BUG_ON(pmd != pmd_offset(pud, 0));
+	}
+
+	return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte;
+
+		pte = alloc_bootmem_pages(PAGE_SIZE);
+		pmd_populate_kernel(&init_mm, pmd, pte);
+		BUG_ON(pte != pte_offset_kernel(pmd, 0));
+	}
+
+	return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					    unsigned long vaddr, pte_t *lastpte)
+{
+	return pte;
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 					 pgd_t *pgd_base)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
 	int i, j, k;
 	unsigned long vaddr;
 
@@ -144,19 +174,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
-#ifdef __PAGETABLE_PMD_FOLDED
-			pmd = (pmd_t *)pud;
-#else
-			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			pud_populate(&init_mm, pud, pmd);
+			pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
 			pmd += k;
 #endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
-				if (pmd_none(*pmd)) {
-					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-					pmd_populate_kernel(&init_mm, pmd, pte);
-					BUG_ON(pte != pte_offset_kernel(pmd, 0));
-				}
+				pte = page_table_kmap_check(one_page_table_init(pmd),
+							    pmd, vaddr, pte);
 				vaddr += PMD_SIZE;
 			}
 			k = 0;
@@ -200,7 +224,6 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	unsigned long total_pages, paddr;
 	unsigned long end_pfn;
 	struct pglist_data *p;
-	int i;
 
 	p = NODE_DATA(nid);
 
@@ -226,11 +249,12 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	 * reservations in other nodes.
 	 */
 	if (nid == 0) {
+		struct memblock_region *reg;
+
 		/* Reserve the sections we're already using. */
-		for (i = 0; i < memblock.reserved.cnt; i++)
-			reserve_bootmem(memblock.reserved.region[i].base,
-					memblock_size_bytes(&memblock.reserved, i),
-					BOOTMEM_DEFAULT);
+		for_each_memblock(reserved, reg) {
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		}
 	}
 
 	sparse_memory_present_with_active_regions(nid);
@@ -238,13 +262,14 @@ static void __init bootmem_init_one_node(unsigned int nid)
 
 static void __init do_init_bootmem(void)
 {
+	struct memblock_region *reg;
 	int i;
 
 	/* Add active regions with valid PFNs. */
-	for (i = 0; i < memblock.memory.cnt; i++) {
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		__add_active_range(0, start_pfn, end_pfn);
 	}
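Note on the memblock conversion in init.c above: direct indexing of memblock.memory.region[] and memblock.reserved.region[] is replaced by the for_each_memblock() iterator, with each region converted to a page-frame range through the memblock_region_memory_base_pfn()/memblock_region_memory_end_pfn() helpers. A minimal sketch of the iteration pattern as used by this kernel generation (illustrative only; __add_active_range() is the existing sh helper that registers the range):

	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn   = memblock_region_memory_end_pfn(reg);

		/* Hand each available RAM range to the arch bootmem code. */
		__add_active_range(0, start_pfn, end_pfn);
	}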
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 7694f50c903..36312d254fa 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -67,6 +67,10 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	BUG();
 }
 
+void __flush_tlb_global(void)
+{
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 6379091a164..b20b1b3eee4 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;
 
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	memset(pmbe, 0, sizeof(struct pmb_entry));
 
-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
 
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			return PTR_ERR(pmbe);
 		}
 
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = pmb_sizes[i].size;
 
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		 * entries for easier tear-down.
 		 */
 		if (likely(pmbp)) {
-			spin_lock(&pmbp->lock);
+			raw_spin_lock_nested(&pmbp->lock,
+					     SINGLE_DEPTH_NESTING);
 			pmbp->link = pmbe;
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		i--;
 		mapped++;
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 		}
 	} while (size >= SZ_16M);
 
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
 
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
 		if (pmbp) {
-			spin_lock(&pmbp->lock);
-
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
-
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
 
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 
 		__set_pmb_entry(pmbe);
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
 	read_unlock(&pmb_rwlock);
@@ -866,11 +865,9 @@ static int __init pmb_debugfs_init(void)
 	struct dentry *dentry;
 
 	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
-				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
+				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
 	if (!dentry)
 		return -ENOMEM;
-	if (IS_ERR(dentry))
-		return PTR_ERR(dentry);
 
 	return 0;
 }
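Note on the pmb.c hunks above: the per-entry lock becomes a raw_spinlock_t, and when a new PMB entry is linked to the previous one the second lock is taken with raw_spin_lock_nested(..., SINGLE_DEPTH_NESTING) so that lockdep accepts holding two locks of the same lock class at once. A minimal sketch of the linking pattern (illustrative only; pmbe/pmbp and the link field are as defined in struct pmb_entry):

	raw_spin_lock_irqsave(&pmbe->lock, flags);

	if (pmbp) {
		/* pmbp->lock and pmbe->lock share a class, so annotate the nesting. */
		raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
		pmbp->link = pmbe;
		raw_spin_unlock(&pmbp->lock);
	}

	raw_spin_unlock_irqrestore(&pmbe->lock, flags);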
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
new file mode 100644
index 00000000000..bc156ec4545
--- /dev/null
+++ b/arch/sh/mm/sram.c
@@ -0,0 +1,34 @@
+/*
+ * SRAM pool for tiny memories not otherwise managed.
+ *
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/sram.h>
+
+/*
+ * This provides a standard SRAM pool for tiny memories that can be
+ * added either by the CPU or the platform code. Typical SRAM sizes
+ * to be inserted in to the pool will generally be less than the page
+ * size, with anything more reasonably sized handled as a NUMA memory
+ * node.
+ */
+struct gen_pool *sram_pool;
+
+static int __init sram_pool_init(void)
+{
+	/*
+	 * This is a global pool, we don't care about node locality.
+	 */
+	sram_pool = gen_pool_create(1, -1);
+	if (unlikely(!sram_pool))
+		return -ENOMEM;
+
+	return 0;
+}
+core_initcall(sram_pool_init);
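The new sram.c above creates sram_pool as an ordinary genalloc pool (1-byte allocation granularity, no NUMA node affinity), so CPU or board code can donate small on-chip memories and drivers can carve allocations out of them through the standard gen_pool API. A hypothetical sketch, assuming a platform SRAM at SRAM_BASE/SRAM_SIZE (names invented for illustration, error handling trimmed):

	#include <linux/genalloc.h>
	#include <asm/sram.h>

	/* Board setup: donate the on-chip SRAM to the global pool. */
	gen_pool_add(sram_pool, SRAM_BASE, SRAM_SIZE, -1);

	/* Driver: grab a 256-byte scratch buffer from the pool... */
	unsigned long buf = gen_pool_alloc(sram_pool, 256);
	if (!buf)
		return -ENOMEM;

	/* ...and hand it back when finished. */
	gen_pool_free(sram_pool, buf, 256);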
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
index 229bf75f28d..dea637a0924 100644
--- a/arch/sh/mm/tlb-debugfs.c
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -151,15 +151,13 @@ static int __init tlb_debugfs_init(void)
 {
 	struct dentry *itlb, *utlb;
 
-	itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+	itlb = debugfs_create_file("itlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_ITLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!itlb))
 		return -ENOMEM;
-	if (IS_ERR(itlb))
-		return PTR_ERR(itlb);
 
-	utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+	utlb = debugfs_create_file("utlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_UTLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!utlb)) {
@@ -167,11 +165,6 @@ static int __init tlb_debugfs_init(void)
 		return -ENOMEM;
 	}
 
-	if (IS_ERR(utlb)) {
-		debugfs_remove(itlb);
-		return PTR_ERR(utlb);
-	}
-
 	return 0;
 }
 module_init(tlb_debugfs_init);
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 3fbe03ce8fe..a6a20d6de4c 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,3 +119,19 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 		local_irq_restore(flags);
 	}
 }
+
+void __flush_tlb_global(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/*
+	 * This is the most destructive of the TLB flushing options,
+	 * and will tear down all of the UTLB/ITLB mappings, including
+	 * wired entries.
+	 */
+	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 03db41cc126..7f5810f5dfd 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -455,6 +455,11 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	flush_tlb_all();
 }
 
+void __flush_tlb_global(void)
+{
+	flush_tlb_all();
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }