Diffstat (limited to 'arch/xtensa/mm')
 arch/xtensa/mm/Makefile  |   1
 arch/xtensa/mm/cache.c   |   7
 arch/xtensa/mm/highmem.c |  72
 arch/xtensa/mm/init.c    | 299
 arch/xtensa/mm/mmu.c     |  36
 arch/xtensa/mm/tlb.c     |  15
 6 files changed, 383 insertions(+), 47 deletions(-)
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index f0b646d2f84..f54f78e24d7 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -4,3 +4,4 @@
obj-y := init.o cache.o misc.o
obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index ba4c47f291b..63cbb867dad 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -59,6 +59,10 @@
*
*/
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
/*
@@ -179,10 +183,11 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
#else
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
&& (vma->vm_flags & VM_EXEC) != 0) {
- unsigned long paddr = (unsigned long) page_address(page);
+ unsigned long paddr = (unsigned long)kmap_atomic(page);
__flush_dcache_page(paddr);
__invalidate_icache_page(paddr);
set_bit(PG_arch_1, &page->flags);
+ kunmap_atomic((void *)paddr);
}
#endif
}
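
The hunk above replaces page_address() with kmap_atomic() because a
highmem page has no permanent kernel mapping, so page_address() can
return NULL for it. A minimal sketch of the pattern being followed,
assuming the usual kmap API (illustrative, not part of the patch):

	void *vaddr = kmap_atomic(page);  /* always yields a usable address */
	/* ... flush/invalidate caches through vaddr ... */
	kunmap_atomic(vaddr);             /* unmap, re-enable pagefaults */
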
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644
index 00000000000..17a8c0d6fd1
--- /dev/null
+++ b/arch/xtensa/mm/highmem.c
@@ -0,0 +1,72 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+ int type;
+
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+ set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ int idx, type;
+
+ if (kvaddr >= (void *)FIXADDR_START &&
+ kvaddr < (void *)FIXADDR_TOP) {
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+ /*
+ * Force other mappings to Oops if they try to access this
+ * pte without first remapping it. Keeping stale mappings
+ * around is also a bad idea, in case the page changes
+ * cacheability attributes or becomes a protected page in a
+ * hypervisor.
+ */
+ pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+ local_flush_tlb_kernel_range((unsigned long)kvaddr,
+ (unsigned long)kvaddr + PAGE_SIZE);
+
+ kmap_atomic_idx_pop();
+ }
+
+ pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
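
For reference, a standalone model of the per-CPU slot arithmetic used
by kmap_atomic() above, assuming the asm-generic fixmap layout where
slots grow downward from FIXADDR_TOP; all constants here are
placeholders, not Xtensa's real values:

	#include <stdio.h>

	#define KM_TYPE_NR     20           /* assumed kmap stack depth */
	#define FIX_KMAP_BEGIN 0
	#define FIXADDR_TOP    0xfffff000UL /* hypothetical fixmap top */
	#define PAGE_SHIFT     12

	/* Mirrors asm-generic __fix_to_virt(): slots grow downward. */
	static unsigned long fix_to_virt(unsigned int idx)
	{
		return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
	}

	int main(void)
	{
		int cpu, type;

		/* Each CPU owns KM_TYPE_NR consecutive slots, used as a
		 * stack by kmap_atomic_idx_push()/kmap_atomic_idx_pop(). */
		for (cpu = 0; cpu < 2; cpu++)
			for (type = 0; type < 3; type++)
				printf("cpu %d type %d -> 0x%08lx\n",
				       cpu, type,
				       fix_to_virt(FIX_KMAP_BEGIN + type +
						   KM_TYPE_NR * cpu));
		return 0;
	}
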
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index aff108df92d..4224256bb21 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -8,6 +8,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -19,6 +20,7 @@
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
@@ -27,11 +29,133 @@
#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
+#include <asm/sysmem.h>
+
+struct sysmem_info sysmem __initdata;
+
+static void __init sysmem_dump(void)
+{
+ unsigned i;
+
+ pr_debug("Sysmem:\n");
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
+ sysmem.bank[i].start, sysmem.bank[i].end,
+ (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find the bank with maximal .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start <= start)
+ it = sysmem.bank + i;
+ else
+ break;
+ return it;
+}
+
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to',
+ * adjusting nr_banks accordingly. Both 'from' and 'to' must point
+ * inside the sysmem.bank array.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+ unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+ if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+ return -ENOMEM;
+ if (to != from)
+ memmove(to, from, n * sizeof(struct meminfo));
+ sysmem.nr_banks += to - from;
+ return 0;
+}
+
+/*
+ * Add a new bank to sysmem. The resulting sysmem is the byte-wise
+ * union of the original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
+ */
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
+
+ if (start == end ||
+ (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+ pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
+
+ start = PAGE_ALIGN(start);
+ end &= PAGE_MASK;
+ sz = end - start;
+
+ it = find_bank(start);
+
+ if (it)
+ bank_sz = it->end - it->start;
+
+ if (it && bank_sz >= start - it->start) {
+ if (end - it->start > bank_sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (!it)
+ it = sysmem.bank;
+ else
+ ++it;
+
+ if (it - sysmem.bank < sysmem.nr_banks &&
+ it->start - start <= sz) {
+ it->start = start;
+ if (it->end - it->start < sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (move_banks(it + 1, it) < 0) {
+ pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
+ it->start = start;
+ it->end = end;
+ return 0;
+ }
+ }
+ sz = it->end - it->start;
+ for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start - it->start <= sz) {
+ if (sz < sysmem.bank[i].end - it->start)
+ it->end = sysmem.bank[i].end;
+ } else {
+ break;
+ }
+
+ move_banks(it + 1, sysmem.bank + i);
+ return 0;
+}
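
To make the union semantics concrete, a hypothetical call sequence
(4 KiB pages, illustrative addresses):

	/*
	 * add_sysmem_bank(0x1000, 0x8000);  banks: [0x1000,0x8000)
	 * add_sysmem_bank(0xa000, 0xf000);  banks: [0x1000,0x8000) [0xa000,0xf000)
	 * add_sysmem_bank(0x7000, 0xb000);  banks: [0x1000,0xf000)
	 *                                   (the overlap closes the gap and
	 *                                    the two banks merge into one)
	 */
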
/*
* mem_reserve(start, end, must_exist)
*
* Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does
+ * not exist, the memory map is not altered.
*
* Parameters:
* start Start of region,
@@ -39,53 +163,69 @@
* must_exist Must exist in memory pool.
*
* Returns:
- * 0 (memory area couldn't be mapped)
- * -1 (success)
+ * 0 (success)
+ * < 0 (error)
*/
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
- int i;
-
- if (start == end)
- return 0;
+ struct meminfo *it;
+ struct meminfo *rm = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
+ sz = end - start;
+ if (!sz)
+ return -EINVAL;
- for (i = 0; i < sysmem.nr_banks; i++)
- if (start < sysmem.bank[i].end
- && end >= sysmem.bank[i].start)
- break;
+ it = find_bank(start);
+
+ if (it)
+ bank_sz = it->end - it->start;
- if (i == sysmem.nr_banks) {
- if (must_exist)
- printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
- "not in any region!\n", start, end);
- return 0;
+ if ((!it || end - it->start > bank_sz) && must_exist) {
+ pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+ start, end);
+ return -EINVAL;
}
- if (start > sysmem.bank[i].start) {
- if (end < sysmem.bank[i].end) {
- /* split entry */
- if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
- panic("meminfo overflow\n");
- sysmem.bank[sysmem.nr_banks].start = end;
- sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
- sysmem.nr_banks++;
+ if (it && start - it->start < bank_sz) {
+ if (start == it->start) {
+ if (end - it->start < bank_sz) {
+ it->start = end;
+ return 0;
+ } else {
+ rm = it;
+ }
+ } else {
+ it->end = start;
+ if (end - it->start < bank_sz)
+ return add_sysmem_bank(end,
+ it->start + bank_sz);
+ ++it;
}
- sysmem.bank[i].end = start;
+ }
- } else if (end < sysmem.bank[i].end) {
- sysmem.bank[i].start = end;
+ if (!it)
+ it = sysmem.bank;
- } else {
- /* remove entry */
- sysmem.nr_banks--;
- sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
- sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
+ for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+ if (it->end - start <= sz) {
+ if (!rm)
+ rm = it;
+ } else {
+ if (it->start - start < sz)
+ it->start = end;
+ break;
+ }
}
- return -1;
+
+ if (rm)
+ move_banks(rm, it);
+
+ return 0;
}
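
Conversely, a hedged sketch of how mem_reserve() splits a bank when the
reserved region sits in its middle (illustrative addresses, 4 KiB pages):

	/*
	 * banks before: [0x1000,0xf000)
	 * mem_reserve(0x4000, 0x6000, 0);
	 * banks after:  [0x1000,0x4000) [0x6000,0xf000)
	 */
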
@@ -99,6 +239,7 @@ void __init bootmem_init(void)
unsigned long bootmap_start, bootmap_size;
int i;
+ sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
@@ -156,19 +297,13 @@ void __init bootmem_init(void)
void __init zones_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
- int i;
-
/* All pages are DMA-able, so we put them all in the DMA zone. */
-
- zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
- for (i = 1; i < MAX_NR_ZONES; i++)
- zones_size[i] = 0;
-
+ unsigned long zones_size[MAX_NR_ZONES] = {
+ [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+ [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
#endif
-
+ };
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
@@ -178,16 +313,38 @@ void __init zones_init(void)
void __init mem_init(void)
{
- max_mapnr = max_low_pfn - ARCH_PFN_OFFSET;
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
#ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
+ unsigned long tmp;
+
+ reset_all_zones_managed_pages();
+ for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
#endif
+ max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+ high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
+
free_all_bootmem();
mem_init_print_info(NULL);
+ pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+#endif
+ " vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
+ " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+ (LAST_PKMAP*PAGE_SIZE) >> 10,
+ FIXADDR_START, FIXADDR_TOP,
+ (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+ PAGE_OFFSET, PAGE_OFFSET +
+ (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -204,3 +361,53 @@ void free_initmem(void)
{
free_initmem_default(-1);
}
+
+static void __init parse_memmap_one(char *p)
+{
+ char *oldp;
+ unsigned long start_at, mem_size;
+
+ if (!p)
+ return;
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return;
+
+ switch (*p) {
+ case '@':
+ start_at = memparse(p + 1, &p);
+ add_sysmem_bank(start_at, start_at + mem_size);
+ break;
+
+ case '$':
+ start_at = memparse(p + 1, &p);
+ mem_reserve(start_at, start_at + mem_size, 0);
+ break;
+
+ case 0:
+ mem_reserve(mem_size, 0, 0);
+ break;
+
+ default:
+ pr_warn("Unrecognized memmap syntax: %s\n", p);
+ break;
+ }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+ while (str) {
+ char *k = strchr(str, ',');
+
+ if (k)
+ *k++ = 0;
+
+ parse_memmap_one(str);
+ str = k;
+ }
+
+ return 0;
+}
+early_param("memmap", parse_memmap_opt);
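
The accepted memmap= forms follow the usual convention; the addresses
and sizes below are illustrative:

	memmap=64M@0x10000000    add a 64 MiB bank at physical 0x10000000
	memmap=1M$0x1f000000     reserve 1 MiB at 0x1f000000
	memmap=256M              reserve everything from address 256 MiB up,
	                         i.e. cap usable memory at that address
	memmap=64M@0x10000000,1M$0x1f000000
	                         multiple entries, separated by commas
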
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 861203e958d..3429b483d9f 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -3,6 +3,7 @@
*
* Extracted from init.c
*/
+#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -16,9 +17,44 @@
#include <asm/initialize_mmu.h>
#include <asm/io.h>
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+ if (pmd_none(*pmd)) {
+ unsigned i;
+ pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+ for (i = 0; i < 1024; i++)
+ pte_clear(NULL, 0, pte + i);
+
+ set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+ __func__, vaddr, pmd, pte);
+ return pte;
+ } else {
+ return pte_offset_kernel(pmd, 0);
+ }
+}
+
+static void __init fixedrange_init(void)
+{
+ BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+ init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
+
void __init paging_init(void)
{
memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+ fixedrange_init();
+ pkmap_page_table = init_pmd(PKMAP_BASE);
+ kmap_init();
+#endif
}
/*
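
The 1024-iteration loop in init_pmd() above clears exactly one pmd's
worth of ptes; a quick sketch of the arithmetic it assumes (Xtensa's
4 KiB pages and 4-byte pte_t taken as assumptions):

	/*
	 * PTRS_PER_PTE = PAGE_SIZE / sizeof(pte_t) = 4096 / 4 = 1024,
	 * so one bootmem page holds a full page table, covering
	 * 1024 * 4 KiB = 4 MiB of virtual address space; this is also
	 * why the BUILD_BUG_ON checks that the whole fixmap fits
	 * within one PMD_SIZE.
	 */
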
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index ade62382678..5ece856c572 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
local_irq_restore(flags);
}
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+ end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+ start &= PAGE_MASK;
+ while (start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ } else {
+ local_flush_tlb_all();
+ }
+}
+
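
Illustrative behaviour of the new helper; the _TLB_ENTRIES value below
is an assumption, not Xtensa's actual configuration:

	/*
	 * With _TLB_ENTRIES == 8 and 4 KiB pages the cutoff is 32 KiB:
	 *   flush of a 16 KiB kernel range -> per-page
	 *     invalidate_itlb_mapping()/invalidate_dtlb_mapping() calls
	 *   flush of a 64 KiB kernel range -> local_flush_tlb_all()
	 * Only ranges inside [TASK_SIZE, PAGE_OFFSET) take the per-page
	 * path; anything else also falls back to a full flush.
	 */
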
#ifdef CONFIG_DEBUG_TLB_SANITY
static unsigned get_pte_for_vaddr(unsigned vaddr)