Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/context.c     |  7
-rw-r--r--   arch/arm/mm/dma-mapping.c |  2
-rw-r--r--   arch/arm/mm/ioremap.c     | 14
-rw-r--r--   arch/arm/mm/mm.h          |  3
-rw-r--r--   arch/arm/mm/mmu.c         | 60
5 files changed, 68 insertions, 18 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93..4e07eec1270 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
 	pid = task_pid_nr(thread->task) << ASID_BITS;
 	asm volatile(
 	"	mrc	p15, 0, %0, c13, c0, 1\n"
-	"	bfi	%1, %0, #0, %2\n"
-	"	mcr	p15, 0, %1, c13, c0, 1\n"
+	"	and	%0, %0, %2\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c13, c0, 1\n"
 	: "=r" (contextidr), "+r" (pid)
-	: "I" (ASID_BITS));
+	: "I" (~ASID_MASK));
 	isb();
 
 	return NOTIFY_OK;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 051204fc461..e59c4ab71bc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -489,7 +489,7 @@ static bool __in_atomic_pool(void *start, size_t size)
 	void *pool_start = pool->vaddr;
 	void *pool_end = pool->vaddr + pool->size;
 
-	if (start < pool_start || start > pool_end)
+	if (start < pool_start || start >= pool_end)
 		return false;
 
 	if (end <= pool_end)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 566750fa57d..9d869f93a3d 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,6 +36,7 @@
 #include <asm/system_info.h>
 
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 #include "mm.h"
 
 int ioremap_page(unsigned long virt, unsigned long phys,
@@ -383,3 +384,16 @@ void __arm_iounmap(volatile void __iomem *io_addr)
 	arch_iounmap(io_addr);
 }
 EXPORT_SYMBOL(__arm_iounmap);
+
+#ifdef CONFIG_PCI
+int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+{
+	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+
+	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+				  phys_addr,
+				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_io);
+#endif
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618e..a8ee92da354 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING	0x40000000
 
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING	0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)	((mt) << 20)
 #define VM_ARM_MTYPE_MASK	(0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84..18144e6a311 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
 
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},	
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -777,14 +778,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
-		vm->phys_addr = __pfn_to_phys(md->pfn); 
-		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; 
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 
 /*
@@ -802,14 +816,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
  */
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -820,7 +827,7 @@ static void __init fill_pmd_gaps(void)
 
 	/* we're still single threaded hence no lock needed here */
 	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
 			continue;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
@@ -858,6 +865,28 @@ static void __init fill_pmd_gaps(void)
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -961,8 +990,8 @@ void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would partially overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start + bank->size) > vmalloc_min ||
-		    __va(bank->start + bank->size) < __va(bank->start)) {
+		if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
 			unsigned long newsize = vmalloc_min - __va(bank->start);
 			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
 			       "to -%.8llx (vmalloc region overlap).\n",
@@ -1141,6 +1170,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 		mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer.  This also ensures that
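
The diff above adds pci_ioremap_io(), which maps a 64K PCI I/O window at a given offset inside the fixed virtual area starting at PCI_IO_VIRT_BASE that pci_reserve_io() sets aside. A minimal usage sketch follows, as a host bridge driver might call it from its hw_pci ->setup() hook; the MY_PCIE_* names, the physical window address, and the exact header includes are assumptions for illustration and are not part of this diff.

/*
 * Hypothetical sketch (not from this diff): map root bus 'nr' onto its
 * 64K slice of the fixed 2M PCI I/O space via pci_ioremap_io().
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/sizes.h>
#include <asm/mach/pci.h>

#define MY_PCIE_IO_PHYS	0x90000000UL	/* assumed CPU address of the bus I/O window */

static int __init my_pcie_setup(int nr, struct pci_sys_data *sys)
{
	int ret;

	/* offset is the slot within the fixed window; kept below IO_SPACE_LIMIT */
	ret = pci_ioremap_io(nr * SZ_64K, MY_PCIE_IO_PHYS + nr * SZ_64K);
	if (ret)
		return ret;	/* mapping failed; skip this bus */

	/* ... add memory and I/O resources to sys->resources here ... */
	return 1;		/* non-zero tells bios32 the bus exists */
}

Note that the offset passed to pci_ioremap_io() is only the position inside the reserved window; the BUG_ON() in the new helper enforces that offset + SZ_64K stays within IO_SPACE_LIMIT.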