Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/40x_mmu.c             |  4
-rw-r--r-- | arch/powerpc/mm/dma-noncoherent.c     |  1
-rw-r--r-- | arch/powerpc/mm/hash_native_64.c      | 19
-rw-r--r-- | arch/powerpc/mm/hugetlbpage.c         |  1
-rw-r--r-- | arch/powerpc/mm/init_32.c             |  3
-rw-r--r-- | arch/powerpc/mm/init_64.c             |  1
-rw-r--r-- | arch/powerpc/mm/mem.c                 | 11
-rw-r--r-- | arch/powerpc/mm/mmap_64.c             |  4
-rw-r--r-- | arch/powerpc/mm/mmu_context_hash64.c  | 11
-rw-r--r-- | arch/powerpc/mm/mmu_context_nohash.c  | 15
-rw-r--r-- | arch/powerpc/mm/numa.c                |  6
-rw-r--r-- | arch/powerpc/mm/pgtable.c             |  1
-rw-r--r-- | arch/powerpc/mm/pgtable_32.c          |  1
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c          |  1
-rw-r--r-- | arch/powerpc/mm/subpage-prot.c        |  1
-rw-r--r-- | arch/powerpc/mm/tlb_hash64.c          | 12
-rw-r--r-- | arch/powerpc/mm/tlb_low_64e.S         |  2
-rw-r--r-- | arch/powerpc/mm/tlb_nohash.c          |  6
18 files changed, 60 insertions, 40 deletions
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 08dfa8e6d86..65abfcfaaa9 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -84,8 +84,8 @@ void __init MMU_init_hw(void)
 	 * vectors and the kernel live in real-mode.
 	 */
 
-	mtspr(SPRN_DCCR, 0xF0000000);	/* 512 MB of data space at 0x0. */
-	mtspr(SPRN_ICCR, 0xF0000000);	/* 512 MB of instr. space at 0x0. */
+	mtspr(SPRN_DCCR, 0xFFFF0000);	/* 2GByte of data space at 0x0. */
+	mtspr(SPRN_ICCR, 0xFFFF0000);	/* 2GByte of instr. space at 0x0. */
 }
 
 #define LARGE_PAGE_SIZE_16M	(1<<24)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 36692f5c9a7..757c0bed9a9 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 056d23a1b10..784a400e078 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
 
 #define HPTE_LOCK_BIT 3
 
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
 static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
 	if (lock_tlbie && !use_local)
-		spin_lock(&native_tlbie_lock);
+		raw_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
 		__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
-		spin_unlock(&native_tlbie_lock);
+		raw_spin_unlock(&native_tlbie_lock);
 }
 
 static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -122,7 +122,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 	unsigned long *word = &hptep->v;
 
 	while (1) {
-		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
+		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
 			break;
 		while(test_bit(HPTE_LOCK_BIT, word))
 			cpu_relax();
@@ -133,8 +133,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;
 
-	asm volatile("lwsync":::"memory");
-	clear_bit(HPTE_LOCK_BIT, word);
+	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
@@ -434,7 +433,7 @@ static void native_hpte_clear(void)
 	/* we take the tlbie lock and hold it.  Some hardware will
 	 * deadlock if we try to tlbie from two processors at once.
 	 */
-	spin_lock(&native_tlbie_lock);
+	raw_spin_lock(&native_tlbie_lock);
 
 	slots = pteg_count * HPTES_PER_GROUP;
 
@@ -458,7 +457,7 @@ static void native_hpte_clear(void)
 	}
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	spin_unlock(&native_tlbie_lock);
+	raw_spin_unlock(&native_tlbie_lock);
 	local_irq_restore(flags);
 }
 
@@ -521,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
 
 	if (lock_tlbie)
-		spin_lock(&native_tlbie_lock);
+		raw_spin_lock(&native_tlbie_lock);
 
 	asm volatile("ptesync":::"memory");
 	for (i = 0; i < number; i++) {
@@ -536,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
 
 		if (lock_tlbie)
-			spin_unlock(&native_tlbie_lock);
+			raw_spin_unlock(&native_tlbie_lock);
 	}
 
 	local_irq_restore(flags);
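Note: the native_lock_hpte()/native_unlock_hpte() hunks above replace the open-coded lwsync barrier with test_and_set_bit_lock()/clear_bit_unlock(), which already carry acquire/release ordering. Below is an illustrative user-space sketch of that bit-lock pattern using C11 atomics; it is not kernel code, and the hpte_trylock/hpte_lock/hpte_unlock names are hypothetical (only HPTE_LOCK_BIT comes from the diff).

/* Illustrative sketch only: acquire/release bit-lock, a user-space analogue
 * of test_and_set_bit_lock()/clear_bit_unlock(). */
#include <stdatomic.h>
#include <stdbool.h>

#define HPTE_LOCK_BIT 3

static bool hpte_trylock(atomic_ulong *word)
{
	unsigned long mask = 1UL << HPTE_LOCK_BIT;
	/* acquire: the critical section cannot be reordered before the lock */
	return !(atomic_fetch_or_explicit(word, mask, memory_order_acquire) & mask);
}

static void hpte_lock(atomic_ulong *word)
{
	while (!hpte_trylock(word))
		while (atomic_load_explicit(word, memory_order_relaxed) &
		       (1UL << HPTE_LOCK_BIT))
			;	/* spin; the kernel calls cpu_relax() here */
}

static void hpte_unlock(atomic_ulong *word)
{
	/* release: stands in for the explicit lwsync the old code issued */
	atomic_fetch_and_explicit(word, ~(1UL << HPTE_LOCK_BIT),
				  memory_order_release);
}

The DEFINE_RAW_SPINLOCK conversion in the same file appears to follow the usual rule that locks taken in low-level MMU/TLB paths must stay as真 spinning locks even on preemptible (RT) kernel configurations.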
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 123f7070238..9bb249c3046 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -9,6 +9,7 @@
 
 #include <linux/mm.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/hugetlb.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 4ec900af332..767333005eb 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/lmb.h>
+#include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -47,7 +48,7 @@
 #include "mmu_decl.h"
 
 #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
-/* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */
+/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
 #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
 #error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
 #endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 776f28d02b6..d7fa50b09b4 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -42,6 +42,7 @@
 #include <linux/poison.h>
 #include <linux/lmb.h>
 #include <linux/hugetlb.h>
+#include <linux/slab.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9b152558f9..0f594d774bf 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
@@ -48,6 +49,7 @@
 #include <asm/sparsemem.h>
 #include <asm/vdso.h>
 #include <asm/fixmap.h>
+#include <asm/swiotlb.h>
 
 #include "mmu_decl.h"
 
@@ -320,6 +322,11 @@ void __init mem_init(void)
 	struct page *page;
 	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
 
+#ifdef CONFIG_SWIOTLB
+	if (ppc_swiotlb_enable)
+		swiotlb_init(1);
+#endif
+
 	num_physpages = lmb.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
@@ -494,13 +501,13 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t pte)
+		      pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
 	unsigned long access = 0, trap;
 
 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-	if (!pte_young(pte) || address >= TASK_SIZE)
+	if (!pte_young(*ptep) || address >= TASK_SIZE)
 		return;
 
 	/* We try to figure out if we are coming from an instruction
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
index 0d957a4c70f..5a783d8e8e8 100644
--- a/arch/powerpc/mm/mmap_64.c
+++ b/arch/powerpc/mm/mmap_64.c
@@ -47,7 +47,7 @@ static inline int mmap_is_legacy(void)
 	if (current->personality & ADDR_COMPAT_LAYOUT)
 		return 1;
 
-	if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
 		return 1;
 
 	return sysctl_legacy_va_layout;
@@ -77,7 +77,7 @@ static unsigned long mmap_rnd(void)
 
 static inline unsigned long mmap_base(void)
 {
-	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+	unsigned long gap = rlimit(RLIMIT_STACK);
 
 	if (gap < MIN_GAP)
 		gap = MIN_GAP;
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index b910d37aea1..2535828aa84 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -19,11 +19,12 @@
 #include <linux/spinlock.h>
 #include <linux/idr.h>
 #include <linux/module.h>
+#include <linux/gfp.h>
 
 #include <asm/mmu_context.h>
 
 static DEFINE_SPINLOCK(mmu_context_lock);
-static DEFINE_IDR(mmu_context_idr);
+static DEFINE_IDA(mmu_context_ida);
 
 /*
  * The proto-VSID space has 2^35 - 1 segments available for user mappings.
@@ -39,11 +40,11 @@ int __init_new_context(void)
 	int err;
 
 again:
-	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
 		return -ENOMEM;
 
 	spin_lock(&mmu_context_lock);
-	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
+	err = ida_get_new_above(&mmu_context_ida, 1, &index);
 	spin_unlock(&mmu_context_lock);
 
 	if (err == -EAGAIN)
@@ -53,7 +54,7 @@ again:
 
 	if (index > MAX_CONTEXT) {
 		spin_lock(&mmu_context_lock);
-		idr_remove(&mmu_context_idr, index);
+		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
 		return -ENOMEM;
 	}
@@ -85,7 +86,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 void __destroy_context(int context_id)
 {
 	spin_lock(&mmu_context_lock);
-	idr_remove(&mmu_context_idr, context_id);
+	ida_remove(&mmu_context_ida, context_id);
 	spin_unlock(&mmu_context_lock);
 }
 EXPORT_SYMBOL_GPL(__destroy_context);
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1044a634b6d..1f2d9ff0989 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -47,6 +47,7 @@
 #include <linux/bootmem.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
+#include <linux/slab.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -56,7 +57,7 @@ static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_RAW_SPINLOCK(context_lock);
 
 #define CTX_MAP_SIZE	\
 	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -121,9 +122,9 @@ static unsigned int steal_context_smp(unsigned int id)
 		/* This will happen if you have more CPUs than available contexts,
 		 * all we can do here is wait a bit and try again
 		 */
-		spin_unlock(&context_lock);
+		raw_spin_unlock(&context_lock);
 		cpu_relax();
-		spin_lock(&context_lock);
+		raw_spin_lock(&context_lock);
 
 		/* This will cause the caller to try again */
 		return MMU_NO_CONTEXT;
@@ -194,7 +195,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long *map;
 
 	/* No lockless fast path .. yet */
-	spin_lock(&context_lock);
+	raw_spin_lock(&context_lock);
 
 	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
 		cpu, next, next->context.active, next->context.id);
@@ -278,7 +279,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	/* Flick the MMU and release lock */
 	pr_hardcont(" -> %d\n", id);
 	set_context(id, next->pgd);
-	spin_unlock(&context_lock);
+	raw_spin_unlock(&context_lock);
 }
 
 /*
@@ -307,7 +308,7 @@ void destroy_context(struct mm_struct *mm)
 
 	WARN_ON(mm->context.active != 0);
 
-	spin_lock_irqsave(&context_lock, flags);
+	raw_spin_lock_irqsave(&context_lock, flags);
 	id = mm->context.id;
 	if (id != MMU_NO_CONTEXT) {
 		__clear_bit(id, context_map);
@@ -318,7 +319,7 @@ void destroy_context(struct mm_struct *mm)
 		context_mm[id] = NULL;
 		nr_free_contexts++;
 	}
-	spin_unlock_irqrestore(&context_lock, flags);
+	raw_spin_unlock_irqrestore(&context_lock, flags);
 }
 
 #ifdef CONFIG_SMP
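Note: the mmu_context_hash64.c hunks above switch the context-ID allocator from an IDR (which stores a pointer per ID) to an IDA, which only tracks the IDs and is cheaper. The sketch below isolates the same pre-get/retry pattern, assuming the ida_pre_get()/ida_get_new_above() interface of this kernel generation; my_ida, my_lock and my_alloc_id() are hypothetical names, not part of the patch.

/* Illustrative sketch only: allocate a small integer ID >= 1 from an IDA. */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(my_lock);
static DEFINE_IDA(my_ida);

static int my_alloc_id(void)
{
	int id, err;

again:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))	/* preload outside the lock */
		return -ENOMEM;

	spin_lock(&my_lock);
	err = ida_get_new_above(&my_ida, 1, &id);	/* IDs start at 1 */
	spin_unlock(&my_lock);

	if (err == -EAGAIN)	/* preloaded memory was consumed; try again */
		goto again;

	return err ? err : id;
}

Later kernels fold this two-step dance into a single ida_alloc_range() call, but the patch keeps the interface that existed at the time.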
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b037d95eead..64c00227b99 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -451,7 +451,7 @@ static int __cpuinit numa_setup_cpu(unsigned long lcpu)
 	nid = of_node_to_nid_single(cpu);
 
 	if (nid < 0 || !node_online(nid))
-		nid = any_online_node(NODE_MASK_ALL);
+		nid = first_online_node;
 out:
 	map_cpu_to_node(lcpu, nid);
 
@@ -1114,7 +1114,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	int nid, found = 0;
 
 	if (!numa_enabled || (min_common_depth < 0))
-		return any_online_node(NODE_MASK_ALL);
+		return first_online_node;
 
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 	if (memory) {
@@ -1125,7 +1125,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	}
 
 	if (nid < 0 || !node_online(nid))
-		nid = any_online_node(NODE_MASK_ALL);
+		nid = first_online_node;
 
 	if (NODE_DATA(nid)->node_spanned_pages)
 		return nid;
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 99df697c601..ebc2f38eb38 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 573b3bd1c45..b9243e7557a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/lmb.h>
+#include <linux/slab.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 853d5565eed..d95679a5fb2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/lmb.h>
+#include <linux/slab.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index a040b81e93b..e4f8f1fc81a 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -10,7 +10,6 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/gfp.h>
-#include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361..1ec06576f61 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
 		psize = get_slice_psize(mm, addr);
+		/* Mask the address for the correct page size */
+		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
-	} else
+	} else {
 		psize = pte_pagesize_index(mm, addr, pte);
+		/* Mask the address for the standard page size.  If we
+		 * have a 64k page kernel, but the hardware does not
+		 * support 64k pages, this might be different from the
+		 * hardware page size encoded in the slice table. */
+		addr &= PAGE_MASK;
+	}
 
-	/* Mask the address for the correct page size */
-	addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
-
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
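Note: the tlb_hash64.c hunk above moves the address masking into each branch — huge pages mask with the slice's page-size shift, regular pages simply use PAGE_MASK. The stand-alone program below illustrates what the shift-based mask does; mask_to_page() and the shift values are examples, not taken from the patch.

/* Illustrative sketch only: mask an address down to its page boundary. */
#include <stdio.h>

static unsigned long mask_to_page(unsigned long addr, unsigned int shift)
{
	return addr & ~((1UL << shift) - 1);
}

int main(void)
{
	unsigned long addr = 0x12345678UL;

	printf("%#lx -> %#lx (4k pages, shift 12)\n",  addr, mask_to_page(addr, 12));
	printf("%#lx -> %#lx (64k pages, shift 16)\n", addr, mask_to_page(addr, 16));
	printf("%#lx -> %#lx (16M pages, shift 24)\n", addr, mask_to_page(addr, 24));
	return 0;
}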
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index f288279e679..8b04c54e596 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -1,5 +1,5 @@
 /*
- *  Low leve TLB miss handlers for Book3E
+ *  Low level TLB miss handlers for Book3E
  *
  *  Copyright (C) 2008-2009
  *      Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2fbc680c2c7..e81d5d67f83 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL(local_flush_tlb_page);
  */
 #ifdef CONFIG_SMP
 
-static DEFINE_SPINLOCK(tlbivax_lock);
+static DEFINE_RAW_SPINLOCK(tlbivax_lock);
 
 static int mm_is_core_local(struct mm_struct *mm)
 {
@@ -232,10 +232,10 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 	if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
 		int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
 		if (lock)
-			spin_lock(&tlbivax_lock);
+			raw_spin_lock(&tlbivax_lock);
 		_tlbivax_bcast(vmaddr, pid, tsize, ind);
 		if (lock)
-			spin_unlock(&tlbivax_lock);
+			raw_spin_unlock(&tlbivax_lock);
 		goto bail;
 	} else {
 		struct tlb_flush_param p = {