Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/40x_mmu.c             |  4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c      | 19
-rw-r--r--  arch/powerpc/mm/mem.c                 |  4
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c  | 10
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  | 14
-rw-r--r--  arch/powerpc/mm/numa.c                |  6
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c          | 12
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S         |  2
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c          |  6
9 files changed, 41 insertions, 36 deletions
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 08dfa8e6d86..65abfcfaaa9 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -84,8 +84,8 @@ void __init MMU_init_hw(void)
 	 * vectors and the kernel live in real-mode.
 	 */

-        mtspr(SPRN_DCCR, 0xF0000000);	/* 512 MB of data space at 0x0. */
-        mtspr(SPRN_ICCR, 0xF0000000);	/* 512 MB of instr. space at 0x0. */
+        mtspr(SPRN_DCCR, 0xFFFF0000);	/* 2GByte of data space at 0x0. */
+        mtspr(SPRN_ICCR, 0xFFFF0000);	/* 2GByte of instr. space at 0x0. */
 }

 #define LARGE_PAGE_SIZE_16M	(1<<24)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 056d23a1b10..784a400e078 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@

 #define HPTE_LOCK_BIT 3

-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

 static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
 	if (lock_tlbie && !use_local)
-		spin_lock(&native_tlbie_lock);
+		raw_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
 		__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
-		spin_unlock(&native_tlbie_lock);
+		raw_spin_unlock(&native_tlbie_lock);
 }

 static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -122,7 +122,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 	unsigned long *word = &hptep->v;

 	while (1) {
-		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
+		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
 			break;
 		while(test_bit(HPTE_LOCK_BIT, word))
 			cpu_relax();
@@ -133,8 +133,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;

-	asm volatile("lwsync":::"memory");
-	clear_bit(HPTE_LOCK_BIT, word);
+	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }

 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
@@ -434,7 +433,7 @@ static void native_hpte_clear(void)
 	/* we take the tlbie lock and hold it.  Some hardware will
 	 * deadlock if we try to tlbie from two processors at once.
 	 */
-	spin_lock(&native_tlbie_lock);
+	raw_spin_lock(&native_tlbie_lock);

 	slots = pteg_count * HPTES_PER_GROUP;

@@ -458,7 +457,7 @@ static void native_hpte_clear(void)
 	}

 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	spin_unlock(&native_tlbie_lock);
+	raw_spin_unlock(&native_tlbie_lock);

 	local_irq_restore(flags);
 }
@@ -521,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

 		if (lock_tlbie)
-			spin_lock(&native_tlbie_lock);
+			raw_spin_lock(&native_tlbie_lock);

 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
@@ -536,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		asm volatile("eieio; tlbsync; ptesync":::"memory");

 		if (lock_tlbie)
-			spin_unlock(&native_tlbie_lock);
+			raw_spin_unlock(&native_tlbie_lock);
 	}

 	local_irq_restore(flags);
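The hash_native_64.c hunks above do two things: they turn native_tlbie_lock into a raw spinlock, and they replace the open-coded "lwsync + clear_bit()" release with clear_bit_unlock(), paired with test_and_set_bit_lock() on the acquire side, so the memory ordering comes from the bitop helpers themselves. A minimal sketch of that lock-bit pattern, with illustrative names rather than the real HPTE word:

#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

#define MY_LOCK_BIT	3	/* hypothetical bit number, like HPTE_LOCK_BIT */

static inline void my_bit_lock(unsigned long *word)
{
	while (1) {
		/* acquire semantics: later accesses cannot move above the lock */
		if (!test_and_set_bit_lock(MY_LOCK_BIT, word))
			break;
		/* spin read-only until the bit looks free, then retry */
		while (test_bit(MY_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void my_bit_unlock(unsigned long *word)
{
	/* release semantics: replaces the old "lwsync; clear_bit()" pair */
	clear_bit_unlock(MY_LOCK_BIT, word);
}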
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9b152558f9..311224cdb7a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -494,13 +494,13 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t pte)
+		      pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
 	unsigned long access = 0, trap;

 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-	if (!pte_young(pte) || address >= TASK_SIZE)
+	if (!pte_young(*ptep) || address >= TASK_SIZE)
 		return;

 	/* We try to figure out if we are coming from an instruction
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index b910d37aea1..51622daae09 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -23,7 +23,7 @@
 #include <asm/mmu_context.h>

 static DEFINE_SPINLOCK(mmu_context_lock);
-static DEFINE_IDR(mmu_context_idr);
+static DEFINE_IDA(mmu_context_ida);

 /*
  * The proto-VSID space has 2^35 - 1 segments available for user mappings.
@@ -39,11 +39,11 @@ int __init_new_context(void)
 	int err;

again:
-	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
 		return -ENOMEM;

 	spin_lock(&mmu_context_lock);
-	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
+	err = ida_get_new_above(&mmu_context_ida, 1, &index);
 	spin_unlock(&mmu_context_lock);

 	if (err == -EAGAIN)
@@ -53,7 +53,7 @@ again:

 	if (index > MAX_CONTEXT) {
 		spin_lock(&mmu_context_lock);
-		idr_remove(&mmu_context_idr, index);
+		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
 		return -ENOMEM;
 	}
@@ -85,7 +85,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 void __destroy_context(int context_id)
 {
 	spin_lock(&mmu_context_lock);
-	idr_remove(&mmu_context_idr, context_id);
+	ida_remove(&mmu_context_ida, context_id);
 	spin_unlock(&mmu_context_lock);
 }
 EXPORT_SYMBOL_GPL(__destroy_context);
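The mmu_context_hash64.c hunks above swap the IDR for an IDA, which is enough here because only an integer context number is allocated, never a pointer. A rough sketch of the same allocate/retry/free pattern with the IDA interface of this era (ida_pre_get()/ida_get_new_above() were later superseded by ida_alloc()/ida_free(); the my_* names are illustrative):

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(my_id_lock);	/* illustrative, like mmu_context_lock */
static DEFINE_IDA(my_ida);

static int my_alloc_id(void)
{
	int index;
	int err;

again:
	/* Preload outside the spinlock, since it may sleep (GFP_KERNEL). */
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&my_id_lock);
	err = ida_get_new_above(&my_ida, 1, &index);	/* IDs start at 1 */
	spin_unlock(&my_id_lock);

	if (err == -EAGAIN)
		goto again;	/* preloaded memory was consumed by someone else */
	else if (err)
		return err;

	return index;
}

static void my_free_id(int id)
{
	spin_lock(&my_id_lock);
	ida_remove(&my_ida, id);
	spin_unlock(&my_id_lock);
}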
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1044a634b6d..dbc692145ec 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -56,7 +56,7 @@ static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_RAW_SPINLOCK(context_lock);

 #define CTX_MAP_SIZE	\
 	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -121,9 +121,9 @@ static unsigned int steal_context_smp(unsigned int id)
 		/* This will happen if you have more CPUs than available contexts,
 		 * all we can do here is wait a bit and try again
 		 */
-		spin_unlock(&context_lock);
+		raw_spin_unlock(&context_lock);
 		cpu_relax();
-		spin_lock(&context_lock);
+		raw_spin_lock(&context_lock);

 		/* This will cause the caller to try again */
 		return MMU_NO_CONTEXT;
@@ -194,7 +194,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long *map;

 	/* No lockless fast path .. yet */
-	spin_lock(&context_lock);
+	raw_spin_lock(&context_lock);

 	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
 		cpu, next, next->context.active, next->context.id);
@@ -278,7 +278,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	/* Flick the MMU and release lock */
 	pr_hardcont(" -> %d\n", id);
 	set_context(id, next->pgd);
-	spin_unlock(&context_lock);
+	raw_spin_unlock(&context_lock);
 }

 /*
@@ -307,7 +307,7 @@ void destroy_context(struct mm_struct *mm)

 	WARN_ON(mm->context.active != 0);

-	spin_lock_irqsave(&context_lock, flags);
+	raw_spin_lock_irqsave(&context_lock, flags);
 	id = mm->context.id;
 	if (id != MMU_NO_CONTEXT) {
 		__clear_bit(id, context_map);
@@ -318,7 +318,7 @@ void destroy_context(struct mm_struct *mm)
 		context_mm[id] = NULL;
 		nr_free_contexts++;
 	}
-	spin_unlock_irqrestore(&context_lock, flags);
+	raw_spin_unlock_irqrestore(&context_lock, flags);
 }

 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b037d95eead..64c00227b99 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -451,7 +451,7 @@ static int __cpuinit numa_setup_cpu(unsigned long lcpu)
 	nid = of_node_to_nid_single(cpu);

 	if (nid < 0 || !node_online(nid))
-		nid = any_online_node(NODE_MASK_ALL);
+		nid = first_online_node;
out:
 	map_cpu_to_node(lcpu, nid);

@@ -1114,7 +1114,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	int nid, found = 0;

 	if (!numa_enabled || (min_common_depth < 0))
-		return any_online_node(NODE_MASK_ALL);
+		return first_online_node;

 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 	if (memory) {
@@ -1125,7 +1125,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	}

 	if (nid < 0 || !node_online(nid))
-		nid = any_online_node(NODE_MASK_ALL);
+		nid = first_online_node;

 	if (NODE_DATA(nid)->node_spanned_pages)
 		return nid;
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361..1ec06576f61 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
 		psize = get_slice_psize(mm, addr);
+		/* Mask the address for the correct page size */
+		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
-	} else
+	} else {
 		psize = pte_pagesize_index(mm, addr, pte);
+		/* Mask the address for the standard page size.  If we
+		 * have a 64k page kernel, but the hardware does not
+		 * support 64k pages, this might be different from the
+		 * hardware page size encoded in the slice table. */
+		addr &= PAGE_MASK;
+	}

-	/* Mask the address for the correct page size */
-	addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);

 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index f288279e679..8b04c54e596 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -1,5 +1,5 @@
 /*
- *  Low leve TLB miss handlers for Book3E
+ *  Low level TLB miss handlers for Book3E
  *
  *  Copyright (C) 2008-2009
  *      Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
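The tlb_hash64.c hunk further up now masks normal-page addresses with PAGE_MASK and keeps the mmu_psize_defs[] shift-based mask only for huge pages, so a 64K-page kernel whose hardware hash actually uses 4K pages still rounds to the Linux page boundary. A small illustrative helper, not from the patch, showing the shift-based rounding:

#include <linux/kernel.h>

/* Illustrative only: round an effective address down to a page boundary
 * given a page-size shift, the way hpte_need_flush() does for huge pages. */
static inline unsigned long round_down_to_shift(unsigned long addr,
						unsigned int shift)
{
	return addr & ~((1UL << shift) - 1);
}

/* Example values (assuming a 4K base page and a 16M huge page):
 *   round_down_to_shift(0x10345678, 12) == 0x10345000
 *   round_down_to_shift(0x10345678, 24) == 0x10000000
 * For a normal mapping the patch instead uses "addr &= PAGE_MASK", which
 * rounds to the Linux page size (e.g. 64K), not the hardware hash page size.
 */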
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2fbc680c2c7..e81d5d67f83 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL(local_flush_tlb_page);
  */
 #ifdef CONFIG_SMP

-static DEFINE_SPINLOCK(tlbivax_lock);
+static DEFINE_RAW_SPINLOCK(tlbivax_lock);

 static int mm_is_core_local(struct mm_struct *mm)
 {
@@ -232,10 +232,10 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 	if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
 		int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
 		if (lock)
-			spin_lock(&tlbivax_lock);
+			raw_spin_lock(&tlbivax_lock);
 		_tlbivax_bcast(vmaddr, pid, tsize, ind);
 		if (lock)
-			spin_unlock(&tlbivax_lock);
+			raw_spin_unlock(&tlbivax_lock);
 		goto bail;
 	} else {
 		struct tlb_flush_param p = {
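Several files in this diff (hash_native_64.c, mmu_context_nohash.c, tlb_nohash.c) convert low-level TLB and context locks from spinlock_t to raw_spinlock_t, which keeps them as true spinning locks even on kernels where ordinary spinlocks can sleep (PREEMPT_RT). A minimal sketch of the resulting pattern, with an invented stub standing in for the hardware broadcast invalidate (_tlbivax_bcast()):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(my_bcast_lock);	/* illustrative, like tlbivax_lock */

/* Stand-in for a broadcast TLB invalidate; real code would poke hardware. */
static void my_bcast_invalidate(unsigned long vaddr)
{
	pr_debug("invalidate 0x%lx\n", vaddr);
}

static void my_flush_one(unsigned long vaddr, bool need_lock)
{
	/* raw_spin_lock() never turns into a sleeping lock, which is what a
	 * low-level invalidation path like this requires. */
	if (need_lock)
		raw_spin_lock(&my_bcast_lock);
	my_bcast_invalidate(vaddr);
	if (need_lock)
		raw_spin_unlock(&my_bcast_lock);
}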