 arch/powerpc/include/asm/cputhreads.h | 15
 arch/powerpc/kernel/smp.c             | 19
 arch/powerpc/mm/mmu_context_nohash.c  | 12
 3 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e18447c62..f71bb4c118b 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -61,22 +61,25 @@ static inline cpumask_t cpu_online_cores_map(void)
 	return cpu_thread_mask_to_cores(cpu_online_map);
 }
 
-static inline int cpu_thread_to_core(int cpu)
-{
-	return cpu >> threads_shift;
-}
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
 
 static inline int cpu_thread_in_core(int cpu)
 {
 	return cpu & (threads_per_core - 1);
 }
 
-static inline int cpu_first_thread_in_core(int cpu)
+static inline int cpu_first_thread_sibling(int cpu)
 {
 	return cpu & ~(threads_per_core - 1);
 }
 
-static inline int cpu_last_thread_in_core(int cpu)
+static inline int cpu_last_thread_sibling(int cpu)
 {
 	return cpu | (threads_per_core - 1);
 }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bbf2e4..98136050917 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -466,7 +466,20 @@ out:
 	return id;
 }
 
-/* Must be called when no change can occur to cpu_present_mask,
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+	return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+	return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+/* Must be called when no change can occur to cpu_present_map,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@ int __devinit start_secondary(void *unused)
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
@@ -600,7 +613,7 @@ int __cpu_disable(void)
 		return err;
 
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5ce99848d91..c0aab52da3a 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -111,8 +111,8 @@ static unsigned int steal_context_smp(unsigned int id)
 		 * a core map instead but this will do for now.
 		 */
 		for_each_cpu(cpu, mm_cpumask(mm)) {
-			for (i = cpu_first_thread_in_core(cpu);
-			     i <= cpu_last_thread_in_core(cpu); i++)
+			for (i = cpu_first_thread_sibling(cpu);
+			     i <= cpu_last_thread_sibling(cpu); i++)
 				__set_bit(id, stale_map[i]);
 			cpu = i - 1;
 		}
@@ -264,14 +264,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 */
 	if (test_bit(id, stale_map[cpu])) {
 		pr_hardcont(" | stale flush %d [%d..%d]",
-			    id, cpu_first_thread_in_core(cpu),
-			    cpu_last_thread_in_core(cpu));
+			    id, cpu_first_thread_sibling(cpu),
+			    cpu_last_thread_sibling(cpu));
 
 		local_flush_tlb_mm(next);
 
 		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-		for (i = cpu_first_thread_in_core(cpu);
-		     i <= cpu_last_thread_in_core(cpu); i++) {
+		for (i = cpu_first_thread_sibling(cpu);
+		     i <= cpu_last_thread_sibling(cpu); i++) {
 			__clear_bit(id, stale_map[i]);
 		}
 	}
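For context (not part of the commit): every helper touched by this patch reduces to shift-and-mask arithmetic on the logical CPU number. The stand-alone C sketch below, assuming a hypothetical 4-threads-per-core topology (threads_shift = 2) and with its own main() harness for illustration only, shows how the newly exported core-mapping routines relate to the renamed sibling helpers.

/*
 * Illustration only, not kernel code: mimic the index math of the
 * cputhreads.h helpers for an assumed topology of 4 SMT threads per core.
 */
#include <assert.h>

static const int threads_shift    = 2;      /* log2(threads_per_core), assumed */
static const int threads_per_core = 1 << 2; /* 4 threads per core, assumed     */

static int cpu_core_index_of_thread(int cpu)  { return cpu >> threads_shift; }
static int cpu_first_thread_of_core(int core) { return core << threads_shift; }
static int cpu_thread_in_core(int cpu)        { return cpu & (threads_per_core - 1); }
static int cpu_first_thread_sibling(int cpu)  { return cpu & ~(threads_per_core - 1); }
static int cpu_last_thread_sibling(int cpu)   { return cpu | (threads_per_core - 1); }

int main(void)
{
	/* In this topology, logical CPU 6 is thread 2 of core 1 ... */
	assert(cpu_core_index_of_thread(6) == 1);
	assert(cpu_thread_in_core(6) == 2);
	assert(cpu_first_thread_of_core(1) == 4);
	/* ... and its sibling threads span logical CPUs 4..7. */
	assert(cpu_first_thread_sibling(6) == 4);
	assert(cpu_last_thread_sibling(6) == 7);
	return 0;
}

In the patch itself the SMP variants of cpu_core_index_of_thread() and cpu_first_thread_of_core() are out-of-line in smp.c and EXPORT_SYMBOL_GPL'd, presumably so module code can use the cpu-to-core mapping without open-coding threads_shift.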