| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2007-10-25 15:27:44 +1000 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2007-12-03 13:56:25 +1100 |
| commit | 8d089085a4e6d8a91c954130cb4caebd50788a08 | |
| tree | 8f852ded25d15ae8cd84c8260909db787292e260 /arch/powerpc/kernel/setup-common.c | |
| parent | b80fa3cce7390185e43ea22e9b3c38ab138bc580 | |
[POWERPC] Cleanup SMT thread handling
This cleans up the SMT thread handling, removing some hard-coded
assumptions and providing a set of helpers to convert between Linux
CPU numbers, thread numbers and cores.
This implementation requires the number of threads per core to be a
power of 2 and identical on all cores in the system, but that is an
implementation detail rather than an API requirement, so the limitation
can be lifted in the future if anybody ever needs it.
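
As a rough illustration of the power-of-2 mapping described above, the conversions between logical CPU number, thread number within a core, and core index reduce to shifts and masks. The standalone userspace sketch below mirrors that arithmetic; only cpu_first_thread_in_core() and the threads_per_core/threads_shift variables actually appear in the diff that follows, so the other helper names here are illustrative assumptions rather than the kernel's API.

```c
/*
 * Illustrative userspace sketch of the shift-based thread/core mapping
 * described in the commit message; not the kernel header itself.
 * threads_per_core is assumed to be a power of 2, so every conversion
 * is a shift or a mask.
 */
#include <assert.h>
#include <stdio.h>

static int threads_per_core;	/* threads per core (power of 2) */
static int threads_shift;	/* log2(threads_per_core) */

static int cpu_thread_in_core(int cpu)       { return cpu & (threads_per_core - 1); }
static int cpu_first_thread_in_core(int cpu) { return cpu & ~(threads_per_core - 1); }
static int cpu_core_index_of_thread(int cpu) { return cpu >> threads_shift; }

int main(void)
{
	threads_per_core = 4;	/* example value, must be a power of 2 */
	threads_shift = 2;	/* ilog2(4) */

	/* Logical CPU 6 is thread 2 of core 1; its core starts at CPU 4. */
	assert(cpu_thread_in_core(6) == 2);
	assert(cpu_first_thread_in_core(6) == 4);
	assert(cpu_core_index_of_thread(6) == 1);

	printf("cpu 6 -> core %d, thread %d, first sibling %d\n",
	       cpu_core_index_of_thread(6), cpu_thread_in_core(6),
	       cpu_first_thread_in_core(6));
	return 0;
}
```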
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/setup-common.c')
-rw-r--r-- | arch/powerpc/kernel/setup-common.c | 78
1 file changed, 65 insertions, 13 deletions
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2de00f870ed..6adb5a1e98b 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,6 +33,7 @@
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
 #include <linux/debugfs.h>
+#include <linux/percpu.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/processor.h>
@@ -57,6 +58,7 @@
 #include <asm/mmu.h>
 #include <asm/lmb.h>
 #include <asm/xmon.h>
+#include <asm/cputhreads.h>
 
 #include "setup.h"
 
@@ -327,6 +329,31 @@ void __init check_for_initrd(void)
 
 #ifdef CONFIG_SMP
 
+int threads_per_core, threads_shift;
+cpumask_t threads_core_mask;
+
+static void __init cpu_init_thread_core_maps(int tpc)
+{
+	int i;
+
+	threads_per_core = tpc;
+	threads_core_mask = CPU_MASK_NONE;
+
+	/* This implementation only supports power of 2 number of threads
+	 * for simplicity and performance
+	 */
+	threads_shift = ilog2(tpc);
+	BUG_ON(tpc != (1 << threads_shift));
+
+	for (i = 0; i < tpc; i++)
+		cpu_set(i, threads_core_mask);
+
+	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
+	       tpc, tpc > 1 ? "s" : "");
+	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
+}
+
+
 /**
  * setup_cpu_maps - initialize the following cpu maps:
  *                  cpu_possible_map
@@ -350,22 +377,32 @@ void __init smp_setup_cpu_maps(void)
 {
 	struct device_node *dn = NULL;
 	int cpu = 0;
+	int nthreads = 1;
+
+	DBG("smp_setup_cpu_maps()\n");
 
 	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
 		const int *intserv;
-		int j, len = sizeof(u32), nthreads = 1;
+		int j, len;
+
+		DBG("  * %s...\n", dn->full_name);
 
 		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
 				&len);
-		if (intserv)
+		if (intserv) {
 			nthreads = len / sizeof(int);
-		else {
+			DBG("    ibm,ppc-interrupt-server#s -> %d threads\n",
+			    nthreads);
+		} else {
+			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
 			intserv = of_get_property(dn, "reg", NULL);
 			if (!intserv)
 				intserv = &cpu;	/* assume logical == phys */
 		}
 
 		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+			DBG("    thread %d -> cpu %d (hard id %d)\n",
+			    j, cpu, intserv[j]);
 			cpu_set(cpu, cpu_present_map);
 			set_hard_smp_processor_id(cpu, intserv[j]);
 			cpu_set(cpu, cpu_possible_map);
@@ -373,6 +410,12 @@ void __init smp_setup_cpu_maps(void)
 		}
 	}
 
+	/* If no SMT supported, nthreads is forced to 1 */
+	if (!cpu_has_feature(CPU_FTR_SMT)) {
+		DBG("  SMT disabled ! nthreads forced to 1\n");
+		nthreads = 1;
+	}
+
 #ifdef CONFIG_PPC64
 	/*
 	 * On pSeries LPAR, we need to know how many cpus
@@ -395,7 +438,7 @@ void __init smp_setup_cpu_maps(void)
 
 		/* Double maxcpus for processors which have SMT capability */
 		if (cpu_has_feature(CPU_FTR_SMT))
-			maxcpus *= 2;
+			maxcpus *= nthreads;
 
 		if (maxcpus > NR_CPUS) {
 			printk(KERN_WARNING
@@ -412,9 +455,16 @@ void __init smp_setup_cpu_maps(void)
 	out:
 		of_node_put(dn);
 	}
-
 	vdso_data->processorCount = num_present_cpus();
 #endif /* CONFIG_PPC64 */
+
+	/* Initialize CPU <=> thread mapping/
+	 *
+	 * WARNING: We assume that the number of threads is the same for
+	 * every CPU in the system. If that is not the case, then some code
+	 * here will have to be reworked
+	 */
+	cpu_init_thread_core_maps(nthreads);
 }
 
 /*
@@ -424,17 +474,19 @@ void __init smp_setup_cpu_maps(void)
  */
 void __init smp_setup_cpu_sibling_map(void)
 {
-#if defined(CONFIG_PPC64)
-	int cpu;
+#ifdef CONFIG_PPC64
+	int i, cpu, base;
 
-	/*
-	 * Do the sibling map; assume only two threads per processor.
-	 */
 	for_each_possible_cpu(cpu) {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-		if (cpu_has_feature(CPU_FTR_SMT))
-			cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
+		DBG("Sibling map for CPU %d:", cpu);
+		base = cpu_first_thread_in_core(cpu);
+		for (i = 0; i < threads_per_core; i++) {
+			cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+			DBG(" %d", base + i);
+		}
+		DBG("\n");
 	}
+
 #endif /* CONFIG_PPC64 */
 }
 
 #endif /* CONFIG_SMP */
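
For a concrete picture of what the reworked smp_setup_cpu_sibling_map() loop now computes, here is a minimal userspace sketch. The CPU count and thread count are arbitrary example values, and cpumask_t is replaced by a plain bitmask; the point is only that each CPU's sibling mask ends up covering every thread of its own core rather than just cpu ^ 1.

```c
/*
 * Userspace sketch of the reworked sibling-map loop above.
 * NR_CPUS and threads_per_core are example values, not taken from the patch.
 */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	int threads_per_core = 2;		/* power of 2, as the patch requires */
	unsigned int sibling_mask[NR_CPUS] = { 0 };
	int cpu, i, base;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* first logical CPU of this core, cf. cpu_first_thread_in_core() */
		base = cpu & ~(threads_per_core - 1);
		for (i = 0; i < threads_per_core; i++)
			sibling_mask[cpu] |= 1u << (base + i);
		printf("cpu %d: siblings 0x%02x\n", cpu, sibling_mask[cpu]);
	}
	return 0;
}
```

With these example numbers the loop prints 0x03 for CPUs 0-1, 0x0c for CPUs 2-3, and so on: the per-core grouping that the old hard-coded two-thread assumption could not express for wider SMT.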