author	Michael Ellerman <michael@ellerman.id.au>	2010-01-28 13:23:22 +0000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-03-09 11:52:52 +1100
commit	1426d5a3bd07589534286375998c0c8c6fdc5260 (patch)
tree	fade4f3afd6324b46e27f3c8702935a7edc2fe21	/arch/powerpc/kernel/paca.c
parent	59603b9ae426e968d452f9325cdcff308573dee7 (diff)
powerpc: Dynamically allocate pacas
On 64-bit kernels we currently have a 512-byte struct paca_struct for each
cpu (usually just called "the paca"). They are statically allocated, which
means a kernel built for a large number of cpus will waste a lot of space
if it's booted on a machine with few cpus.

We can avoid that by only allocating the number of pacas we need at boot.
However this is complicated by the fact that we need to access the paca
before we know how many cpus there are in the system.

The solution is to dynamically allocate enough space for NR_CPUS pacas,
but then later in boot, when we know how many cpus we have, we free any
unused pacas.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
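As a rough illustration of the allocate-then-trim pattern this commit
describes, here is a minimal userspace sketch. It is not the kernel code:
malloc/realloc stand in for the LMB allocator, the struct is a dummy, and
the cpu counts are made-up values.

/*
 * Userspace sketch of the allocate-then-trim pattern (illustrative
 * only: the real code uses the LMB allocator and PAGE_ALIGN, not
 * calloc/realloc, and runs before the normal allocators are up).
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 1024                        /* worst case the kernel was built for */

struct paca_struct { char pad[512]; };      /* stand-in for the real 512-byte paca */

static struct paca_struct *paca;
static size_t paca_size;

/* Early boot: cpu count unknown, so reserve room for every possible cpu. */
static void allocate_pacas(void)
{
	paca_size = sizeof(struct paca_struct) * NR_CPUS;
	paca = calloc(1, paca_size);
}

/* Later boot: the firmware/device tree has told us how many cpus exist. */
static void free_unused_pacas(int nr_possible)
{
	size_t new_size = sizeof(struct paca_struct) * nr_possible;

	if (new_size >= paca_size)
		return;                     /* nothing to give back */

	paca = realloc(paca, new_size);     /* the kernel frees the tail via lmb_free() */
	paca_size = new_size;
}

int main(void)
{
	allocate_pacas();
	free_unused_pacas(4);               /* e.g. booted on a 4-cpu machine */
	printf("kept %zu bytes of paca space\n", paca_size);
	return 0;
}

The trim step is cheap because the pacas for cpus 0..n-1 sit at the front
of the array, so the unused tail can be returned wholesale.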
Diffstat (limited to 'arch/powerpc/kernel/paca.c')
-rw-r--r--	arch/powerpc/kernel/paca.c	93
1 files changed, 71 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index d16b1ea55d4..0c40c6f476f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -9,11 +9,15 @@
 
 #include <linux/threads.h>
 #include <linux/module.h>
+#include <linux/lmb.h>
 
+#include <asm/firmware.h>
 #include <asm/lppaca.h>
 #include <asm/paca.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
+#include <asm/iseries/lpar_map.h>
+#include <asm/iseries/hv_types.h>
 
 /* This symbol is provided by the linker - let it fill in the paca
  * field correctly */
@@ -70,37 +74,82 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = {
  * processors. The processor VPD array needs one entry per physical
  * processor (not thread).
  */
-struct paca_struct paca[NR_CPUS];
+struct paca_struct *paca;
 EXPORT_SYMBOL(paca);
 
-void __init initialise_pacas(void)
-{
-	int cpu;
+struct paca_struct boot_paca;
 
-	/* The TOC register (GPR2) points 32kB into the TOC, so that 64kB
-	 * of the TOC can be addressed using a single machine instruction.
-	 */
+void __init initialise_paca(struct paca_struct *new_paca, int cpu)
+{
+	/* The TOC register (GPR2) points 32kB into the TOC, so that 64kB
+	 * of the TOC can be addressed using a single machine instruction.
+	 */
 	unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL;
 
-	/* Can't use for_each_*_cpu, as they aren't functional yet */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		struct paca_struct *new_paca = &paca[cpu];
-
 #ifdef CONFIG_PPC_BOOK3S
-		new_paca->lppaca_ptr = &lppaca[cpu];
+	new_paca->lppaca_ptr = &lppaca[cpu];
 #else
-		new_paca->kernel_pgd = swapper_pg_dir;
+	new_paca->kernel_pgd = swapper_pg_dir;
 #endif
-		new_paca->lock_token = 0x8000;
-		new_paca->paca_index = cpu;
-		new_paca->kernel_toc = kernel_toc;
-		new_paca->kernelbase = (unsigned long) _stext;
-		new_paca->kernel_msr = MSR_KERNEL;
-		new_paca->hw_cpu_id = 0xffff;
-		new_paca->__current = &init_task;
+	new_paca->lock_token = 0x8000;
+	new_paca->paca_index = cpu;
+	new_paca->kernel_toc = kernel_toc;
+	new_paca->kernelbase = (unsigned long) _stext;
+	new_paca->kernel_msr = MSR_KERNEL;
+	new_paca->hw_cpu_id = 0xffff;
+	new_paca->__current = &init_task;
 #ifdef CONFIG_PPC_STD_MMU_64
-		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
+	new_paca->slb_shadow_ptr = &slb_shadow[cpu];
 #endif /* CONFIG_PPC_STD_MMU_64 */
+}
+
+static int __initdata paca_size;
+
+void __init allocate_pacas(void)
+{
+	int nr_cpus, cpu, limit;
+
+	/*
+	 * We can't take SLB misses on the paca, and we want to access them
+	 * in real mode, so allocate them within the RMA and also within
+	 * the first segment. On iSeries they must be within the area mapped
+	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
+	 */
+	limit = min(0x10000000ULL, lmb.rmo_size);
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		limit = min(limit, HvPagesToMap * HVPAGESIZE);
+
+	nr_cpus = NR_CPUS;
+	/* On iSeries we know we can never have more than 64 cpus */
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		nr_cpus = min(64, nr_cpus);
+
+	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
+
+	paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
+	memset(paca, 0, paca_size);
+
+	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
+		paca_size, nr_cpus, paca);
+
+	/* Can't use for_each_*_cpu, as they aren't functional yet */
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		initialise_paca(&paca[cpu], cpu);
+}
+
+void __init free_unused_pacas(void)
+{
+	int new_size;
+
+	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus());
+
+	if (new_size >= paca_size)
+		return;
+
+	lmb_free(__pa(paca) + new_size, paca_size - new_size);
+
+	printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
+			paca_size - new_size);
 
-	}
+	paca_size = new_size;
 }
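For reference, the allocation-limit computation in allocate_pacas() above
can be exercised in isolation. A hedged userspace sketch with made-up
inputs (at boot the real code reads lmb.rmo_size and queries
firmware_has_feature(); the values below are pretend):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Pretend values; at boot these come from the LMB map and firmware. */
	unsigned long long rmo_size = 0x08000000ULL;   /* a 128MB RMA */
	int on_iseries = 1;
	unsigned long long hv_mapped = 8192 * 4096ULL; /* stand-in for HvPagesToMap * HVPAGESIZE */

	/*
	 * Pacas must be reachable in real mode and without SLB misses,
	 * so cap the allocation at 256MB, the RMA size, and (on iSeries)
	 * the HV-mapped area, whichever is smallest.
	 */
	unsigned long long limit = MIN(0x10000000ULL, rmo_size);
	if (on_iseries)
		limit = MIN(limit, hv_mapped);

	printf("pacas must be allocated below 0x%llx\n", limit);
	return 0;
}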