-rw-r--r--  arch/powerpc/Kconfig                  |  69
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S  |  11
-rw-r--r--  arch/powerpc/kernel/prom.c            |   4
-rw-r--r--  arch/powerpc/kernel/setup_64.c        |   2
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c       |   2
-rw-r--r--  arch/powerpc/mm/init_32.c             |   5
-rw-r--r--  arch/powerpc/mm/init_64.c             |   3
-rw-r--r--  arch/powerpc/mm/mem.c                 |   5
-rw-r--r--  include/asm-powerpc/kdump.h           |   5
-rw-r--r--  include/asm-powerpc/page.h            |  45
-rw-r--r--  include/asm-powerpc/page_32.h         |   6
11 files changed, 135 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bb2e9310a5..fdc755a05f7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -656,21 +656,76 @@ config LOWMEM_SIZE
     hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
     default "0x30000000"
 
+config RELOCATABLE
+    bool "Build a relocatable kernel (EXPERIMENTAL)"
+    depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+    help
+      This builds a kernel image that is capable of running at the
+      location the kernel is loaded at (some alignment restrictions may
+      exist).
+
+      One use is for the kexec on panic case where the recovery kernel
+      must live at a different physical address than the primary
+      kernel.
+
+      Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
+      it has been loaded at and the compile time physical addresses
+      CONFIG_PHYSICAL_START is ignored.  However CONFIG_PHYSICAL_START
+      setting can still be useful to bootwrappers that need to know the
+      load location of the kernel (eg. u-boot/mkimage).
+
+config PAGE_OFFSET_BOOL
+    bool "Set custom page offset address"
+    depends on ADVANCED_OPTIONS
+    help
+      This option allows you to set the kernel virtual address at which
+      the kernel will map low memory.  This can be useful in optimizing
+      the virtual memory layout of the system.
+
+      Say N here unless you know what you are doing.
+
+config PAGE_OFFSET
+    hex "Virtual address of memory base" if PAGE_OFFSET_BOOL
+    default "0xc0000000"
+
 config KERNEL_START_BOOL
     bool "Set custom kernel base address"
     depends on ADVANCED_OPTIONS
     help
       This option allows you to set the kernel virtual address at which
-      the kernel will map low memory (the kernel image will be linked at
-      this address).  This can be useful in optimizing the virtual memory
-      layout of the system.
+      the kernel will be loaded.  Normally this should match PAGE_OFFSET
+      however there are times (like kdump) that one might not want them
+      to be the same.
 
       Say N here unless you know what you are doing.
 
 config KERNEL_START
     hex "Virtual address of kernel base" if KERNEL_START_BOOL
+    default PAGE_OFFSET if PAGE_OFFSET_BOOL
+    default "0xc2000000" if CRASH_DUMP
     default "0xc0000000"
 
+config PHYSICAL_START_BOOL
+    bool "Set physical address where the kernel is loaded"
+    depends on ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+    help
+      This gives the physical address where the kernel is loaded.
+
+      Say N here unless you know what you are doing.
+
+config PHYSICAL_START
+    hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
+    default "0x02000000" if PPC_STD_MMU && CRASH_DUMP
+    default "0x00000000"
+
+config PHYSICAL_ALIGN
+    hex
+    default "0x10000000" if FSL_BOOKE
+    help
+      This value puts the alignment restrictions on physical address
+      where kernel is loaded and run from. Kernel is compiled for an
+      address which meets above alignment restriction.
+
 config TASK_SIZE_BOOL
     bool "Set custom user task size"
     depends on ADVANCED_OPTIONS
@@ -717,9 +772,17 @@ config PIN_TLB
 endmenu
 
 if PPC64
+config PAGE_OFFSET
+    hex
+    default "0xc000000000000000"
 config KERNEL_START
     hex
+    default "0xc000000002000000" if CRASH_DUMP
     default "0xc000000000000000"
+config PHYSICAL_START
+    hex
+    default "0x02000000" if CRASH_DUMP
+    default "0x00000000"
 endif
 
 source "net/Kconfig"
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 4ff74414356..e581524d85b 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -371,6 +371,17 @@ skpinv: addi r6,r6,1 /* Increment */
 
     bl  early_init
 
+#ifdef CONFIG_RELOCATABLE
+    lis r3,kernstart_addr@ha
+    la  r3,kernstart_addr@l(r3)
+#ifdef CONFIG_PHYS_64BIT
+    stw r23,0(r3)
+    stw r25,4(r3)
+#else
+    stw r25,0(r3)
+#endif
+#endif
+
     mfspr   r3,SPRN_TLB1CFG
     andi.   r3,r3,0xfff
     lis r4,num_tlbcam_entries@ha
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3bfe7837e82..2aefe2a4129 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -53,6 +53,7 @@
 #include <asm/pci-bridge.h>
 #include <asm/phyp_dump.h>
 #include <asm/kexec.h>
+#include <mm/mmu_decl.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) printk(KERN_ERR fmt)
@@ -978,7 +979,10 @@ static int __init early_init_dt_scan_memory(unsigned long node,
         }
 #endif
         lmb_add(base, size);
+
+        memstart_addr = min((u64)memstart_addr, base);
     }
+
     return 0;
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 31ada9fdfc5..153a48dc8f4 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -435,7 +435,7 @@ void __init setup_system(void)
     printk("htab_address = 0x%p\n", htab_address);
     printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
 #if PHYSICAL_START > 0
-    printk("physical_start = 0x%x\n", PHYSICAL_START);
+    printk("physical_start = 0x%lx\n", PHYSICAL_START);
 #endif
     printk("-----------------------------------------------------\n");
 
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ada249bf977..ce10e2b1b90 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -202,7 +202,7 @@ adjust_total_lowmem(void)
         cam_max_size = max_lowmem_size;
 
     /* adjust lowmem size to max_lowmem_size */
-    ram = min(max_lowmem_size, total_lowmem);
+    ram = min(max_lowmem_size, (phys_addr_t)total_lowmem);
 
     /* Calculate CAM values */
     __cam0 = 1UL << 2 * (__ilog2(ram) / 2);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 47325f23c51..578750e4ca8 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -59,7 +59,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long total_memory;
 unsigned long total_lowmem;
 
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = (phys_addr_t)~0ull;
+EXPORT_SYMBOL(memstart_addr);
+phys_addr_t kernstart_addr;
+EXPORT_SYMBOL(kernstart_addr);
 phys_addr_t lowmem_end_addr;
 
 int boot_mapsize;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 698bd000f98..c5ac532a016 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -72,7 +72,8 @@
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
 
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = ~0;
+phys_addr_t kernstart_addr;
 
 void free_initmem(void)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 16def4dcff6..0062e6b1c55 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -216,7 +216,7 @@ void __init do_init_bootmem(void)
     unsigned long total_pages;
     int boot_mapsize;
 
-    max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+    max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
     total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
     total_pages = total_lowmem >> PAGE_SHIFT;
@@ -232,7 +232,8 @@ void __init do_init_bootmem(void)
 
     start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
-    boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+    min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+    boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
     /* Add active regions with valid PFNs */
     for (i = 0; i < lmb.memory.cnt; i++) {
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
index 10e8eb1e6f4..f6c93c71689 100644
--- a/include/asm-powerpc/kdump.h
+++ b/include/asm-powerpc/kdump.h
@@ -11,16 +11,11 @@
 
 #ifdef CONFIG_CRASH_DUMP
 
-#define PHYSICAL_START  KDUMP_KERNELBASE
 #define KDUMP_TRAMPOLINE_START  0x0100
 #define KDUMP_TRAMPOLINE_END    0x3000
 
 #define KDUMP_MIN_TCE_ENTRIES   2048
 
-#else /* !CONFIG_CRASH_DUMP */
-
-#define PHYSICAL_START  0x0
-
 #endif /* CONFIG_CRASH_DUMP */
 
 #ifndef __ASSEMBLY__
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index 6c850609b84..cffdf0eb0df 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -12,6 +12,7 @@
 
 #include <asm/asm-compat.h>
 #include <asm/kdump.h>
+#include <asm/types.h>
 
 /*
  * On PPC32 page size is 4K.  For PPC64 we support either 4K or 64K software
@@ -42,8 +43,23 @@
  *
  * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
  *
- * To get a physical address from a virtual one you subtract PAGE_OFFSET,
- * _not_ KERNELBASE.
+ * PAGE_OFFSET is the virtual address of the start of lowmem.
+ *
+ * PHYSICAL_START is the physical address of the start of the kernel.
+ *
+ * MEMORY_START is the physical address of the start of lowmem.
+ *
+ * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
+ * ppc32 and based on how they are set we determine MEMORY_START.
+ *
+ * For the linear mapping the following equation should be true:
+ * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
+ *
+ * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
+ *
+ * There are two ways to determine a physical address from a virtual one:
+ * va = pa + PAGE_OFFSET - MEMORY_START
+ * va = pa + KERNELBASE - PHYSICAL_START
  *
  * If you want to know something's offset from the start of the kernel you
  * should subtract KERNELBASE.
@@ -51,20 +67,33 @@
  * If you want to test if something's a kernel address, use is_kernel_addr().
  */
 
-#define PAGE_OFFSET     ASM_CONST(CONFIG_KERNEL_START)
-#define KERNELBASE      (PAGE_OFFSET + PHYSICAL_START)
-#define LOAD_OFFSET     PAGE_OFFSET
+#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
+#define PAGE_OFFSET     ASM_CONST(CONFIG_PAGE_OFFSET)
+#define LOAD_OFFSET     ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
+
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_FLATMEM)
+#ifndef __ASSEMBLY__
+extern phys_addr_t memstart_addr;
+extern phys_addr_t kernstart_addr;
+#endif
+#define PHYSICAL_START  kernstart_addr
+#define MEMORY_START    memstart_addr
+#else
+#define PHYSICAL_START  ASM_CONST(CONFIG_PHYSICAL_START)
+#define MEMORY_START    (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
+#endif
 
 #ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn)      ((pfn) < max_mapnr)
+#define ARCH_PFN_OFFSET     (MEMORY_START >> PAGE_SHIFT)
+#define pfn_valid(pfn)      ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
 #endif
 
 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_kaddr(pfn)   __va((pfn) << PAGE_SHIFT)
 #define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE))
+#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
index 51f8134b593..ebfae530a37 100644
--- a/include/asm-powerpc/page_32.h
+++ b/include/asm-powerpc/page_32.h
@@ -1,6 +1,12 @@
 #ifndef _ASM_POWERPC_PAGE_32_H
 #define _ASM_POWERPC_PAGE_32_H
 
+#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
+#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
+#endif
+#endif
+
 #define VM_DATA_DEFAULT_FLAGS   VM_DATA_DEFAULT_FLAGS32
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
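
The comment block added to include/asm-powerpc/page.h above defines the relationship between KERNELBASE, PAGE_OFFSET, PHYSICAL_START and MEMORY_START. As a rough illustration of that arithmetic (this sketch is not part of the patch; the addresses are made-up example values and assume the kernel is loaded at the start of memory, so PHYSICAL_START == MEMORY_START and KERNELBASE == PAGE_OFFSET), a small user-space C program mirroring the new __va()/__pa() definitions:

/* Sketch of the __va()/__pa() arithmetic introduced in page.h.
 * All values below are illustrative examples, not real hardware addresses.
 */
#include <stdio.h>

#define KERNELBASE      0xc0000000UL    /* CONFIG_KERNEL_START */
#define PAGE_OFFSET     0xc0000000UL    /* CONFIG_PAGE_OFFSET */
#define PHYSICAL_START  0x10000000UL    /* where the kernel image was loaded */
#define MEMORY_START    (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)

#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE))
#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)

int main(void)
{
        unsigned long pa = 0x10200000UL;        /* an example physical address */
        void *va = __va(pa);

        /* Both forms given in the page.h comment agree:
         *   va = pa + PAGE_OFFSET - MEMORY_START
         *   va = pa + KERNELBASE  - PHYSICAL_START
         */
        printf("pa 0x%lx -> va %p -> pa 0x%lx\n", pa, va, __pa(va));
        return 0;
}

With CONFIG_RELOCATABLE=y the same two quantities stop being compile-time constants: the boot code in head_fsl_booke.S records the load address in kernstart_addr and early_init_dt_scan_memory() in prom.c records the lowest memory base in memstart_addr, and page.h then maps PHYSICAL_START and MEMORY_START onto those variables, so the same macro arithmetic holds wherever the kernel was loaded.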