Diffstat (limited to 'arch/powerpc/kernel')
44 files changed, 2831 insertions(+), 756 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 80e9fe2632b..803858e8616 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -12,15 +12,15 @@ endif obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ irq.o align.o signal_32.o pmc.o vdso.o \ - init_task.o process.o systbl.o + init_task.o process.o systbl.o idle.o obj-y += vdso32/ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ signal_64.o ptrace32.o \ paca.o cpu_setup_power4.o \ - firmware.o sysfs.o idle_64.o + firmware.o sysfs.o obj-$(CONFIG_PPC64) += vdso64/ obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o -obj-$(CONFIG_POWER4) += idle_power4.o +obj-$(CONFIG_PPC_970_NAP) += idle_power4.o obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o procfs-$(CONFIG_PPC64) := proc_ppc64.o obj-$(CONFIG_PROC_FS) += $(procfs-y) @@ -34,6 +34,11 @@ obj-$(CONFIG_IBMEBUS) += ibmebus.o obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o +obj-$(CONFIG_TAU) += tau_6xx.o +obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o +obj32-$(CONFIG_MODULES) += module_32.o +obj-$(CONFIG_E500) += perfmon_fsl_booke.o ifeq ($(CONFIG_PPC_MERGE),y) @@ -51,7 +56,6 @@ obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o obj-$(CONFIG_MODULES) += ppc_ksyms.o obj-$(CONFIG_BOOTX_TEXT) += btext.o -obj-$(CONFIG_6xx) += idle_6xx.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o @@ -77,6 +81,7 @@ smpobj-$(CONFIG_SMP) += smp.o endif +obj-$(CONFIG_PPC32) += $(obj32-y) obj-$(CONFIG_PPC64) += $(obj64-y) extra-$(CONFIG_PPC_FPU) += fpu.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 882889b1592..8f85c5e8a55 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -91,6 +91,7 @@ int main(void) #endif /* CONFIG_PPC64 */ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); #ifdef CONFIG_PPC32 @@ -105,8 +106,6 @@ int main(void) DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); - DEFINE(PLATFORM_LPAR, PLATFORM_LPAR); - /* paca */ DEFINE(PACA_SIZE, sizeof(struct paca_struct)); DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S new file mode 100644 index 00000000000..55ed7716636 --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -0,0 +1,474 @@ +/* + * This file contains low level CPU setup functions. + * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/cputable.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> +#include <asm/cache.h> + +_GLOBAL(__setup_cpu_603) + b setup_common_caches +_GLOBAL(__setup_cpu_604) + mflr r4 + bl setup_common_caches + bl setup_604_hid0 + mtlr r4 + blr +_GLOBAL(__setup_cpu_750) + mflr r4 + bl __init_fpu_registers + bl setup_common_caches + bl setup_750_7400_hid0 + mtlr r4 + blr +_GLOBAL(__setup_cpu_750cx) + mflr r4 + bl __init_fpu_registers + bl setup_common_caches + bl setup_750_7400_hid0 + bl setup_750cx + mtlr r4 + blr +_GLOBAL(__setup_cpu_750fx) + mflr r4 + bl __init_fpu_registers + bl setup_common_caches + bl setup_750_7400_hid0 + bl setup_750fx + mtlr r4 + blr +_GLOBAL(__setup_cpu_7400) + mflr r4 + bl __init_fpu_registers + bl setup_7400_workarounds + bl setup_common_caches + bl setup_750_7400_hid0 + mtlr r4 + blr +_GLOBAL(__setup_cpu_7410) + mflr r4 + bl __init_fpu_registers + bl setup_7410_workarounds + bl setup_common_caches + bl setup_750_7400_hid0 + li r3,0 + mtspr SPRN_L2CR2,r3 + mtlr r4 + blr +_GLOBAL(__setup_cpu_745x) + mflr r4 + bl setup_common_caches + bl setup_745x_specifics + mtlr r4 + blr + +/* Enable caches for 603's, 604, 750 & 7400 */ +setup_common_caches: + mfspr r11,SPRN_HID0 + andi. r0,r11,HID0_DCE + ori r11,r11,HID0_ICE|HID0_DCE + ori r8,r11,HID0_ICFI + bne 1f /* don't invalidate the D-cache */ + ori r8,r8,HID0_DCI /* unless it wasn't enabled */ +1: sync + mtspr SPRN_HID0,r8 /* enable and invalidate caches */ + sync + mtspr SPRN_HID0,r11 /* enable caches */ + sync + isync + blr + +/* 604, 604e, 604ev, ... + * Enable superscalar execution & branch history table + */ +setup_604_hid0: + mfspr r11,SPRN_HID0 + ori r11,r11,HID0_SIED|HID0_BHTE + ori r8,r11,HID0_BTCD + sync + mtspr SPRN_HID0,r8 /* flush branch target address cache */ + sync /* on 604e/604r */ + mtspr SPRN_HID0,r11 + sync + isync + blr + +/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some + * erratas we work around here. + * Moto MPC710CE.pdf describes them, those are errata + * #3, #4 and #5 + * Note that we assume the firmware didn't choose to + * apply other workarounds (there are other ones documented + * in the .pdf). It appear that Apple firmware only works + * around #3 and with the same fix we use. We may want to + * check if the CPU is using 60x bus mode in which case + * the workaround for errata #4 is useless. Also, we may + * want to explicitely clear HID0_NOPDST as this is not + * needed once we have applied workaround #5 (though it's + * not set by Apple's firmware at least). + */ +setup_7400_workarounds: + mfpvr r3 + rlwinm r3,r3,0,20,31 + cmpwi 0,r3,0x0207 + ble 1f + blr +setup_7410_workarounds: + mfpvr r3 + rlwinm r3,r3,0,20,31 + cmpwi 0,r3,0x0100 + bnelr +1: + mfspr r11,SPRN_MSSSR0 + /* Errata #3: Set L1OPQ_SIZE to 0x10 */ + rlwinm r11,r11,0,9,6 + oris r11,r11,0x0100 + /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) 
*/ + oris r11,r11,0x0002 + /* Errata #5: Set DRLT_SIZE to 0x01 */ + rlwinm r11,r11,0,5,2 + oris r11,r11,0x0800 + sync + mtspr SPRN_MSSSR0,r11 + sync + isync + blr + +/* 740/750/7400/7410 + * Enable Store Gathering (SGE), Address Brodcast (ABE), + * Branch History Table (BHTE), Branch Target ICache (BTIC) + * Dynamic Power Management (DPM), Speculative (SPD) + * Clear Instruction cache throttling (ICTC) + */ +setup_750_7400_hid0: + mfspr r11,SPRN_HID0 + ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC + oris r11,r11,HID0_DPM@h +BEGIN_FTR_SECTION + xori r11,r11,HID0_BTIC +END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) +BEGIN_FTR_SECTION + xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ +END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) + li r3,HID0_SPD + andc r11,r11,r3 /* clear SPD: enable speculative */ + li r3,0 + mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */ + isync + mtspr SPRN_HID0,r11 + sync + isync + blr + +/* 750cx specific + * Looks like we have to disable NAP feature for some PLL settings... + * (waiting for confirmation) + */ +setup_750cx: + mfspr r10, SPRN_HID1 + rlwinm r10,r10,4,28,31 + cmpwi cr0,r10,7 + cmpwi cr1,r10,9 + cmpwi cr2,r10,11 + cror 4*cr0+eq,4*cr0+eq,4*cr1+eq + cror 4*cr0+eq,4*cr0+eq,4*cr2+eq + bnelr + lwz r6,CPU_SPEC_FEATURES(r5) + li r7,CPU_FTR_CAN_NAP + andc r6,r6,r7 + stw r6,CPU_SPEC_FEATURES(r5) + blr + +/* 750fx specific + */ +setup_750fx: + blr + +/* MPC 745x + * Enable Store Gathering (SGE), Branch Folding (FOLD) + * Branch History Table (BHTE), Branch Target ICache (BTIC) + * Dynamic Power Management (DPM), Speculative (SPD) + * Ensure our data cache instructions really operate. + * Timebase has to be running or we wouldn't have made it here, + * just ensure we don't disable it. + * Clear Instruction cache throttling (ICTC) + * Enable L2 HW prefetch + */ +setup_745x_specifics: + /* We check for the presence of an L3 cache setup by + * the firmware. If any, we disable NAP capability as + * it's known to be bogus on rev 2.1 and earlier + */ + mfspr r11,SPRN_L3CR + andis. r11,r11,L3CR_L3E@h + beq 1f + lwz r6,CPU_SPEC_FEATURES(r5) + andi. r0,r6,CPU_FTR_L3_DISABLE_NAP + beq 1f + li r7,CPU_FTR_CAN_NAP + andc r6,r6,r7 + stw r6,CPU_SPEC_FEATURES(r5) +1: + mfspr r11,SPRN_HID0 + + /* All of the bits we have to set..... + */ + ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE + ori r11,r11,HID0_LRSTK | HID0_BTIC + oris r11,r11,HID0_DPM@h +BEGIN_FTR_SECTION + xori r11,r11,HID0_BTIC +END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) +BEGIN_FTR_SECTION + xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ +END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) + + /* All of the bits we have to clear.... + */ + li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI + andc r11,r11,r3 /* clear SPD: enable speculative */ + li r3,0 + + mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */ + isync + mtspr SPRN_HID0,r11 + sync + isync + + /* Enable L2 HW prefetch, if L2 is enabled + */ + mfspr r3,SPRN_L2CR + andis. r3,r3,L2CR_L2E@h + beqlr + mfspr r3,SPRN_MSSCR0 + ori r3,r3,3 + sync + mtspr SPRN_MSSCR0,r3 + sync + isync + blr + +/* + * Initialize the FPU registers. 
This is needed to work around an errata + * in some 750 cpus where using a not yet initialized FPU register after + * power on reset may hang the CPU + */ +_GLOBAL(__init_fpu_registers) + mfmsr r10 + ori r11,r10,MSR_FP + mtmsr r11 + isync + addis r9,r3,empty_zero_page@ha + addi r9,r9,empty_zero_page@l + REST_32FPRS(0,r9) + sync + mtmsr r10 + isync + blr + + +/* Definitions for the table use to save CPU states */ +#define CS_HID0 0 +#define CS_HID1 4 +#define CS_HID2 8 +#define CS_MSSCR0 12 +#define CS_MSSSR0 16 +#define CS_ICTRL 20 +#define CS_LDSTCR 24 +#define CS_LDSTDB 28 +#define CS_SIZE 32 + + .data + .balign L1_CACHE_BYTES +cpu_state_storage: + .space CS_SIZE + .balign L1_CACHE_BYTES,0 + .text + +/* Called in normal context to backup CPU 0 state. This + * does not include cache settings. This function is also + * called for machine sleep. This does not include the MMU + * setup, BATs, etc... but rather the "special" registers + * like HID0, HID1, MSSCR0, etc... + */ +_GLOBAL(__save_cpu_setup) + /* Some CR fields are volatile, we back it up all */ + mfcr r7 + + /* Get storage ptr */ + lis r5,cpu_state_storage@h + ori r5,r5,cpu_state_storage@l + + /* Save HID0 (common to all CONFIG_6xx cpus) */ + mfspr r3,SPRN_HID0 + stw r3,CS_HID0(r5) + + /* Now deal with CPU type dependent registers */ + mfspr r3,SPRN_PVR + srwi r3,r3,16 + cmplwi cr0,r3,0x8000 /* 7450 */ + cmplwi cr1,r3,0x000c /* 7400 */ + cmplwi cr2,r3,0x800c /* 7410 */ + cmplwi cr3,r3,0x8001 /* 7455 */ + cmplwi cr4,r3,0x8002 /* 7457 */ + cmplwi cr5,r3,0x8003 /* 7447A */ + cmplwi cr6,r3,0x7000 /* 750FX */ + cmplwi cr7,r3,0x8004 /* 7448 */ + /* cr1 is 7400 || 7410 */ + cror 4*cr1+eq,4*cr1+eq,4*cr2+eq + /* cr0 is 74xx */ + cror 4*cr0+eq,4*cr0+eq,4*cr3+eq + cror 4*cr0+eq,4*cr0+eq,4*cr4+eq + cror 4*cr0+eq,4*cr0+eq,4*cr1+eq + cror 4*cr0+eq,4*cr0+eq,4*cr5+eq + cror 4*cr0+eq,4*cr0+eq,4*cr7+eq + bne 1f + /* Backup 74xx specific regs */ + mfspr r4,SPRN_MSSCR0 + stw r4,CS_MSSCR0(r5) + mfspr r4,SPRN_MSSSR0 + stw r4,CS_MSSSR0(r5) + beq cr1,1f + /* Backup 745x specific registers */ + mfspr r4,SPRN_HID1 + stw r4,CS_HID1(r5) + mfspr r4,SPRN_ICTRL + stw r4,CS_ICTRL(r5) + mfspr r4,SPRN_LDSTCR + stw r4,CS_LDSTCR(r5) + mfspr r4,SPRN_LDSTDB + stw r4,CS_LDSTDB(r5) +1: + bne cr6,1f + /* Backup 750FX specific registers */ + mfspr r4,SPRN_HID1 + stw r4,CS_HID1(r5) + /* If rev 2.x, backup HID2 */ + mfspr r3,SPRN_PVR + andi. r3,r3,0xff00 + cmpwi cr0,r3,0x0200 + bne 1f + mfspr r4,SPRN_HID2 + stw r4,CS_HID2(r5) +1: + mtcr r7 + blr + +/* Called with no MMU context (typically MSR:IR/DR off) to + * restore CPU state as backed up by the previous + * function. 
This does not include cache setting + */ +_GLOBAL(__restore_cpu_setup) + /* Some CR fields are volatile, we back it up all */ + mfcr r7 + + /* Get storage ptr */ + lis r5,(cpu_state_storage-KERNELBASE)@h + ori r5,r5,cpu_state_storage@l + + /* Restore HID0 */ + lwz r3,CS_HID0(r5) + sync + isync + mtspr SPRN_HID0,r3 + sync + isync + + /* Now deal with CPU type dependent registers */ + mfspr r3,SPRN_PVR + srwi r3,r3,16 + cmplwi cr0,r3,0x8000 /* 7450 */ + cmplwi cr1,r3,0x000c /* 7400 */ + cmplwi cr2,r3,0x800c /* 7410 */ + cmplwi cr3,r3,0x8001 /* 7455 */ + cmplwi cr4,r3,0x8002 /* 7457 */ + cmplwi cr5,r3,0x8003 /* 7447A */ + cmplwi cr6,r3,0x7000 /* 750FX */ + cmplwi cr7,r3,0x8004 /* 7448 */ + /* cr1 is 7400 || 7410 */ + cror 4*cr1+eq,4*cr1+eq,4*cr2+eq + /* cr0 is 74xx */ + cror 4*cr0+eq,4*cr0+eq,4*cr3+eq + cror 4*cr0+eq,4*cr0+eq,4*cr4+eq + cror 4*cr0+eq,4*cr0+eq,4*cr1+eq + cror 4*cr0+eq,4*cr0+eq,4*cr5+eq + cror 4*cr0+eq,4*cr0+eq,4*cr7+eq + bne 2f + /* Restore 74xx specific regs */ + lwz r4,CS_MSSCR0(r5) + sync + mtspr SPRN_MSSCR0,r4 + sync + isync + lwz r4,CS_MSSSR0(r5) + sync + mtspr SPRN_MSSSR0,r4 + sync + isync + bne cr2,1f + /* Clear 7410 L2CR2 */ + li r4,0 + mtspr SPRN_L2CR2,r4 +1: beq cr1,2f + /* Restore 745x specific registers */ + lwz r4,CS_HID1(r5) + sync + mtspr SPRN_HID1,r4 + isync + sync + lwz r4,CS_ICTRL(r5) + sync + mtspr SPRN_ICTRL,r4 + isync + sync + lwz r4,CS_LDSTCR(r5) + sync + mtspr SPRN_LDSTCR,r4 + isync + sync + lwz r4,CS_LDSTDB(r5) + sync + mtspr SPRN_LDSTDB,r4 + isync + sync +2: bne cr6,1f + /* Restore 750FX specific registers + * that is restore HID2 on rev 2.x and PLL config & switch + * to PLL 0 on all + */ + /* If rev 2.x, restore HID2 with low voltage bit cleared */ + mfspr r3,SPRN_PVR + andi. r3,r3,0xff00 + cmpwi cr0,r3,0x0200 + bne 4f + lwz r4,CS_HID2(r5) + rlwinm r4,r4,0,19,17 + mtspr SPRN_HID2,r4 + sync +4: + lwz r4,CS_HID1(r5) + rlwinm r5,r4,0,16,14 + mtspr SPRN_HID1,r5 + /* Wait for PLL to stabilize */ + mftbl r5 +3: mftbl r6 + sub r6,r6,r5 + cmplwi cr0,r6,10000 + ble 3b + /* Setup final PLL */ + mtspr SPRN_HID1,r4 +1: + mtcr r7 + blr + diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 211d72653ea..764d0732971 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -61,7 +61,7 @@ static int __init parse_elfcorehdr(char *p) if (p) elfcorehdr_addr = memparse(p, &p); - return 0; + return 1; } __setup("elfcorehdr=", parse_elfcorehdr); #endif @@ -71,7 +71,7 @@ static int __init parse_savemaxmem(char *p) if (p) saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; - return 0; + return 1; } __setup("savemaxmem=", parse_savemaxmem); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 4827ca1ec89..8866fd26c6b 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -128,45 +128,48 @@ transfer_to_handler: stw r12,4(r11) #endif b 3f + 2: /* if from kernel, check interrupted DOZE/NAP mode and * check for stack overflow */ + lwz r9,THREAD_INFO-THREAD(r12) + cmplw r1,r9 /* if r1 <= current->thread_info */ + ble- stack_ovf /* then the kernel stack overflowed */ +5: #ifdef CONFIG_6xx - mfspr r11,SPRN_HID0 - mtcr r11 -BEGIN_FTR_SECTION - bt- 8,power_save_6xx_restore /* Check DOZE */ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) -BEGIN_FTR_SECTION - bt- 9,power_save_6xx_restore /* Check NAP */ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) + tophys(r9,r9) /* check local flags */ + lwz r12,TI_LOCAL_FLAGS(r9) + mtcrf 0x01,r12 + bt- 31-TLF_NAPPING,4f #endif /* CONFIG_6xx 
*/ .globl transfer_to_handler_cont transfer_to_handler_cont: - lwz r11,THREAD_INFO-THREAD(r12) - cmplw r1,r11 /* if r1 <= current->thread_info */ - ble- stack_ovf /* then the kernel stack overflowed */ 3: mflr r9 lwz r11,0(r9) /* virtual address of handler */ lwz r9,4(r9) /* where to go when done */ - FIX_SRR1(r10,r12) mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r10 mtlr r9 SYNC RFI /* jump to handler, enable MMU */ +#ifdef CONFIG_6xx +4: rlwinm r12,r12,0,~_TLF_NAPPING + stw r12,TI_LOCAL_FLAGS(r9) + b power_save_6xx_restore +#endif + /* * On kernel stack overflow, load up an initial stack pointer * and call StackOverflow(regs), which should not return. */ stack_ovf: /* sometimes we use a statically-allocated stack, which is OK. */ - lis r11,_end@h - ori r11,r11,_end@l - cmplw r1,r11 - ble 3b /* r1 <= &_end is OK */ + lis r12,_end@h + ori r12,r12,_end@l + cmplw r1,r12 + ble 5b /* r1 <= &_end is OK */ SAVE_NVGPRS(r11) addi r3,r1,STACK_FRAME_OVERHEAD lis r1,init_thread_union@ha diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 1060155d84c..19ad5c6b181 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -617,6 +617,12 @@ _GLOBAL(enter_rtas) mfsrr1 r10 std r10,_SRR1(r1) + /* Temporary workaround to clear CR until RTAS can be modified to + * ignore all bits. + */ + li r0,0 + mtcr r0 + /* There is no way it is acceptable to get here with interrupts enabled, * check it with the asm equivalent of WARN_ON */ diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c index 4d37a3cb80f..0bfe9061720 100644 --- a/arch/powerpc/kernel/firmware.c +++ b/arch/powerpc/kernel/firmware.c @@ -14,7 +14,9 @@ */ #include <linux/config.h> +#include <linux/module.h> #include <asm/firmware.h> -unsigned long ppc64_firmware_features; +unsigned long powerpc_firmware_features; +EXPORT_SYMBOL_GPL(powerpc_firmware_features); diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 35084f3a841..b7d140430a4 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -376,11 +376,28 @@ label##_common: \ bl hdlr; \ b .ret_from_except +/* + * Like STD_EXCEPTION_COMMON, but for exceptions that can occur + * in the idle task and therefore need the special idle handling. + */ +#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \ + .align 7; \ + .globl label##_common; \ +label##_common: \ + EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + FINISH_NAP; \ + DISABLE_INTS; \ + bl .save_nvgprs; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + bl hdlr; \ + b .ret_from_except + #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ .align 7; \ .globl label##_common; \ label##_common: \ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + FINISH_NAP; \ DISABLE_INTS; \ bl .ppc64_runlatch_on; \ addi r3,r1,STACK_FRAME_OVERHEAD; \ @@ -388,6 +405,25 @@ label##_common: \ b .ret_from_except_lite /* + * When the idle code in power4_idle puts the CPU into NAP mode, + * it has to do so in a loop, and relies on the external interrupt + * and decrementer interrupt entry code to get it out of the loop. + * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags + * to signal that it is in the loop and needs help to get out. + */ +#ifdef CONFIG_PPC_970_NAP +#define FINISH_NAP \ +BEGIN_FTR_SECTION \ + clrrdi r11,r1,THREAD_SHIFT; \ + ld r9,TI_LOCAL_FLAGS(r11); \ + andi. 
r10,r9,_TLF_NAPPING; \ + bnel power4_fixup_nap; \ +END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) +#else +#define FINISH_NAP +#endif + +/* * Start of pSeries system interrupt routines */ . = 0x100 @@ -772,6 +808,7 @@ hardware_interrupt_iSeries_masked: .globl machine_check_common machine_check_common: EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) + FINISH_NAP DISABLE_INTS bl .save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD @@ -783,7 +820,7 @@ machine_check_common: STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception) + STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) #ifdef CONFIG_ALTIVEC STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) @@ -1034,6 +1071,7 @@ unrecov_slb: .globl hardware_interrupt_entry hardware_interrupt_common: EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) + FINISH_NAP hardware_interrupt_entry: DISABLE_INTS bl .ppc64_runlatch_on @@ -1041,6 +1079,15 @@ hardware_interrupt_entry: bl .do_IRQ b .ret_from_except_lite +#ifdef CONFIG_PPC_970_NAP +power4_fixup_nap: + andc r9,r9,r10 + std r9,TI_LOCAL_FLAGS(r11) + ld r10,_LINK(r1) /* make idle task do the */ + std r10,_NIP(r1) /* equivalent of a blr */ + blr +#endif + .align 7 .globl alignment_common alignment_common: @@ -1544,7 +1591,11 @@ _STATIC(__boot_from_prom) mr r28,r6 mr r27,r7 - /* Align the stack to 16-byte boundary for broken yaboot */ + /* + * Align the stack to 16-byte boundary + * Depending on the size and layout of the ELF sections in the initial + * boot binary, the stack pointer will be unalignet on PowerMac + */ rldicr r1,r1,0,59 /* Make sure we are running in 64 bits mode */ @@ -1847,21 +1898,6 @@ _STATIC(start_here_multiplatform) bl .__save_cpu_setup sync - /* Setup a valid physical PACA pointer in SPRG3 for early_setup - * note that boot_cpuid can always be 0 nowadays since there is - * nowhere it can be initialized differently before we reach this - * code - */ - LOAD_REG_IMMEDIATE(r27, boot_cpuid) - add r27,r27,r26 - lwz r27,0(r27) - - LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */ - mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ - add r13,r13,r24 /* for this processor. */ - add r13,r13,r26 /* convert to physical addr */ - mtspr SPRN_SPRG3,r13 - /* Do very early kernel initializations, including initial hash table, * stab and slb setup before we turn on relocation. */ @@ -1930,6 +1966,17 @@ _STATIC(start_here_common) /* Not reached */ BUG_OPCODE +/* Put the paca pointer into r13 and SPRG3 */ +_GLOBAL(setup_boot_paca) + LOAD_REG_IMMEDIATE(r3, boot_cpuid) + lwz r3,0(r3) + LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ + mulli r3,r3,PACA_SIZE /* Calculate vaddr of right paca */ + add r13,r3,r4 /* for this processor. */ + mtspr SPRN_SPRG3,r13 + + blr + /* * We put a few things here that have to be page-aligned. * This stuff goes at the beginning of the bss, which is page-aligned. diff --git a/arch/powerpc/kernel/idle_64.c b/arch/powerpc/kernel/idle.c index b879d3057ef..d491052c8e0 100644 --- a/arch/powerpc/kernel/idle_64.c +++ b/arch/powerpc/kernel/idle.c @@ -2,13 +2,17 @@ * Idle daemon for PowerPC. Idle daemon will handle any action * that needs to be taken when the system becomes idle. 
* - * Originally Written by Cort Dougan (cort@cs.nmt.edu) + * Originally written by Cort Dougan (cort@cs.nmt.edu). + * Subsequent 32-bit hacking by Tom Rini, Armin Kuster, + * Paul Mackerras and others. * * iSeries supported added by Mike Corrigan <mikejc@us.ibm.com> * * Additional shared processor, SMT, and firmware support * Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com> * + * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org> + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version @@ -29,18 +33,43 @@ #include <asm/machdep.h> #include <asm/smp.h> -extern void power4_idle(void); +#ifdef CONFIG_HOTPLUG_CPU +#define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \ + system_state == SYSTEM_RUNNING) +#else +#define cpu_should_die() 0 +#endif -void default_idle(void) +/* + * The body of the idle task. + */ +void cpu_idle(void) { - unsigned int cpu = smp_processor_id(); - set_thread_flag(TIF_POLLING_NRFLAG); + if (ppc_md.idle_loop) + ppc_md.idle_loop(); /* doesn't return */ + set_thread_flag(TIF_POLLING_NRFLAG); while (1) { - if (!need_resched()) { - while (!need_resched() && !cpu_is_offline(cpu)) { - ppc64_runlatch_off(); + while (!need_resched() && !cpu_should_die()) { + ppc64_runlatch_off(); + if (ppc_md.power_save) { + clear_thread_flag(TIF_POLLING_NRFLAG); + /* + * smp_mb is so clearing of TIF_POLLING_NRFLAG + * is ordered w.r.t. need_resched() test. + */ + smp_mb(); + local_irq_disable(); + + /* check again after disabling irqs */ + if (!need_resched() && !cpu_should_die()) + ppc_md.power_save(); + + local_irq_enable(); + set_thread_flag(TIF_POLLING_NRFLAG); + + } else { /* * Go into low thread priority and possibly * low power mode. @@ -48,46 +77,18 @@ void default_idle(void) HMT_low(); HMT_very_low(); } - - HMT_medium(); } + HMT_medium(); ppc64_runlatch_on(); + if (cpu_should_die()) + cpu_die(); preempt_enable_no_resched(); schedule(); preempt_disable(); - if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) - cpu_die(); } } -void native_idle(void) -{ - while (1) { - ppc64_runlatch_off(); - - if (!need_resched()) - power4_idle(); - - if (need_resched()) { - ppc64_runlatch_on(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } - - if (cpu_is_offline(smp_processor_id()) && - system_state == SYSTEM_RUNNING) - cpu_die(); - } -} - -void cpu_idle(void) -{ - BUG_ON(NULL == ppc_md.idle_loop); - ppc_md.idle_loop(); -} - int powersave_nap; #ifdef CONFIG_SYSCTL diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index 444fdcc769f..b45fa0e3721 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -22,8 +22,6 @@ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> -#undef DEBUG - .text /* @@ -87,19 +85,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) cmpwi 0,r3,0 beqlr - /* Clear MSR:EE */ - mfmsr r7 - rlwinm r0,r7,0,17,15 - mtmsr r0 - - /* Check current_thread_info()->flags */ - rlwinm r4,r1,0,0,18 - lwz r4,TI_FLAGS(r4) - andi. r0,r4,_TIF_NEED_RESCHED - beq 1f - mtmsr r7 /* out of line this ? */ - blr -1: /* Some pre-nap cleanups needed on some CPUs */ andis. 
r0,r3,HID0_NAP@h beq 2f @@ -122,12 +107,6 @@ BEGIN_FTR_SECTION dcbf 0,r4 dcbf 0,r4 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) -#ifdef DEBUG - lis r6,nap_enter_count@ha - lwz r4,nap_enter_count@l(r6) - addi r4,r4,1 - stw r4,nap_enter_count@l(r6) -#endif 2: BEGIN_FTR_SECTION /* Go to low speed mode on some 750FX */ @@ -157,47 +136,42 @@ BEGIN_FTR_SECTION DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - ori r7,r7,MSR_EE /* Could be ommited (already set) */ + rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */ + lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ + ori r8,r8,_TLF_NAPPING /* so when we take an exception */ + stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ + mfmsr r7 + ori r7,r7,MSR_EE oris r7,r7,MSR_POW@h - sync - isync +1: sync mtmsr r7 isync - sync - blr - + b 1b + /* * Return from NAP/DOZE mode, restore some CPU specific registers, * we are called with DR/IR still off and r2 containing physical - * address of current. + * address of current. R11 points to the exception frame (physical + * address). We have to preserve r10. */ _GLOBAL(power_save_6xx_restore) - mfspr r11,SPRN_HID0 - rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */ - cror 4*cr1+eq,4*cr0+eq,4*cr0+eq -BEGIN_FTR_SECTION - rlwinm r11,r11,0,9,7 /* Clear DOZE */ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) - mtspr SPRN_HID0, r11 + lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */ + stw r9,_NIP(r11) /* make it do a blr */ -#ifdef DEBUG - beq cr1,1f - lis r11,(nap_return_count-KERNELBASE)@ha - lwz r9,nap_return_count@l(r11) - addi r9,r9,1 - stw r9,nap_return_count@l(r11) -1: -#endif - - rlwinm r9,r1,0,0,18 - tophys(r9,r9) - lwz r11,TI_CPU(r9) +#ifdef CONFIG_SMP + mfspr r12,SPRN_SPRG3 + lwz r11,TI_CPU(r12) /* get cpu number * 4 */ slwi r11,r11,2 +#else + li r11,0 +#endif /* Todo make sure all these are in the same page - * and load r22 (@ha part + CPU offset) only once + * and load r11 (@ha part + CPU offset) only once */ BEGIN_FTR_SECTION - beq cr1,1f + mfspr r9,SPRN_HID0 + andis. r9,r9,HID0_NAP@h + beq 1f addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha lwz r9,nap_save_msscr0@l(r9) mtspr SPRN_MSSCR0, r9 @@ -220,14 +194,5 @@ _GLOBAL(nap_save_msscr0) _GLOBAL(nap_save_hid1) .space 4*NR_CPUS -_GLOBAL(powersave_nap) - .long 0 _GLOBAL(powersave_lowspeed) .long 0 - -#ifdef DEBUG -_GLOBAL(nap_enter_count) - .space 4 -_GLOBAL(nap_return_count) - .space 4 -#endif diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index c16b4afab58..d85c7c938ee 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -1,11 +1,5 @@ /* - * This file contains the power_save function for 6xx & 7xxx CPUs - * rewritten in assembler - * - * Warning ! This code assumes that if your machine has a 750fx - * it will have PLL 1 set to low speed mode (used during NAP/DOZE). - * if this is not the case some additional changes will have to - * be done to check a runtime var (a bit like powersave-nap) + * This file contains the power_save function for 970-family CPUs. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -26,53 +20,31 @@ .text -/* - * Here is the power_save_6xx function. This could eventually be - * split into several functions & changing the function pointer - * depending on the various features. 
- */ _GLOBAL(power4_idle) BEGIN_FTR_SECTION blr END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) - /* We must dynamically check for the NAP feature as it - * can be cleared by CPU init after the fixups are done - */ - LOAD_REG_ADDRBASE(r3,cur_cpu_spec) - ld r4,ADDROFF(cur_cpu_spec)(r3) - ld r4,CPU_SPEC_FEATURES(r4) - andi. r0,r4,CPU_FTR_CAN_NAP - beqlr /* Now check if user or arch enabled NAP mode */ LOAD_REG_ADDRBASE(r3,powersave_nap) lwz r4,ADDROFF(powersave_nap)(r3) cmpwi 0,r4,0 beqlr - /* Clear MSR:EE */ - mfmsr r7 - li r4,0 - ori r4,r4,MSR_EE - andc r0,r7,r4 - mtmsrd r0 - - /* Check current_thread_info()->flags */ - clrrdi r4,r1,THREAD_SHIFT - ld r4,TI_FLAGS(r4) - andi. r0,r4,_TIF_NEED_RESCHED - beq 1f - mtmsrd r7 /* out of line this ? */ - blr -1: /* Go to NAP now */ BEGIN_FTR_SECTION DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + clrrdi r9,r1,THREAD_SHIFT /* current thread_info */ + ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ + ori r8,r8,_TLF_NAPPING /* so when we take an exception */ + std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ + mfmsr r7 + ori r7,r7,MSR_EE oris r7,r7,MSR_POW@h - sync +1: sync isync mtmsrd r7 isync - sync - blr + b 1b + diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 771a59cbd21..57d560c6889 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -272,18 +272,26 @@ unsigned int virt_irq_to_real_map[NR_IRQS]; * Don't use virtual irqs 0, 1, 2 for devices. * The pcnet32 driver considers interrupt numbers < 2 to be invalid, * and 2 is the XICS IPI interrupt. - * We limit virtual irqs to 17 less than NR_IRQS so that when we - * offset them by 16 (to reserve the first 16 for ISA interrupts) - * we don't end up with an interrupt number >= NR_IRQS. + * We limit virtual irqs to __irq_offet_value less than virt_irq_max so + * that when we offset them we don't end up with an interrupt + * number >= virt_irq_max. 
*/ #define MIN_VIRT_IRQ 3 -#define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1) -#define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1) + +unsigned int virt_irq_max; +static unsigned int max_virt_irq; +static unsigned int nr_virt_irqs; void virt_irq_init(void) { int i; + + if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1))) + virt_irq_max = NR_IRQS - 1; + max_virt_irq = virt_irq_max - __irq_offset_value; + nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1; + for (i = 0; i < NR_IRQS; i++) virt_irq_to_real_map[i] = UNDEFINED_IRQ; } @@ -308,17 +316,17 @@ int virt_irq_create_mapping(unsigned int real_irq) return real_irq; } - /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */ + /* map to a number between MIN_VIRT_IRQ and max_virt_irq */ virq = real_irq; - if (virq > MAX_VIRT_IRQ) - virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; + if (virq > max_virt_irq) + virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; /* search for this number or a free slot */ first_virq = virq; while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { if (virt_irq_to_real_map[virq] == real_irq) return virq; - if (++virq > MAX_VIRT_IRQ) + if (++virq > max_virt_irq) virq = MIN_VIRT_IRQ; if (virq == first_virq) goto nospace; /* oops, no free slots */ @@ -330,8 +338,8 @@ int virt_irq_create_mapping(unsigned int real_irq) nospace: if (!warned) { printk(KERN_CRIT "Interrupt table is full\n"); - printk(KERN_CRIT "Increase NR_IRQS (currently %d) " - "in your kernel sources and rebuild.\n", NR_IRQS); + printk(KERN_CRIT "Increase virt_irq_max (currently %d) " + "in your kernel sources and rebuild.\n", virt_irq_max); warned = 1; } return NO_IRQ; @@ -349,8 +357,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) virq = real_irq; - if (virq > MAX_VIRT_IRQ) - virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; + if (virq > max_virt_irq) + virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; first_virq = virq; @@ -360,7 +368,7 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) virq++; - if (virq >= MAX_VIRT_IRQ) + if (virq >= max_virt_irq) virq = 0; } while (first_virq != virq); @@ -379,7 +387,7 @@ void irq_ctx_init(void) struct thread_info *tp; int i; - for_each_cpu(i) { + for_each_possible_cpu(i) { memset((void *)softirq_ctx[i], 0, THREAD_SIZE); tp = softirq_ctx[i]; tp->cpu = i; diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S new file mode 100644 index 00000000000..d7f4e982b53 --- /dev/null +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -0,0 +1,471 @@ +/* + L2CR functions + Copyright © 1997-1998 by PowerLogix R & D, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ +/* + Thur, Dec. 12, 1998. + - First public release, contributed by PowerLogix. + *********** + Sat, Aug. 7, 1999. + - Terry: Made sure code disabled interrupts before running. (Previously + it was assumed interrupts were already disabled). + - Terry: Updated for tentative G4 support. 
4MB of memory is now flushed + instead of 2MB. (Prob. only 3 is necessary). + - Terry: Updated for workaround to HID0[DPM] processor bug + during global invalidates. + *********** + Thu, July 13, 2000. + - Terry: Added isync to correct for an errata. + + 22 August 2001. + - DanM: Finally added the 7450 patch I've had for the past + several months. The L2CR is similar, but I'm going + to assume the user of this functions knows what they + are doing. + + Author: Terry Greeniaus (tgree@phys.ualberta.ca) + Please e-mail updates to this file to me, thanks! +*/ +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/cputable.h> +#include <asm/ppc_asm.h> +#include <asm/cache.h> +#include <asm/page.h> + +/* Usage: + + When setting the L2CR register, you must do a few special + things. If you are enabling the cache, you must perform a + global invalidate. If you are disabling the cache, you must + flush the cache contents first. This routine takes care of + doing these things. When first enabling the cache, make sure + you pass in the L2CR you want, as well as passing in the + global invalidate bit set. A global invalidate will only be + performed if the L2I bit is set in applyThis. When enabling + the cache, you should also set the L2E bit in applyThis. If + you want to modify the L2CR contents after the cache has been + enabled, the recommended procedure is to first call + __setL2CR(0) to disable the cache and then call it again with + the new values for L2CR. Examples: + + _setL2CR(0) - disables the cache + _setL2CR(0xB3A04000) - enables my G3 upgrade card: + - L2E set to turn on the cache + - L2SIZ set to 1MB + - L2CLK set to 1:1 + - L2RAM set to pipelined synchronous late-write + - L2I set to perform a global invalidation + - L2OH set to 0.5 nS + - L2DF set because this upgrade card + requires it + + A similar call should work for your card. You need to know + the correct setting for your card and then place them in the + fields I have outlined above. Other fields support optional + features, such as L2DO which caches only data, or L2TS which + causes cache pushes from the L1 cache to go to the L2 cache + instead of to main memory. + +IMPORTANT: + Starting with the 7450, the bits in this register have moved + or behave differently. The Enable, Parity Enable, Size, + and L2 Invalidate are the only bits that have not moved. + The size is read-only for these processors with internal L2 + cache, and the invalidate is a control as well as status. + -- Dan + +*/ +/* + * Summary: this procedure ignores the L2I bit in the value passed in, + * flushes the cache if it was already enabled, always invalidates the + * cache, then enables the cache if the L2E bit is set in the value + * passed in. + * -- paulus. + */ +_GLOBAL(_set_L2CR) + /* Make sure this is a 750 or 7400 chip */ +BEGIN_FTR_SECTION + li r3,-1 + blr +END_FTR_SECTION_IFCLR(CPU_FTR_L2CR) + + mflr r9 + + /* Stop DST streams */ +BEGIN_FTR_SECTION + DSSALL + sync +END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + + /* Turn off interrupts and data relocation. */ + mfmsr r7 /* Save MSR in r7 */ + rlwinm r4,r7,0,17,15 + rlwinm r4,r4,0,28,26 /* Turn off DR bit */ + sync + mtmsr r4 + isync + + /* Before we perform the global invalidation, we must disable dynamic + * power management via HID0[DPM] to work around a processor bug where + * DPM can possibly interfere with the state machine in the processor + * that invalidates the L2 cache tags. 
+ */ + mfspr r8,SPRN_HID0 /* Save HID0 in r8 */ + rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */ + sync + mtspr SPRN_HID0,r4 /* Disable DPM */ + sync + + /* Get the current enable bit of the L2CR into r4 */ + mfspr r4,SPRN_L2CR + + /* Tweak some bits */ + rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */ + rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */ + rlwinm r3,r3,0,1,31 /* Turn off the enable bit */ + + /* Check to see if we need to flush */ + rlwinm. r4,r4,0,0,0 + beq 2f + + /* Flush the cache. First, read the first 4MB of memory (physical) to + * put new data in the cache. (Actually we only need + * the size of the L2 cache plus the size of the L1 cache, but 4MB will + * cover everything just to be safe). + */ + + /**** Might be a good idea to set L2DO here - to prevent instructions + from getting into the cache. But since we invalidate + the next time we enable the cache it doesn't really matter. + Don't do this unless you accomodate all processor variations. + The bit moved on the 7450..... + ****/ + +BEGIN_FTR_SECTION + /* Disable L2 prefetch on some 745x and try to ensure + * L2 prefetch engines are idle. As explained by errata + * text, we can't be sure they are, we just hope very hard + * that well be enough (sic !). At least I noticed Apple + * doesn't even bother doing the dcbf's here... + */ + mfspr r4,SPRN_MSSCR0 + rlwinm r4,r4,0,0,29 + sync + mtspr SPRN_MSSCR0,r4 + sync + isync + lis r4,KERNELBASE@h + dcbf 0,r4 + dcbf 0,r4 + dcbf 0,r4 + dcbf 0,r4 +END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) + + /* TODO: use HW flush assist when available */ + + lis r4,0x0002 + mtctr r4 + li r4,0 +1: + lwzx r0,r0,r4 + addi r4,r4,32 /* Go to start of next cache line */ + bdnz 1b + isync + + /* Now, flush the first 4MB of memory */ + lis r4,0x0002 + mtctr r4 + li r4,0 + sync +1: + dcbf 0,r4 + addi r4,r4,32 /* Go to start of next cache line */ + bdnz 1b + +2: + /* Set up the L2CR configuration bits (and switch L2 off) */ + /* CPU errata: Make sure the mtspr below is already in the + * L1 icache + */ + b 20f + .balign L1_CACHE_BYTES +22: + sync + mtspr SPRN_L2CR,r3 + sync + b 23f +20: + b 21f +21: sync + isync + b 22b + +23: + /* Perform a global invalidation */ + oris r3,r3,0x0020 + sync + mtspr SPRN_L2CR,r3 + sync + isync /* For errata */ + +BEGIN_FTR_SECTION + /* On the 7450, we wait for the L2I bit to clear...... + */ +10: mfspr r3,SPRN_L2CR + andis. r4,r3,0x0020 + bne 10b + b 11f +END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) + + /* Wait for the invalidation to complete */ +3: mfspr r3,SPRN_L2CR + rlwinm. 
r4,r3,0,31,31 + bne 3b + +11: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */ + sync + mtspr SPRN_L2CR,r3 + sync + + /* See if we need to enable the cache */ + cmplwi r5,0 + beq 4f + + /* Enable the cache */ + oris r3,r3,0x8000 + mtspr SPRN_L2CR,r3 + sync + + /* Enable L2 HW prefetch on 744x/745x */ +BEGIN_FTR_SECTION + mfspr r3,SPRN_MSSCR0 + ori r3,r3,3 + sync + mtspr SPRN_MSSCR0,r3 + sync + isync +END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) +4: + + /* Restore HID0[DPM] to whatever it was before */ + sync + mtspr 1008,r8 + sync + + /* Restore MSR (restores EE and DR bits to original state) */ + SYNC + mtmsr r7 + isync + + mtlr r9 + blr + +_GLOBAL(_get_L2CR) + /* Return the L2CR contents */ + li r3,0 +BEGIN_FTR_SECTION + mfspr r3,SPRN_L2CR +END_FTR_SECTION_IFSET(CPU_FTR_L2CR) + blr + + +/* + * Here is a similar routine for dealing with the L3 cache + * on the 745x family of chips + */ + +_GLOBAL(_set_L3CR) + /* Make sure this is a 745x chip */ +BEGIN_FTR_SECTION + li r3,-1 + blr +END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) + + /* Turn off interrupts and data relocation. */ + mfmsr r7 /* Save MSR in r7 */ + rlwinm r4,r7,0,17,15 + rlwinm r4,r4,0,28,26 /* Turn off DR bit */ + sync + mtmsr r4 + isync + + /* Stop DST streams */ + DSSALL + sync + + /* Get the current enable bit of the L3CR into r4 */ + mfspr r4,SPRN_L3CR + + /* Tweak some bits */ + rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */ + rlwinm r3,r3,0,22,20 /* Turn off the invalidate bit */ + rlwinm r3,r3,0,2,31 /* Turn off the enable & PE bits */ + rlwinm r3,r3,0,5,3 /* Turn off the clken bit */ + /* Check to see if we need to flush */ + rlwinm. r4,r4,0,0,0 + beq 2f + + /* Flush the cache. + */ + + /* TODO: use HW flush assist */ + + lis r4,0x0008 + mtctr r4 + li r4,0 +1: + lwzx r0,r0,r4 + dcbf 0,r4 + addi r4,r4,32 /* Go to start of next cache line */ + bdnz 1b + +2: + /* Set up the L3CR configuration bits (and switch L3 off) */ + sync + mtspr SPRN_L3CR,r3 + sync + + oris r3,r3,L3CR_L3RES@h /* Set reserved bit 5 */ + mtspr SPRN_L3CR,r3 + sync + oris r3,r3,L3CR_L3CLKEN@h /* Set clken */ + mtspr SPRN_L3CR,r3 + sync + + /* Wait for stabilize */ + li r0,256 + mtctr r0 +1: bdnz 1b + + /* Perform a global invalidation */ + ori r3,r3,0x0400 + sync + mtspr SPRN_L3CR,r3 + sync + isync + + /* We wait for the L3I bit to clear...... */ +10: mfspr r3,SPRN_L3CR + andi. r4,r3,0x0400 + bne 10b + + /* Clear CLKEN */ + rlwinm r3,r3,0,5,3 /* Turn off the clken bit */ + mtspr SPRN_L3CR,r3 + sync + + /* Wait for stabilize */ + li r0,256 + mtctr r0 +1: bdnz 1b + + /* See if we need to enable the cache */ + cmplwi r5,0 + beq 4f + + /* Enable the cache */ + oris r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h + mtspr SPRN_L3CR,r3 + sync + + /* Wait for stabilize */ + li r0,256 + mtctr r0 +1: bdnz 1b + + /* Restore MSR (restores EE and DR bits to original state) */ +4: SYNC + mtmsr r7 + isync + blr + +_GLOBAL(_get_L3CR) + /* Return the L3CR contents */ + li r3,0 +BEGIN_FTR_SECTION + mfspr r3,SPRN_L3CR +END_FTR_SECTION_IFSET(CPU_FTR_L3CR) + blr + +/* --- End of PowerLogix code --- + */ + + +/* flush_disable_L1() - Flush and disable L1 cache + * + * clobbers r0, r3, ctr, cr0 + * Must be called with interrupts disabled and MMU enabled. 
+ */ +_GLOBAL(__flush_disable_L1) + /* Stop pending alitvec streams and memory accesses */ +BEGIN_FTR_SECTION + DSSALL +END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + sync + + /* Load counter to 0x4000 cache lines (512k) and + * load cache with datas + */ + li r3,0x4000 /* 512kB / 32B */ + mtctr r3 + lis r3,KERNELBASE@h +1: + lwz r0,0(r3) + addi r3,r3,0x0020 /* Go to start of next cache line */ + bdnz 1b + isync + sync + + /* Now flush those cache lines */ + li r3,0x4000 /* 512kB / 32B */ + mtctr r3 + lis r3,KERNELBASE@h +1: + dcbf 0,r3 + addi r3,r3,0x0020 /* Go to start of next cache line */ + bdnz 1b + sync + + /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */ + mfspr r3,SPRN_HID0 + rlwinm r3,r3,0,18,15 + mtspr SPRN_HID0,r3 + sync + isync + blr + +/* inval_enable_L1 - Invalidate and enable L1 cache + * + * Assumes L1 is already disabled and MSR:EE is off + * + * clobbers r3 + */ +_GLOBAL(__inval_enable_L1) + /* Enable and then Flash inval the instruction & data cache */ + mfspr r3,SPRN_HID0 + ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI + sync + isync + mtspr SPRN_HID0,r3 + xori r3,r3, HID0_ICFI|HID0_DCI + mtspr SPRN_HID0,r3 + sync + + blr + + diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index c7a799a0951..6e67b5b49ba 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -37,7 +37,7 @@ static int legacy_serial_console = -1; static int __init add_legacy_port(struct device_node *np, int want_index, int iotype, phys_addr_t base, phys_addr_t taddr, unsigned long irq, - unsigned int flags) + upf_t flags) { u32 *clk, *spd, clock = BASE_BAUD * 16; int index; @@ -113,7 +113,7 @@ static int __init add_legacy_soc_port(struct device_node *np, { phys_addr_t addr; u32 *addrp; - unsigned int flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; + upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; /* We only support ports that have a clock frequency properly * encoded in the device-tree. @@ -236,6 +236,23 @@ static int __init add_legacy_pci_port(struct device_node *np, } #endif +static void __init setup_legacy_serial_console(int console) +{ + struct legacy_serial_info *info = + &legacy_serial_infos[console]; + void __iomem *addr; + + if (info->taddr == 0) + return; + addr = ioremap(info->taddr, 0x1000); + if (addr == NULL) + return; + if (info->speed == 0) + info->speed = udbg_probe_uart_speed(addr, info->clock); + DBG("default console speed = %d\n", info->speed); + udbg_init_uart(addr, info->speed, info->clock); +} + /* * This is called very early, as part of setup_system() or eventually * setup_arch(), basically before anything else in this file. This function @@ -318,25 +335,8 @@ void __init find_legacy_serial_ports(void) #endif DBG("legacy_serial_console = %d\n", legacy_serial_console); - - /* udbg is 64 bits only for now, that will change soon though ... 
*/ - while (legacy_serial_console >= 0) { - struct legacy_serial_info *info = - &legacy_serial_infos[legacy_serial_console]; - void __iomem *addr; - - if (info->taddr == 0) - break; - addr = ioremap(info->taddr, 0x1000); - if (addr == NULL) - break; - if (info->speed == 0) - info->speed = udbg_probe_uart_speed(addr, info->clock); - DBG("default console speed = %d\n", info->speed); - udbg_init_uart(addr, info->speed, info->clock); - break; - } - + if (legacy_serial_console >= 0) + setup_legacy_serial_console(legacy_serial_console); DBG(" <- find_legacy_serial_port()\n"); } diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index e789fef4eb8..2cbde865d4f 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -37,7 +37,7 @@ #include <asm/prom.h> #include <asm/vdso_datapage.h> -#define MODULE_VERS "1.6" +#define MODULE_VERS "1.7" #define MODULE_NAME "lparcfg" /* #define LPARCFG_DEBUG */ @@ -56,7 +56,7 @@ static unsigned long get_purr(void) unsigned long sum_purr = 0; int cpu; - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { sum_purr += lppaca[cpu].emulated_time_base; #ifdef PURR_DEBUG @@ -149,17 +149,17 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag) if (rc == 0) /* success, return */ return; /* check for null tag ? */ - if (rc == H_Hardware) + if (rc == H_HARDWARE) printk(KERN_INFO "plpar-hcall (%s) failed with hardware fault\n", tag); - else if (rc == H_Function) + else if (rc == H_FUNCTION) printk(KERN_INFO "plpar-hcall (%s) failed; function not allowed\n", tag); - else if (rc == H_Authority) + else if (rc == H_AUTHORITY) printk(KERN_INFO - "plpar-hcall (%s) failed; not authorized to this function\n", - tag); - else if (rc == H_Parameter) + "plpar-hcall (%s) failed; not authorized to this" + " function\n", tag); + else if (rc == H_PARAMETER) printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n", tag); else @@ -209,7 +209,7 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs) unsigned long dummy; rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy); - if (rc != H_Authority) + if (rc != H_AUTHORITY) log_plpar_hcall_return(rc, "H_PIC"); } @@ -222,7 +222,7 @@ static unsigned long get_purr(void) int cpu; struct cpu_usage *cu; - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { cu = &per_cpu(cpu_usage_array, cpu); sum_purr += cu->current_tb; } @@ -242,7 +242,7 @@ static void parse_system_parameter_string(struct seq_file *m) { int call_status; - char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); + unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); if (!local_buffer) { printk(KERN_ERR "%s %s kmalloc failure at line %d \n", __FILE__, __FUNCTION__, __LINE__); @@ -254,7 +254,8 @@ static void parse_system_parameter_string(struct seq_file *m) call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, NULL, SPLPAR_CHARACTERISTICS_TOKEN, - __pa(rtas_data_buf)); + __pa(rtas_data_buf), + RTAS_DATA_BUF_SIZE); memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); spin_unlock(&rtas_data_buf_lock); @@ -275,7 +276,7 @@ static void parse_system_parameter_string(struct seq_file *m) #ifdef LPARCFG_DEBUG printk(KERN_INFO "success calling get-system-parameter \n"); #endif - splpar_strlen = local_buffer[0] * 16 + local_buffer[1]; + splpar_strlen = local_buffer[0] * 256 + local_buffer[1]; local_buffer += 2; /* step over strlen value */ memset(workbuffer, 0, SPLPAR_MAXLENGTH); @@ -529,13 +530,13 @@ static ssize_t lparcfg_write(struct file *file, const char 
__user * buf, retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr, *new_weight_ptr); - if (retval == H_Success || retval == H_Constrained) { + if (retval == H_SUCCESS || retval == H_CONSTRAINED) { retval = count; - } else if (retval == H_Busy) { + } else if (retval == H_BUSY) { retval = -EBUSY; - } else if (retval == H_Hardware) { + } else if (retval == H_HARDWARE) { retval = -EIO; - } else if (retval == H_Parameter) { + } else if (retval == H_PARAMETER) { retval = -EINVAL; } else { printk(KERN_WARNING "%s: received unknown hv return code %ld", diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c new file mode 100644 index 00000000000..92f4e5f64f0 --- /dev/null +++ b/arch/powerpc/kernel/module_32.c @@ -0,0 +1,320 @@ +/* Kernel module help for PPC. + Copyright (C) 2001 Rusty Russell. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +#include <linux/module.h> +#include <linux/moduleloader.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/cache.h> + +#if 0 +#define DEBUGP printk +#else +#define DEBUGP(fmt , ...) +#endif + +LIST_HEAD(module_bug_list); + +void *module_alloc(unsigned long size) +{ + if (size == 0) + return NULL; + return vmalloc(size); +} + +/* Free memory returned from module_alloc */ +void module_free(struct module *mod, void *module_region) +{ + vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ +} + +/* Count how many different relocations (different symbol, different + addend) */ +static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) +{ + unsigned int i, j, ret = 0; + + /* Sure, this is order(n^2), but it's usually short, and not + time critical */ + for (i = 0; i < num; i++) { + for (j = 0; j < i; j++) { + /* If this addend appeared before, it's + already been counted */ + if (ELF32_R_SYM(rela[i].r_info) + == ELF32_R_SYM(rela[j].r_info) + && rela[i].r_addend == rela[j].r_addend) + break; + } + if (j == i) ret++; + } + return ret; +} + +/* Get the potential trampolines size required of the init and + non-init sections */ +static unsigned long get_plt_size(const Elf32_Ehdr *hdr, + const Elf32_Shdr *sechdrs, + const char *secstrings, + int is_init) +{ + unsigned long ret = 0; + unsigned i; + + /* Everything marked ALLOC (this includes the exported + symbols) */ + for (i = 1; i < hdr->e_shnum; i++) { + /* If it's called *.init*, and we're not init, we're + not interested */ + if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0) + != is_init) + continue; + + /* We don't want to look at debug sections. */ + if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != 0) + continue; + + if (sechdrs[i].sh_type == SHT_RELA) { + DEBUGP("Found relocations in section %u\n", i); + DEBUGP("Ptr: %p. 
Number: %u\n", + (void *)hdr + sechdrs[i].sh_offset, + sechdrs[i].sh_size / sizeof(Elf32_Rela)); + ret += count_relocs((void *)hdr + + sechdrs[i].sh_offset, + sechdrs[i].sh_size + / sizeof(Elf32_Rela)) + * sizeof(struct ppc_plt_entry); + } + } + + return ret; +} + +int module_frob_arch_sections(Elf32_Ehdr *hdr, + Elf32_Shdr *sechdrs, + char *secstrings, + struct module *me) +{ + unsigned int i; + + /* Find .plt and .init.plt sections */ + for (i = 0; i < hdr->e_shnum; i++) { + if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0) + me->arch.init_plt_section = i; + else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0) + me->arch.core_plt_section = i; + } + if (!me->arch.core_plt_section || !me->arch.init_plt_section) { + printk("Module doesn't contain .plt or .init.plt sections.\n"); + return -ENOEXEC; + } + + /* Override their sizes */ + sechdrs[me->arch.core_plt_section].sh_size + = get_plt_size(hdr, sechdrs, secstrings, 0); + sechdrs[me->arch.init_plt_section].sh_size + = get_plt_size(hdr, sechdrs, secstrings, 1); + return 0; +} + +int apply_relocate(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *module) +{ + printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", + module->name); + return -ENOEXEC; +} + +static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) +{ + if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) + && entry->jump[1] == 0x396b0000 + (val & 0xffff)) + return 1; + return 0; +} + +/* Set up a trampoline in the PLT to bounce us to the distant function */ +static uint32_t do_plt_call(void *location, + Elf32_Addr val, + Elf32_Shdr *sechdrs, + struct module *mod) +{ + struct ppc_plt_entry *entry; + + DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); + /* Init, or core PLT? */ + if (location >= mod->module_core + && location < mod->module_core + mod->core_size) + entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; + else + entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; + + /* Find this entry, or if that fails, the next avail. entry */ + while (entry->jump[0]) { + if (entry_matches(entry, val)) return (uint32_t)entry; + entry++; + } + + /* Stolen from Paul Mackerras as well... */ + entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ + entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ + entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ + entry->jump[3] = 0x4e800420; /* bctr */ + + DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); + return (uint32_t)entry; +} + +int apply_relocate_add(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *module) +{ + unsigned int i; + Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + uint32_t value; + + DEBUGP("Applying ADD relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rela[i].r_offset; + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rela[i].r_info); + /* `Everything is relative'. 
*/ + value = sym->st_value + rela[i].r_addend; + + switch (ELF32_R_TYPE(rela[i].r_info)) { + case R_PPC_ADDR32: + /* Simply set it */ + *(uint32_t *)location = value; + break; + + case R_PPC_ADDR16_LO: + /* Low half of the symbol */ + *(uint16_t *)location = value; + break; + + case R_PPC_ADDR16_HA: + /* Sign-adjusted lower 16 bits: PPC ELF ABI says: + (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF. + This is the same, only sane. + */ + *(uint16_t *)location = (value + 0x8000) >> 16; + break; + + case R_PPC_REL24: + if ((int)(value - (uint32_t)location) < -0x02000000 + || (int)(value - (uint32_t)location) >= 0x02000000) + value = do_plt_call(location, value, + sechdrs, module); + + /* Only replace bits 2 through 26 */ + DEBUGP("REL24 value = %08X. location = %08X\n", + value, (uint32_t)location); + DEBUGP("Location before: %08X.\n", + *(uint32_t *)location); + *(uint32_t *)location + = (*(uint32_t *)location & ~0x03fffffc) + | ((value - (uint32_t)location) + & 0x03fffffc); + DEBUGP("Location after: %08X.\n", + *(uint32_t *)location); + DEBUGP("ie. jump to %08X+%08X = %08X\n", + *(uint32_t *)location & 0x03fffffc, + (uint32_t)location, + (*(uint32_t *)location & 0x03fffffc) + + (uint32_t)location); + break; + + case R_PPC_REL32: + /* 32-bit relative jump. */ + *(uint32_t *)location = value - (uint32_t)location; + break; + + default: + printk("%s: unknown ADD relocation: %u\n", + module->name, + ELF32_R_TYPE(rela[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + char *secstrings; + unsigned int i; + + me->arch.bug_table = NULL; + me->arch.num_bugs = 0; + + /* Find the __bug_table section, if present */ + secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + for (i = 1; i < hdr->e_shnum; i++) { + if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table")) + continue; + me->arch.bug_table = (void *) sechdrs[i].sh_addr; + me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry); + break; + } + + /* + * Strictly speaking this should have a spinlock to protect against + * traversals, but since we only traverse on BUG()s, a spinlock + * could potentially lead to deadlock and thus be counter-productive. 
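The R_PPC_ADDR16_HA case relies on the identity that the adjusted high half plus the sign-extended low half reconstructs the original 32-bit value, and R_PPC_REL24 is a 26-bit branch with roughly +/-32MB of reach, which is why out-of-range calls are redirected through do_plt_call(). A self-contained check of both facts (the sample addresses are arbitrary):

	/* Verify the @ha/@l split used by R_PPC_ADDR16_HA / R_PPC_ADDR16_LO and
	 * the +/-32MB reach test used for R_PPC_REL24 in apply_relocate_add(). */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t ha(uint32_t x) { return (x + 0x8000) >> 16; }
	static uint16_t lo(uint32_t x) { return x & 0xffff; }

	static int rel24_in_range(uint32_t target, uint32_t site)
	{
		int32_t delta = (int32_t)(target - site);
		return delta >= -0x02000000 && delta < 0x02000000;
	}

	int main(void)
	{
		uint32_t samples[] = { 0x00000000, 0x00007fff, 0x00008000,
				       0xc0ffee10, 0xffffffff };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			uint32_t x = samples[i];
			/* (ha << 16) + sign-extended lo must give x back */
			assert((uint32_t)(((uint32_t)ha(x) << 16) +
					  (int16_t)lo(x)) == x);
		}
		printf("branch 0x%08x -> 0x%08x: %s\n", 0xc0000000u, 0xc1ffffffu,
		       rel24_in_range(0xc1ffffff, 0xc0000000) ? "direct" : "via PLT");
		printf("branch 0x%08x -> 0x%08x: %s\n", 0xc0000000u, 0xc2000000u,
		       rel24_in_range(0xc2000000, 0xc0000000) ? "direct" : "via PLT");
		return 0;
	}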
+ */ + list_add(&me->arch.bug_list, &module_bug_list); + + return 0; +} + +void module_arch_cleanup(struct module *mod) +{ + list_del(&mod->arch.bug_list); +} + +struct bug_entry *module_find_bug(unsigned long bugaddr) +{ + struct mod_arch_specific *mod; + unsigned int i; + struct bug_entry *bug; + + list_for_each_entry(mod, &module_bug_list, bug_list) { + bug = mod->bug_table; + for (i = 0; i < mod->num_bugs; ++i, ++bug) + if (bugaddr == bug->bug_addr) + return bug; + } + return NULL; +} diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index fd7db8d542d..ada50aa5b60 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -160,7 +160,7 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file, case IOC_NVRAM_GET_OFFSET: { int part, offset; - if (_machine != PLATFORM_POWERMAC) + if (!machine_is(powermac)) return -EINVAL; if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) return -EFAULT; @@ -174,8 +174,9 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file, return 0; } #endif /* CONFIG_PPC_PMAC */ + default: + return -EINVAL; } - return -EINVAL; } struct file_operations nvram_fops = { @@ -443,7 +444,7 @@ static int nvram_setup_partition(void) * in our nvram, as Apple defined partitions use pretty much * all of the space */ - if (_machine == PLATFORM_POWERMAC) + if (machine_is(powermac)) return -ENOSPC; /* see if we have an OS partition that meets our needs. diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 5d1b708086b..f505a8827e3 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -56,14 +56,11 @@ struct lppaca lppaca[] = { * processors. The processor VPD array needs one entry per physical * processor (not thread). */ -#define PACA_INIT_COMMON(number, start, asrr, asrv) \ +#define PACA_INIT_COMMON(number) \ .lppaca_ptr = &lppaca[number], \ .lock_token = 0x8000, \ .paca_index = (number), /* Paca Index */ \ .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ - .stab_real = (asrr), /* Real pointer to segment table */ \ - .stab_addr = (asrv), /* Virt pointer to segment table */ \ - .cpu_start = (start), /* Processor start */ \ .hw_cpu_id = 0xffff, #ifdef CONFIG_PPC_ISERIES @@ -72,30 +69,20 @@ struct lppaca lppaca[] = { #define PACA_INIT(number) \ { \ - PACA_INIT_COMMON(number, 0, 0, 0) \ - PACA_INIT_ISERIES(number) \ -} - -#define BOOTCPU_PACA_INIT(number) \ -{ \ - PACA_INIT_COMMON(number, 1, 0, (u64)&initial_stab) \ + PACA_INIT_COMMON(number) \ PACA_INIT_ISERIES(number) \ } #else #define PACA_INIT(number) \ { \ - PACA_INIT_COMMON(number, 0, 0, 0) \ + PACA_INIT_COMMON(number) \ } -#define BOOTCPU_PACA_INIT(number) \ -{ \ - PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, (u64)&initial_stab) \ -} #endif struct paca_struct paca[] = { - BOOTCPU_PACA_INIT(0), + PACA_INIT(0), #if NR_CPUS > 1 PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3), #if NR_CPUS > 4 diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 704c846b2b0..b129d2e4b75 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -787,7 +787,7 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) * fix has to be done by making the remapping per-host and always * filling the pci_to_OF map. 
--BenH */ - if (_machine == _MACH_Pmac && busnr >= 0xf0) + if (machine_is(powermac) && busnr >= 0xf0) busnr -= 0xf0; else #endif @@ -1728,7 +1728,7 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) * (bus 0 is HT root), we return the AGP one instead. */ #ifdef CONFIG_PPC_PMAC - if (_machine == _MACH_Pmac && machine_is_compatible("MacRISC4")) + if (machine_is(powermac) && machine_is_compatible("MacRISC4")) if (bus == 0) bus = 0xf0; #endif /* CONFIG_PPC_PMAC */ diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index ba92bab7cc2..4c4449be81c 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -78,6 +78,7 @@ int global_phb_number; /* Global phb counter */ /* Cached ISA bridge dev. */ struct pci_dev *ppc64_isabridge_dev = NULL; +EXPORT_SYMBOL_GPL(ppc64_isabridge_dev); static void fixup_broken_pcnet32(struct pci_dev* dev) { diff --git a/arch/powerpc/kernel/perfmon_fsl_booke.c b/arch/powerpc/kernel/perfmon_fsl_booke.c new file mode 100644 index 00000000000..32455dfcc36 --- /dev/null +++ b/arch/powerpc/kernel/perfmon_fsl_booke.c @@ -0,0 +1,222 @@ +/* kernel/perfmon_fsl_booke.c + * Freescale Book-E Performance Monitor code + * + * Author: Andy Fleming + * Copyright (c) 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/ptrace.h> +#include <linux/slab.h> +#include <linux/user.h> +#include <linux/a.out.h> +#include <linux/interrupt.h> +#include <linux/config.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/prctl.h> + +#include <asm/pgtable.h> +#include <asm/uaccess.h> +#include <asm/system.h> +#include <asm/io.h> +#include <asm/reg.h> +#include <asm/xmon.h> +#include <asm/pmc.h> + +static inline u32 get_pmlca(int ctr); +static inline void set_pmlca(int ctr, u32 pmlca); + +static inline u32 get_pmlca(int ctr) +{ + u32 pmlca; + + switch (ctr) { + case 0: + pmlca = mfpmr(PMRN_PMLCA0); + break; + case 1: + pmlca = mfpmr(PMRN_PMLCA1); + break; + case 2: + pmlca = mfpmr(PMRN_PMLCA2); + break; + case 3: + pmlca = mfpmr(PMRN_PMLCA3); + break; + default: + panic("Bad ctr number\n"); + } + + return pmlca; +} + +static inline void set_pmlca(int ctr, u32 pmlca) +{ + switch (ctr) { + case 0: + mtpmr(PMRN_PMLCA0, pmlca); + break; + case 1: + mtpmr(PMRN_PMLCA1, pmlca); + break; + case 2: + mtpmr(PMRN_PMLCA2, pmlca); + break; + case 3: + mtpmr(PMRN_PMLCA3, pmlca); + break; + default: + panic("Bad ctr number\n"); + } +} + +void init_pmc_stop(int ctr) +{ + u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU | + PMLCA_FCM1 | PMLCA_FCM0); + u32 pmlcb = 0; + + switch (ctr) { + case 0: + mtpmr(PMRN_PMLCA0, pmlca); + mtpmr(PMRN_PMLCB0, pmlcb); + break; + case 1: + mtpmr(PMRN_PMLCA1, pmlca); + mtpmr(PMRN_PMLCB1, pmlcb); + break; + case 2: + mtpmr(PMRN_PMLCA2, pmlca); + mtpmr(PMRN_PMLCB2, pmlcb); + break; + case 3: + mtpmr(PMRN_PMLCA3, pmlca); + mtpmr(PMRN_PMLCB3, pmlcb); + break; + default: + panic("Bad ctr number!\n"); + } +} + +void set_pmc_event(int ctr, int event) +{ + u32 pmlca; + + pmlca = get_pmlca(ctr); + + pmlca = (pmlca & ~PMLCA_EVENT_MASK) | + ((event << PMLCA_EVENT_SHIFT) & + PMLCA_EVENT_MASK); + + 
set_pmlca(ctr, pmlca); +} + +void set_pmc_user_kernel(int ctr, int user, int kernel) +{ + u32 pmlca; + + pmlca = get_pmlca(ctr); + + if(user) + pmlca &= ~PMLCA_FCU; + else + pmlca |= PMLCA_FCU; + + if(kernel) + pmlca &= ~PMLCA_FCS; + else + pmlca |= PMLCA_FCS; + + set_pmlca(ctr, pmlca); +} + +void set_pmc_marked(int ctr, int mark0, int mark1) +{ + u32 pmlca = get_pmlca(ctr); + + if(mark0) + pmlca &= ~PMLCA_FCM0; + else + pmlca |= PMLCA_FCM0; + + if(mark1) + pmlca &= ~PMLCA_FCM1; + else + pmlca |= PMLCA_FCM1; + + set_pmlca(ctr, pmlca); +} + +void pmc_start_ctr(int ctr, int enable) +{ + u32 pmlca = get_pmlca(ctr); + + pmlca &= ~PMLCA_FC; + + if (enable) + pmlca |= PMLCA_CE; + else + pmlca &= ~PMLCA_CE; + + set_pmlca(ctr, pmlca); +} + +void pmc_start_ctrs(int enable) +{ + u32 pmgc0 = mfpmr(PMRN_PMGC0); + + pmgc0 &= ~PMGC0_FAC; + pmgc0 |= PMGC0_FCECE; + + if (enable) + pmgc0 |= PMGC0_PMIE; + else + pmgc0 &= ~PMGC0_PMIE; + + mtpmr(PMRN_PMGC0, pmgc0); +} + +void pmc_stop_ctrs(void) +{ + u32 pmgc0 = mfpmr(PMRN_PMGC0); + + pmgc0 |= PMGC0_FAC; + + pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE); + + mtpmr(PMRN_PMGC0, pmgc0); +} + +void dump_pmcs(void) +{ + printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0)); + printk("pmc\t\tpmlca\t\tpmlcb\n"); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0), + mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0)); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1), + mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1)); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2), + mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2)); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3), + mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3)); +} + +EXPORT_SYMBOL(init_pmc_stop); +EXPORT_SYMBOL(set_pmc_event); +EXPORT_SYMBOL(set_pmc_user_kernel); +EXPORT_SYMBOL(set_pmc_marked); +EXPORT_SYMBOL(pmc_start_ctr); +EXPORT_SYMBOL(pmc_start_ctrs); +EXPORT_SYMBOL(pmc_stop_ctrs); +EXPORT_SYMBOL(dump_pmcs); diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c index 7ba42a405f4..3c2cf661f6d 100644 --- a/arch/powerpc/kernel/proc_ppc64.c +++ b/arch/powerpc/kernel/proc_ppc64.c @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <linux/kernel.h> +#include <asm/machdep.h> #include <asm/vdso_datapage.h> #include <asm/rtas.h> #include <asm/uaccess.h> @@ -51,7 +52,7 @@ static int __init proc_ppc64_create(void) if (!root) return 1; - if (!(platform_is_pseries() || _machine == PLATFORM_CELL)) + if (!machine_is(pseries) && !machine_is(cell)) return 0; if (!proc_mkdir("rtas", root)) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index f698aa77127..2dd47d2dd99 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -45,6 +45,7 @@ #include <asm/prom.h> #include <asm/machdep.h> #include <asm/time.h> +#include <asm/syscalls.h> #ifdef CONFIG_PPC64 #include <asm/firmware.h> #endif @@ -362,7 +363,11 @@ static void show_instructions(struct pt_regs *regs) if (!(i % 8)) printk("\n"); - if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) { + /* We use __get_user here *only* to avoid an OOPS on a + * bad address because the pc *should* only be a + * kernel address. 
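A detail worth noting in the perfmon_fsl_booke helpers above: PMLCA_FCU and PMLCA_FCS are freeze bits, so requesting user or supervisor counting clears a bit rather than setting one. A user-space model of set_pmc_user_kernel() with placeholder bit positions (the real PMLCA layout is not reproduced here):

	/* Model of the freeze-bit logic in set_pmc_user_kernel(): a state is
	 * counted only when its freeze bit is clear.  Bit values are placeholders,
	 * not the real PMLCA register layout. */
	#include <stdio.h>
	#include <stdint.h>

	#define PMLCA_FCU (1u << 0)	/* freeze counting in user state (placeholder) */
	#define PMLCA_FCS (1u << 1)	/* freeze counting in supervisor state (placeholder) */

	static uint32_t set_user_kernel(uint32_t pmlca, int user, int kernel)
	{
		if (user)
			pmlca &= ~PMLCA_FCU;
		else
			pmlca |= PMLCA_FCU;
		if (kernel)
			pmlca &= ~PMLCA_FCS;
		else
			pmlca |= PMLCA_FCS;
		return pmlca;
	}

	int main(void)
	{
		uint32_t pmlca = PMLCA_FCU | PMLCA_FCS;	/* start fully frozen */

		pmlca = set_user_kernel(pmlca, 1, 0);	/* count user, not kernel */
		printf("user counting %s, kernel counting %s\n",
		       (pmlca & PMLCA_FCU) ? "off" : "on",
		       (pmlca & PMLCA_FCS) ? "off" : "on");
		return 0;
	}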
+ */ + if (BAD_PC(pc) || __get_user(instr, (unsigned int __user *)pc)) { printk("XXXXXXXX "); } else { if (regs->nip == pc) @@ -765,7 +770,7 @@ out: return error; } -static int validate_sp(unsigned long sp, struct task_struct *p, +int validate_sp(unsigned long sp, struct task_struct *p, unsigned long nbytes) { unsigned long stack_page = (unsigned long)task_stack_page(p); @@ -803,6 +808,8 @@ static int validate_sp(unsigned long sp, struct task_struct *p, #define FRAME_MARKER 2 #endif +EXPORT_SYMBOL(validate_sp); + unsigned long get_wchan(struct task_struct *p) { unsigned long ip, sp; @@ -827,7 +834,6 @@ unsigned long get_wchan(struct task_struct *p) } while (count++ < 16); return 0; } -EXPORT_SYMBOL(get_wchan); static int kstack_depth_to_print = 64; diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d63cd562d9d..4336390bcf3 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -383,14 +383,14 @@ static int __devinit finish_node_interrupts(struct device_node *np, /* Apple uses bits in there in a different way, let's * only keep the real sense bit on macs */ - if (_machine == PLATFORM_POWERMAC) + if (machine_is(powermac)) sense &= 0x1; np->intrs[intrcount].sense = map_mpic_senses[sense]; } #ifdef CONFIG_PPC64 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */ - if (_machine == PLATFORM_POWERMAC && ic && ic->parent) { + if (machine_is(powermac) && ic && ic->parent) { char *name = get_property(ic->parent, "name", NULL); if (name && !strcmp(name, "u3")) np->intrs[intrcount].line += 128; @@ -570,6 +570,18 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, return rc; } +unsigned long __init of_get_flat_dt_root(void) +{ + unsigned long p = ((unsigned long)initial_boot_params) + + initial_boot_params->off_dt_struct; + + while(*((u32 *)p) == OF_DT_NOP) + p += 4; + BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE); + p += 4; + return _ALIGN(p + strlen((char *)p) + 1, 4); +} + /** * This function can be used within scan_flattened_dt callback to get * access to properties @@ -612,6 +624,25 @@ void* __init of_get_flat_dt_prop(unsigned long node, const char *name, } while(1); } +int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) +{ + const char* cp; + unsigned long cplen, l; + + cp = of_get_flat_dt_prop(node, "compatible", &cplen); + if (cp == NULL) + return 0; + while (cplen > 0) { + if (strncasecmp(cp, compat, strlen(compat)) == 0) + return 1; + l = strlen(cp) + 1; + cp += l; + cplen -= l; + } + + return 0; +} + static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, unsigned long align) { @@ -686,7 +717,7 @@ static unsigned long __init unflatten_dt_node(unsigned long mem, #ifdef DEBUG if ((strlen(p) + l + 1) != allocl) { DBG("%s: p: %d, l: %d, a: %d\n", - pathp, strlen(p), l, allocl); + pathp, (int)strlen(p), l, allocl); } #endif p += strlen(p); @@ -854,35 +885,73 @@ void __init unflatten_device_tree(void) DBG(" <- unflatten_device_tree()\n"); } - static int __init early_init_dt_scan_cpus(unsigned long node, - const char *uname, int depth, void *data) + const char *uname, int depth, + void *data) { + static int logical_cpuid = 0; + char *type = of_get_flat_dt_prop(node, "device_type", NULL); +#ifdef CONFIG_ALTIVEC u32 *prop; - unsigned long size; - char *type = of_get_flat_dt_prop(node, "device_type", &size); +#endif + u32 *intserv; + int i, nthreads; + unsigned long len; + int found = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; - boot_cpuid = 
0; - boot_cpuid_phys = 0; - if (initial_boot_params && initial_boot_params->version >= 2) { - /* version 2 of the kexec param format adds the phys cpuid - * of booted proc. - */ - boot_cpuid_phys = initial_boot_params->boot_cpuid_phys; + /* Get physical cpuid */ + intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len); + if (intserv) { + nthreads = len / sizeof(int); } else { - /* Check if it's the boot-cpu, set it's hw index now */ - if (of_get_flat_dt_prop(node, + intserv = of_get_flat_dt_prop(node, "reg", NULL); + nthreads = 1; + } + + /* + * Now see if any of these threads match our boot cpu. + * NOTE: This must match the parsing done in smp_setup_cpu_maps. + */ + for (i = 0; i < nthreads; i++) { + /* + * version 2 of the kexec param format adds the phys cpuid of + * booted proc. + */ + if (initial_boot_params && initial_boot_params->version >= 2) { + if (intserv[i] == + initial_boot_params->boot_cpuid_phys) { + found = 1; + break; + } + } else { + /* + * Check if it's the boot-cpu, set it's hw index now, + * unfortunately this format did not support booting + * off secondary threads. + */ + if (of_get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) { - prop = of_get_flat_dt_prop(node, "reg", NULL); - if (prop != NULL) - boot_cpuid_phys = *prop; + found = 1; + break; + } } + +#ifdef CONFIG_SMP + /* logical cpu id is always 0 on UP kernels */ + logical_cpuid++; +#endif + } + + if (found) { + DBG("boot cpu: logical %d physical %d\n", logical_cpuid, + intserv[i]); + boot_cpuid = logical_cpuid; + set_hard_smp_processor_id(boot_cpuid, intserv[i]); } - set_hard_smp_processor_id(0, boot_cpuid_phys); #ifdef CONFIG_ALTIVEC /* Check if we have a VMX and eventually update CPU features */ @@ -901,16 +970,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_PPC_PSERIES - /* - * Check for an SMT capable CPU and set the CPU feature. 
We do - * this by looking at the size of the ibm,ppc-interrupt-server#s - * property - */ - prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", - &size); - cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; - if (prop && ((size / sizeof(u32)) > 1)) + if (nthreads > 1) cur_cpu_spec->cpu_features |= CPU_FTR_SMT; + else + cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; #endif return 0; @@ -919,7 +982,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node, static int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data) { - u32 *prop; unsigned long *lprop; unsigned long l; char *p; @@ -930,14 +992,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node, (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) return 0; - /* get platform type */ - prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL); - if (prop == NULL) - return 0; -#ifdef CONFIG_PPC_MULTIPLATFORM - _machine = *prop; -#endif - #ifdef CONFIG_PPC64 /* check if iommu is forced on or off */ if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) @@ -964,15 +1018,15 @@ static int __init early_init_dt_scan_chosen(unsigned long node, * set of RTAS infos now if available */ { - u64 *basep, *entryp; + u64 *basep, *entryp, *sizep; basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL); entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); - prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL); - if (basep && entryp && prop) { + sizep = of_get_flat_dt_prop(node, "linux,rtas-size", NULL); + if (basep && entryp && sizep) { rtas.base = *basep; rtas.entry = *entryp; - rtas.size = *prop; + rtas.size = *sizep; } } #endif /* CONFIG_PPC_RTAS */ @@ -1001,25 +1055,13 @@ static int __init early_init_dt_scan_chosen(unsigned long node, if (strstr(cmd_line, "mem=")) { char *p, *q; - unsigned long maxmem = 0; for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) { q = p + 4; if (p > cmd_line && p[-1] != ' ') continue; - maxmem = simple_strtoul(q, &q, 0); - if (*q == 'k' || *q == 'K') { - maxmem <<= 10; - ++q; - } else if (*q == 'm' || *q == 'M') { - maxmem <<= 20; - ++q; - } else if (*q == 'g' || *q == 'G') { - maxmem <<= 30; - ++q; - } + memory_limit = memparse(q, &q); } - memory_limit = maxmem; } /* break now */ @@ -1755,7 +1797,7 @@ static int of_finish_dynamic_node(struct device_node *node) /* We don't support that function on PowerMac, at least * not yet */ - if (_machine == PLATFORM_POWERMAC) + if (machine_is(powermac)) return -ENODEV; /* fix up new node's linux_phandle field */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 813c2cd194c..7e4d54821a0 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -180,6 +180,16 @@ static unsigned long __initdata prom_tce_alloc_start; static unsigned long __initdata prom_tce_alloc_end; #endif +/* Platforms codes are now obsolete in the kernel. Now only used within this + * file and ultimately gone too. 
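The mem= handling above swaps the hand-rolled k/m/g suffix loop for the generic memparse() helper. A simplified user-space stand-in shows what that helper does with the command-line fragment (the real memparse also accepts further suffixes):

	/* Simplified user-space stand-in for the kernel's memparse(): parse a
	 * size with an optional K/M/G suffix, as now used for "mem=" above. */
	#include <stdio.h>
	#include <stdlib.h>

	static unsigned long long memparse_demo(const char *s, char **retptr)
	{
		unsigned long long val = strtoull(s, retptr, 0);

		switch (**retptr) {
		case 'G': case 'g': val <<= 10; /* fall through */
		case 'M': case 'm': val <<= 10; /* fall through */
		case 'K': case 'k': val <<= 10; (*retptr)++; break;
		default: break;
		}
		return val;
	}

	int main(void)
	{
		const char *args[] = { "512M", "0x4000000", "1G" };
		unsigned int i;

		for (i = 0; i < 3; i++) {
			char *end;
			printf("mem=%-10s -> %llu bytes\n", args[i],
			       memparse_demo(args[i], &end));
		}
		return 0;
	}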
Feel free to change them if you need, they + * are not shared with anything outside of this file anymore + */ +#define PLATFORM_PSERIES 0x0100 +#define PLATFORM_PSERIES_LPAR 0x0101 +#define PLATFORM_LPAR 0x0001 +#define PLATFORM_POWERMAC 0x0400 +#define PLATFORM_GENERIC 0x0500 + static int __initdata of_platform; static char __initdata prom_cmd_line[COMMAND_LINE_SIZE]; @@ -397,6 +407,11 @@ static void __init __attribute__((noreturn)) prom_panic(const char *reason) reason = PTRRELOC(reason); #endif prom_print(reason); + /* Do not call exit because it clears the screen on pmac + * it also causes some sort of double-fault on early pmacs */ + if (RELOC(of_platform) == PLATFORM_POWERMAC) + asm("trap\n"); + /* ToDo: should put up an SRC here on p/iSeries */ call_prom("exit", 0, 0); @@ -1487,7 +1502,10 @@ static int __init prom_find_machine_type(void) int len, i = 0; #ifdef CONFIG_PPC64 phandle rtas; + int x; #endif + + /* Look for a PowerMac */ len = prom_getprop(_prom->root, "compatible", compat, sizeof(compat)-1); if (len > 0) { @@ -1500,28 +1518,35 @@ static int __init prom_find_machine_type(void) if (strstr(p, RELOC("Power Macintosh")) || strstr(p, RELOC("MacRISC"))) return PLATFORM_POWERMAC; -#ifdef CONFIG_PPC64 - if (strstr(p, RELOC("Momentum,Maple"))) - return PLATFORM_MAPLE; - if (strstr(p, RELOC("IBM,CPB"))) - return PLATFORM_CELL; -#endif i += sl + 1; } } #ifdef CONFIG_PPC64 + /* If not a mac, try to figure out if it's an IBM pSeries or any other + * PAPR compliant platform. We assume it is if : + * - /device_type is "chrp" (please, do NOT use that for future + * non-IBM designs ! + * - it has /rtas + */ + len = prom_getprop(_prom->root, "device_type", + compat, sizeof(compat)-1); + if (len <= 0) + return PLATFORM_GENERIC; + if (strncmp(compat, RELOC("chrp"), 4)) + return PLATFORM_GENERIC; + /* Default to pSeries. We need to know if we are running LPAR */ rtas = call_prom("finddevice", 1, 1, ADDR("/rtas")); - if (PHANDLE_VALID(rtas)) { - int x = prom_getproplen(rtas, "ibm,hypertas-functions"); - if (x != PROM_ERROR) { - prom_printf("Hypertas detected, assuming LPAR !\n"); - return PLATFORM_PSERIES_LPAR; - } + if (!PHANDLE_VALID(rtas)) + return PLATFORM_GENERIC; + x = prom_getproplen(rtas, "ibm,hypertas-functions"); + if (x != PROM_ERROR) { + prom_printf("Hypertas detected, assuming LPAR !\n"); + return PLATFORM_PSERIES_LPAR; } return PLATFORM_PSERIES; #else - return PLATFORM_CHRP; + return PLATFORM_GENERIC; #endif } @@ -2029,7 +2054,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, { struct prom_t *_prom; unsigned long hdr; - u32 getprop_rval; unsigned long offset = reloc_offset(); #ifdef CONFIG_PPC32 @@ -2060,6 +2084,12 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_init_stdout(); + /* + * Get default machine type. At this point, we do not differentiate + * between pSeries SMP and pSeries LPAR + */ + RELOC(of_platform) = prom_find_machine_type(); + /* Bail if this is a kdump kernel. */ if (PHYSICAL_START > 0) prom_panic("Error: You can't boot a kdump kernel from OF!\n"); @@ -2069,15 +2099,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_check_initrd(r3, r4); - /* - * Get default machine type. 
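prom_find_machine_type() above scans the root node's "compatible" property, a buffer of back-to-back NUL-terminated strings, for a Power Macintosh match before falling back to the chrp/RTAS test. A minimal sketch of that packed-string walk (the property contents are invented for illustration):

	/* Walk a "compatible" property (back-to-back NUL-terminated strings)
	 * the way prom_find_machine_type() does when looking for a PowerMac.
	 * The property contents below are sample data, not real firmware output. */
	#include <stdio.h>
	#include <string.h>

	static int compat_has(const char *prop, int len, const char *needle)
	{
		int i = 0;

		while (i < len) {
			const char *p = prop + i;
			if (strstr(p, needle))
				return 1;
			i += strlen(p) + 1;	/* skip to the next packed string */
		}
		return 0;
	}

	int main(void)
	{
		/* e.g. what an Apple machine might report (illustrative only) */
		const char compat[] = "PowerMac7,2\0MacRISC4\0Power Macintosh";

		printf("PowerMac: %s\n",
		       compat_has(compat, sizeof(compat), "Power Macintosh") ? "yes" : "no");
		printf("Maple:    %s\n",
		       compat_has(compat, sizeof(compat), "Momentum,Maple") ? "yes" : "no");
		return 0;
	}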
At this point, we do not differentiate - * between pSeries SMP and pSeries LPAR - */ - RELOC(of_platform) = prom_find_machine_type(); - getprop_rval = RELOC(of_platform); - prom_setprop(_prom->chosen, "/chosen", "linux,platform", - &getprop_rval, sizeof(getprop_rval)); - #ifdef CONFIG_PPC_PSERIES /* * On pSeries, inform the firmware about our capabilities diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index 1f03fb28cc0..9c9ad1fa9cc 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c @@ -257,12 +257,12 @@ static int __init proc_rtas_init(void) { struct proc_dir_entry *entry; - if (_machine != PLATFORM_PSERIES && _machine != PLATFORM_PSERIES_LPAR) - return 1; + if (!machine_is(pseries)) + return -ENODEV; rtas_node = of_find_node_by_name(NULL, "rtas"); if (rtas_node == NULL) - return 1; + return -ENODEV; entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL); if (entry) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index b5b2add7ad1..0112318213a 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -25,6 +25,7 @@ #include <asm/hvcall.h> #include <asm/semaphore.h> #include <asm/machdep.h> +#include <asm/firmware.h> #include <asm/page.h> #include <asm/param.h> #include <asm/system.h> @@ -32,6 +33,7 @@ #include <asm/uaccess.h> #include <asm/lmb.h> #include <asm/udbg.h> +#include <asm/syscalls.h> struct rtas_t rtas = { .lock = SPIN_LOCK_UNLOCKED @@ -576,26 +578,26 @@ static void rtas_percpu_suspend_me(void *info) * We use "waiting" to indicate our state. As long * as it is >0, we are still trying to all join up. * If it goes to 0, we have successfully joined up and - * one thread got H_Continue. If any error happens, + * one thread got H_CONTINUE. If any error happens, * we set it to <0. */ local_irq_save(flags); do { rc = plpar_hcall_norets(H_JOIN); smp_rmb(); - } while (rc == H_Success && data->waiting > 0); - if (rc == H_Success) + } while (rc == H_SUCCESS && data->waiting > 0); + if (rc == H_SUCCESS) goto out; - if (rc == H_Continue) { + if (rc == H_CONTINUE) { data->waiting = 0; data->args->args[data->args->nargs] = rtas_call(ibm_suspend_me_token, 0, 1, NULL); - for_each_cpu(i) + for_each_possible_cpu(i) plpar_hcall_norets(H_PROD,i); } else { data->waiting = -EBUSY; - printk(KERN_ERR "Error on H_Join hypervisor call\n"); + printk(KERN_ERR "Error on H_JOIN hypervisor call\n"); } out: @@ -622,9 +624,9 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) printk(KERN_ERR "Error doing global join\n"); /* Prod each CPU. This won't hurt, and will wake - * anyone we successfully put to sleep with H_Join + * anyone we successfully put to sleep with H_JOIN. */ - for_each_cpu(i) + for_each_possible_cpu(i) plpar_hcall_norets(H_PROD, i); return data.waiting; @@ -767,7 +769,7 @@ void __init rtas_initialize(void) * the stop-self token if any */ #ifdef CONFIG_PPC64 - if (_machine == PLATFORM_PSERIES_LPAR) { + if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX); ibm_suspend_me_token = rtas_token("ibm,suspend-me"); } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index c1d62bf11f2..1d93e73a700 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -9,6 +9,9 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ + +#undef DEBUG + #include <linux/config.h> #include <linux/module.h> #include <linux/string.h> @@ -18,6 +21,7 @@ #include <linux/reboot.h> #include <linux/delay.h> #include <linux/initrd.h> +#include <linux/platform_device.h> #include <linux/ide.h> #include <linux/seq_file.h> #include <linux/ioport.h> @@ -41,6 +45,7 @@ #include <asm/time.h> #include <asm/cputable.h> #include <asm/sections.h> +#include <asm/firmware.h> #include <asm/btext.h> #include <asm/nvram.h> #include <asm/setup.h> @@ -56,8 +61,6 @@ #include "setup.h" -#undef DEBUG - #ifdef DEBUG #include <asm/udbg.h> #define DBG(fmt...) udbg_printf(fmt) @@ -65,10 +68,12 @@ #define DBG(fmt...) #endif -#ifdef CONFIG_PPC_MULTIPLATFORM -int _machine = 0; -EXPORT_SYMBOL(_machine); -#endif +/* The main machine-dep calls structure + */ +struct machdep_calls ppc_md; +EXPORT_SYMBOL(ppc_md); +struct machdep_calls *machine_id; +EXPORT_SYMBOL(machine_id); unsigned long klimit = (unsigned long) _end; @@ -168,7 +173,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); #endif /* CONFIG_SMP && CONFIG_PPC32 */ seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); - + if (ppc_md.name) + seq_printf(m, "platform\t: %s\n", ppc_md.name); if (ppc_md.show_cpuinfo != NULL) ppc_md.show_cpuinfo(m); @@ -352,12 +358,13 @@ void __init check_for_initrd(void) * must be called before using this. * * While we're here, we may as well set the "physical" cpu ids in the paca. + * + * NOTE: This must match the parsing done in early_init_dt_scan_cpus. */ void __init smp_setup_cpu_maps(void) { struct device_node *dn = NULL; int cpu = 0; - int swap_cpuid = 0; while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { int *intserv; @@ -376,30 +383,17 @@ void __init smp_setup_cpu_maps(void) for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { cpu_set(cpu, cpu_present_map); set_hard_smp_processor_id(cpu, intserv[j]); - - if (intserv[j] == boot_cpuid_phys) - swap_cpuid = cpu; cpu_set(cpu, cpu_possible_map); cpu++; } } - /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that - * boot cpu is logical 0. - */ - if (boot_cpuid_phys != get_hard_smp_processor_id(0)) { - u32 tmp; - tmp = get_hard_smp_processor_id(0); - set_hard_smp_processor_id(0, boot_cpuid_phys); - set_hard_smp_processor_id(swap_cpuid, tmp); - } - #ifdef CONFIG_PPC64 /* * On pSeries LPAR, we need to know how many cpus * could possibly be added to this partition. */ - if (_machine == PLATFORM_PSERIES_LPAR && + if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && (dn = of_find_node_by_path("/rtas"))) { int num_addr_cell, num_size_cell, maxcpus; unsigned int *ireg; @@ -438,7 +432,7 @@ void __init smp_setup_cpu_maps(void) /* * Do the sibling map; assume only two threads per processor. 
*/ - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { cpu_set(cpu, cpu_sibling_map[cpu]); if (cpu_has_feature(CPU_FTR_SMT)) cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); @@ -468,3 +462,57 @@ static int __init early_xmon(char *p) } early_param("xmon", early_xmon); #endif + +static __init int add_pcspkr(void) +{ + struct device_node *np; + struct platform_device *pd; + int ret; + + np = of_find_compatible_node(NULL, NULL, "pnpPNP,100"); + of_node_put(np); + if (!np) + return -ENODEV; + + pd = platform_device_alloc("pcspkr", -1); + if (!pd) + return -ENOMEM; + + ret = platform_device_add(pd); + if (ret) + platform_device_put(pd); + + return ret; +} +device_initcall(add_pcspkr); + +void probe_machine(void) +{ + extern struct machdep_calls __machine_desc_start; + extern struct machdep_calls __machine_desc_end; + + /* + * Iterate all ppc_md structures until we find the proper + * one for the current machine type + */ + DBG("Probing machine type ...\n"); + + for (machine_id = &__machine_desc_start; + machine_id < &__machine_desc_end; + machine_id++) { + DBG(" %s ...", machine_id->name); + memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls)); + if (ppc_md.probe()) { + DBG(" match !\n"); + break; + } + DBG("\n"); + } + /* What can we do if we didn't find ? */ + if (machine_id >= &__machine_desc_end) { + DBG("No suitable machine found !\n"); + for (;;); + } + + printk(KERN_INFO "Using %s machine description\n", ppc_md.name); +} diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index dc2770df25b..69ac2570134 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -50,12 +50,8 @@ #include <asm/kgdb.h> #endif -extern void platform_init(void); extern void bootx_init(unsigned long r4, unsigned long phys); -extern void ppc6xx_idle(void); -extern void power4_idle(void); - boot_infos_t *boot_infos; struct ide_machdep_calls ppc_ide_md; @@ -70,10 +66,6 @@ unsigned int DMA_MODE_WRITE; int have_of = 1; #ifdef CONFIG_PPC_MULTIPLATFORM -extern void prep_init(void); -extern void pmac_init(void); -extern void chrp_init(void); - dev_t boot_dev; #endif /* CONFIG_PPC_MULTIPLATFORM */ @@ -85,9 +77,6 @@ unsigned long SYSRQ_KEY = 0x54; unsigned long vgacon_remap_base; #endif -struct machdep_calls ppc_md; -EXPORT_SYMBOL(ppc_md); - /* * These are used in binfmt_elf.c to put aux entries on the stack * for each elf executable being started. @@ -111,7 +100,7 @@ unsigned long __init early_init(unsigned long dt_ptr) /* First zero the BSS -- use memset_io, some platforms don't have * caches on yet */ - memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start); + memset_io((void __iomem *)PTRRELOC(&__bss_start), 0, _end - __bss_start); /* * Identify the CPU type and fix up code sections @@ -123,48 +112,6 @@ unsigned long __init early_init(unsigned long dt_ptr) return KERNELBASE + offset; } -#ifdef CONFIG_PPC_MULTIPLATFORM -/* - * The PPC_MULTIPLATFORM version of platform_init... - */ -void __init platform_init(void) -{ - /* if we didn't get any bootinfo telling us what we are... 
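probe_machine() above replaces the old hard-coded platform switch: machine descriptions are collected between __machine_desc_start and __machine_desc_end by the linker, and the first one whose probe() hook succeeds is copied into ppc_md. The same idea modelled with a plain array (the names and probe results are invented):

	/* Model of probe_machine(): scan an array of machine descriptions and
	 * adopt the first whose probe() returns non-zero.  In the kernel the
	 * array bounds come from the __machine_desc_start/_end linker symbols. */
	#include <stdio.h>

	struct machdep_calls_demo {
		const char *name;
		int (*probe)(void);
	};

	static int probe_pseries(void)  { return 0; }	/* pretend: no RTAS found */
	static int probe_powermac(void) { return 1; }	/* pretend: compatible matched */

	static struct machdep_calls_demo machines[] = {
		{ "pSeries",  probe_pseries },
		{ "PowerMac", probe_powermac },
	};

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < sizeof(machines) / sizeof(machines[0]); i++) {
			if (machines[i].probe()) {
				printf("Using %s machine description\n",
				       machines[i].name);
				return 0;
			}
		}
		printf("No suitable machine found !\n");
		return 1;
	}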
*/ - if (_machine == 0) { - /* prep boot loader tells us if we're prep or not */ - if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) ) - _machine = _MACH_prep; - } - -#ifdef CONFIG_PPC_PREP - /* not much more to do here, if prep */ - if (_machine == _MACH_prep) { - prep_init(); - return; - } -#endif - -#ifdef CONFIG_ADB - if (strstr(cmd_line, "adb_sync")) { - extern int __adb_probe_sync; - __adb_probe_sync = 1; - } -#endif /* CONFIG_ADB */ - - switch (_machine) { -#ifdef CONFIG_PPC_PMAC - case _MACH_Pmac: - pmac_init(); - break; -#endif -#ifdef CONFIG_PPC_CHRP - case _MACH_chrp: - chrp_init(); - break; -#endif - } -} -#endif /* * Find out what kind of machine we're on and save any data we need @@ -190,11 +137,12 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys) strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line)); #endif /* CONFIG_CMDLINE */ - /* Base init based on machine type */ - platform_init(); + probe_machine(); #ifdef CONFIG_6xx - ppc_md.power_save = ppc6xx_idle; + if (cpu_has_feature(CPU_FTR_CAN_DOZE) || + cpu_has_feature(CPU_FTR_CAN_NAP)) + ppc_md.power_save = ppc6xx_idle; #endif if (ppc_md.progress) @@ -272,7 +220,7 @@ int __init ppc_init(void) if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); /* register CPU devices */ - for_each_cpu(i) + for_each_possible_cpu(i) register_cpu(&cpu_devices[i], i, NULL); /* call platform init */ @@ -352,12 +300,6 @@ void __init setup_arch(char **cmdline_p) do_init_bootmem(); if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); -#ifdef CONFIG_PPC_OCP - /* Initialize OCP device list */ - ocp_early_init(); - if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab); -#endif - #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif @@ -366,7 +308,4 @@ void __init setup_arch(char **cmdline_p) if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); paging_init(); - - /* this is for modules since _machine can be a define -- Cort */ - ppc_md.ppc_machine = _machine; } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e20c1fae342..13e91c4d70a 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -73,7 +73,6 @@ int have_of = 1; int boot_cpuid = 0; -int boot_cpuid_phys = 0; dev_t boot_dev; u64 ppc64_pft_size; @@ -96,11 +95,6 @@ int dcache_bsize; int icache_bsize; int ucache_bsize; -/* The main machine-dep calls structure - */ -struct machdep_calls ppc_md; -EXPORT_SYMBOL(ppc_md); - #ifdef CONFIG_MAGIC_SYSRQ unsigned long SYSRQ_KEY; #endif /* CONFIG_MAGIC_SYSRQ */ @@ -161,32 +155,6 @@ early_param("smt-enabled", early_smt_enabled); #define check_smt_enabled() #endif /* CONFIG_SMP */ -extern struct machdep_calls pSeries_md; -extern struct machdep_calls pmac_md; -extern struct machdep_calls maple_md; -extern struct machdep_calls cell_md; -extern struct machdep_calls iseries_md; - -/* Ultimately, stuff them in an elf section like initcalls... */ -static struct machdep_calls __initdata *machines[] = { -#ifdef CONFIG_PPC_PSERIES - &pSeries_md, -#endif /* CONFIG_PPC_PSERIES */ -#ifdef CONFIG_PPC_PMAC - &pmac_md, -#endif /* CONFIG_PPC_PMAC */ -#ifdef CONFIG_PPC_MAPLE - &maple_md, -#endif /* CONFIG_PPC_MAPLE */ -#ifdef CONFIG_PPC_CELL - &cell_md, -#endif -#ifdef CONFIG_PPC_ISERIES - &iseries_md, -#endif - NULL -}; - /* * Early initialization entry point. This is called by head.S * with MMU translation disabled. 
We rely on the "feature" of @@ -208,13 +176,10 @@ static struct machdep_calls __initdata *machines[] = { void __init early_setup(unsigned long dt_ptr) { - struct paca_struct *lpaca = get_paca(); - static struct machdep_calls **mach; - /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); - DBG(" -> early_setup()\n"); + DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); /* * Do early initializations using the flattened device @@ -223,22 +188,16 @@ void __init early_setup(unsigned long dt_ptr) */ early_init_devtree(__va(dt_ptr)); - /* - * Iterate all ppc_md structures until we find the proper - * one for the current machine type - */ - DBG("Probing machine type for platform %x...\n", _machine); + /* Now we know the logical id of our boot cpu, setup the paca. */ + setup_boot_paca(); - for (mach = machines; *mach; mach++) { - if ((*mach)->probe(_machine)) - break; - } - /* What can we do if we didn't find ? */ - if (*mach == NULL) { - DBG("No suitable machine found !\n"); - for (;;); - } - ppc_md = **mach; + /* Fix up paca fields required for the boot cpu */ + get_paca()->cpu_start = 1; + get_paca()->stab_real = __pa((u64)&initial_stab); + get_paca()->stab_addr = (u64)&initial_stab; + + /* Probe the machine type */ + probe_machine(); #ifdef CONFIG_CRASH_DUMP kdump_setup(); @@ -256,12 +215,10 @@ void __init early_setup(unsigned long dt_ptr) /* * Initialize stab / SLB management except on iSeries */ - if (!firmware_has_feature(FW_FEATURE_ISERIES)) { - if (cpu_has_feature(CPU_FTR_SLB)) - slb_initialize(); - else - stab_initialize(lpaca->stab_real); - } + if (cpu_has_feature(CPU_FTR_SLB)) + slb_initialize(); + else if (!firmware_has_feature(FW_FEATURE_ISERIES)) + stab_initialize(get_paca()->stab_real); DBG(" <- early_setup()\n"); } @@ -340,7 +297,7 @@ static void __init initialize_cache_info(void) const char *dc, *ic; /* Then read cache informations */ - if (_machine == PLATFORM_POWERMAC) { + if (machine_is(powermac)) { dc = "d-cache-block-size"; ic = "i-cache-block-size"; } else { @@ -484,7 +441,6 @@ void __init setup_system(void) printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller); - printk("platform = 0x%x\n", _machine); printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); printk("ppc64_caches.dcache_line_size = 0x%x\n", ppc64_caches.dline_size); @@ -516,7 +472,7 @@ static void __init irqstack_early_init(void) * interrupt stacks must be under 256MB, we cannot afford to take * SLB misses on them. */ - for_each_cpu(i) { + for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) __va(lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, 0x10000000)); @@ -549,7 +505,7 @@ static void __init emergency_stack_init(void) */ limit = min(0x10000000UL, lmb.rmo_size); - for_each_cpu(i) + for_each_possible_cpu(i) paca[i].emergency_sp = __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE; } @@ -602,12 +558,6 @@ void __init setup_arch(char **cmdline_p) ppc_md.setup_arch(); - /* Use the default idle loop if the platform hasn't provided one. 
*/ - if (NULL == ppc_md.idle_loop) { - ppc_md.idle_loop = default_idle; - printk(KERN_INFO "Using default idle loop\n"); - } - paging_init(); ppc64_boot_msg(0x15, "Setup Done"); } @@ -672,7 +622,7 @@ void __init setup_per_cpu_areas(void) size = PERCPU_ENOUGH_ROOM; #endif - for_each_cpu(i) { + for_each_possible_cpu(i) { ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size); if (!ptr) panic("Cannot allocate cpu data for CPU %d\n", i); diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d7a4e814974..01e3c08cb55 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -42,6 +42,7 @@ #include <asm/uaccess.h> #include <asm/cacheflush.h> +#include <asm/syscalls.h> #include <asm/sigcontext.h> #include <asm/vdso.h> #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 47f910380a6..27f65b95184 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -33,6 +33,7 @@ #include <asm/pgtable.h> #include <asm/unistd.h> #include <asm/cacheflush.h> +#include <asm/syscalls.h> #include <asm/vdso.h> #define DEBUG_SIG 0 @@ -211,7 +212,7 @@ static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs /* Default to using normal stack */ newsp = regs->gpr[1]; - if (ka->sa.sa_flags & SA_ONSTACK) { + if ((ka->sa.sa_flags & SA_ONSTACK) && current->sas_ss_size) { if (! on_sig_stack(regs->gpr[1])) newsp = (current->sas_ss_sp + current->sas_ss_size); } diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 805eaedbc30..530f7dba0bd 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -362,7 +362,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) smp_space_timers(max_cpus); - for_each_cpu(cpu) + for_each_possible_cpu(cpu) if (cpu != boot_cpuid) smp_create_idle(cpu); } diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S new file mode 100644 index 00000000000..69773cc1a85 --- /dev/null +++ b/arch/powerpc/kernel/swsusp_32.S @@ -0,0 +1,349 @@ +#include <linux/config.h> +#include <linux/threads.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/cputable.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + + +/* + * Structure for storing CPU registers on the save area. 
+ */ +#define SL_SP 0 +#define SL_PC 4 +#define SL_MSR 8 +#define SL_SDR1 0xc +#define SL_SPRG0 0x10 /* 4 sprg's */ +#define SL_DBAT0 0x20 +#define SL_IBAT0 0x28 +#define SL_DBAT1 0x30 +#define SL_IBAT1 0x38 +#define SL_DBAT2 0x40 +#define SL_IBAT2 0x48 +#define SL_DBAT3 0x50 +#define SL_IBAT3 0x58 +#define SL_TB 0x60 +#define SL_R2 0x68 +#define SL_CR 0x6c +#define SL_LR 0x70 +#define SL_R12 0x74 /* r12 to r31 */ +#define SL_SIZE (SL_R12 + 80) + + .section .data + .align 5 + +_GLOBAL(swsusp_save_area) + .space SL_SIZE + + + .section .text + .align 5 + +_GLOBAL(swsusp_arch_suspend) + + lis r11,swsusp_save_area@h + ori r11,r11,swsusp_save_area@l + + mflr r0 + stw r0,SL_LR(r11) + mfcr r0 + stw r0,SL_CR(r11) + stw r1,SL_SP(r11) + stw r2,SL_R2(r11) + stmw r12,SL_R12(r11) + + /* Save MSR & SDR1 */ + mfmsr r4 + stw r4,SL_MSR(r11) + mfsdr1 r4 + stw r4,SL_SDR1(r11) + + /* Get a stable timebase and save it */ +1: mftbu r4 + stw r4,SL_TB(r11) + mftb r5 + stw r5,SL_TB+4(r11) + mftbu r3 + cmpw r3,r4 + bne 1b + + /* Save SPRGs */ + mfsprg r4,0 + stw r4,SL_SPRG0(r11) + mfsprg r4,1 + stw r4,SL_SPRG0+4(r11) + mfsprg r4,2 + stw r4,SL_SPRG0+8(r11) + mfsprg r4,3 + stw r4,SL_SPRG0+12(r11) + + /* Save BATs */ + mfdbatu r4,0 + stw r4,SL_DBAT0(r11) + mfdbatl r4,0 + stw r4,SL_DBAT0+4(r11) + mfdbatu r4,1 + stw r4,SL_DBAT1(r11) + mfdbatl r4,1 + stw r4,SL_DBAT1+4(r11) + mfdbatu r4,2 + stw r4,SL_DBAT2(r11) + mfdbatl r4,2 + stw r4,SL_DBAT2+4(r11) + mfdbatu r4,3 + stw r4,SL_DBAT3(r11) + mfdbatl r4,3 + stw r4,SL_DBAT3+4(r11) + mfibatu r4,0 + stw r4,SL_IBAT0(r11) + mfibatl r4,0 + stw r4,SL_IBAT0+4(r11) + mfibatu r4,1 + stw r4,SL_IBAT1(r11) + mfibatl r4,1 + stw r4,SL_IBAT1+4(r11) + mfibatu r4,2 + stw r4,SL_IBAT2(r11) + mfibatl r4,2 + stw r4,SL_IBAT2+4(r11) + mfibatu r4,3 + stw r4,SL_IBAT3(r11) + mfibatl r4,3 + stw r4,SL_IBAT3+4(r11) + +#if 0 + /* Backup various CPU config stuffs */ + bl __save_cpu_setup +#endif + /* Call the low level suspend stuff (we should probably have made + * a stackframe... + */ + bl swsusp_save + + /* Restore LR from the save area */ + lis r11,swsusp_save_area@h + ori r11,r11,swsusp_save_area@l + lwz r0,SL_LR(r11) + mtlr r0 + + blr + + +/* Resume code */ +_GLOBAL(swsusp_arch_resume) + + /* Stop pending alitvec streams and memory accesses */ +BEGIN_FTR_SECTION + DSSALL +END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + sync + + /* Disable MSR:DR to make sure we don't take a TLB or + * hash miss during the copy, as our hash table will + * for a while be unuseable. For .text, we assume we are + * covered by a BAT. This works only for non-G5 at this + * point. G5 will need a better approach, possibly using + * a small temporary hash table filled with large mappings, + * disabling the MMU completely isn't a good option for + * performance reasons. + * (Note that 750's may have the same performance issue as + * the G5 in this case, we should investigate using moving + * BATs for these CPUs) + */ + mfmsr r0 + sync + rlwinm r0,r0,0,28,26 /* clear MSR_DR */ + mtmsr r0 + sync + isync + + /* Load ptr the list of pages to copy in r3 */ + lis r11,(pagedir_nosave - KERNELBASE)@h + ori r11,r11,pagedir_nosave@l + lwz r10,0(r11) + + /* Copy the pages. 
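The suspend path above samples the 64-bit timebase with the usual upper/lower/upper sequence, looping until the upper half reads the same twice so that a carry between the two 32-bit reads cannot produce a torn value. A C rendering of that idiom against a simulated counter (the counter values are fabricated to force one retry):

	/* The race-free 64-bit timebase read used in swsusp_arch_suspend above,
	 * simulated with a fake counter that carries on the first attempt. */
	#include <stdio.h>
	#include <stdint.h>

	static uint64_t fake_tb = 0x00000001fffffffeULL;

	static uint32_t mftbu(void) { return fake_tb >> 32; }
	static uint32_t mftbl(void) { fake_tb += 3; return (uint32_t)fake_tb; }

	static uint64_t read_timebase(void)
	{
		uint32_t hi, lo;

		do {
			hi = mftbu();
			lo = mftbl();
		} while (mftbu() != hi);	/* retry if the upper word rolled over */

		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		printf("timebase snapshot: 0x%016llx\n",
		       (unsigned long long)read_timebase());
		return 0;
	}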
This is a very basic implementation, to + * be replaced by something more cache efficient */ +1: + tophys(r3,r10) + li r0,256 + mtctr r0 + lwz r11,pbe_address(r3) /* source */ + tophys(r5,r11) + lwz r10,pbe_orig_address(r3) /* destination */ + tophys(r6,r10) +2: + lwz r8,0(r5) + lwz r9,4(r5) + lwz r10,8(r5) + lwz r11,12(r5) + addi r5,r5,16 + stw r8,0(r6) + stw r9,4(r6) + stw r10,8(r6) + stw r11,12(r6) + addi r6,r6,16 + bdnz 2b + lwz r10,pbe_next(r3) + cmpwi 0,r10,0 + bne 1b + + /* Do a very simple cache flush/inval of the L1 to ensure + * coherency of the icache + */ + lis r3,0x0002 + mtctr r3 + li r3, 0 +1: + lwz r0,0(r3) + addi r3,r3,0x0020 + bdnz 1b + isync + sync + + /* Now flush those cache lines */ + lis r3,0x0002 + mtctr r3 + li r3, 0 +1: + dcbf 0,r3 + addi r3,r3,0x0020 + bdnz 1b + sync + + /* Ok, we are now running with the kernel data of the old + * kernel fully restored. We can get to the save area + * easily now. As for the rest of the code, it assumes the + * loader kernel and the booted one are exactly identical + */ + lis r11,swsusp_save_area@h + ori r11,r11,swsusp_save_area@l + tophys(r11,r11) + +#if 0 + /* Restore various CPU config stuffs */ + bl __restore_cpu_setup +#endif + /* Restore the BATs, and SDR1. Then we can turn on the MMU. + * This is a bit hairy as we are running out of those BATs, + * but first, our code is probably in the icache, and we are + * writing the same value to the BAT, so that should be fine, + * though a better solution will have to be found long-term + */ + lwz r4,SL_SDR1(r11) + mtsdr1 r4 + lwz r4,SL_SPRG0(r11) + mtsprg 0,r4 + lwz r4,SL_SPRG0+4(r11) + mtsprg 1,r4 + lwz r4,SL_SPRG0+8(r11) + mtsprg 2,r4 + lwz r4,SL_SPRG0+12(r11) + mtsprg 3,r4 + +#if 0 + lwz r4,SL_DBAT0(r11) + mtdbatu 0,r4 + lwz r4,SL_DBAT0+4(r11) + mtdbatl 0,r4 + lwz r4,SL_DBAT1(r11) + mtdbatu 1,r4 + lwz r4,SL_DBAT1+4(r11) + mtdbatl 1,r4 + lwz r4,SL_DBAT2(r11) + mtdbatu 2,r4 + lwz r4,SL_DBAT2+4(r11) + mtdbatl 2,r4 + lwz r4,SL_DBAT3(r11) + mtdbatu 3,r4 + lwz r4,SL_DBAT3+4(r11) + mtdbatl 3,r4 + lwz r4,SL_IBAT0(r11) + mtibatu 0,r4 + lwz r4,SL_IBAT0+4(r11) + mtibatl 0,r4 + lwz r4,SL_IBAT1(r11) + mtibatu 1,r4 + lwz r4,SL_IBAT1+4(r11) + mtibatl 1,r4 + lwz r4,SL_IBAT2(r11) + mtibatu 2,r4 + lwz r4,SL_IBAT2+4(r11) + mtibatl 2,r4 + lwz r4,SL_IBAT3(r11) + mtibatu 3,r4 + lwz r4,SL_IBAT3+4(r11) + mtibatl 3,r4 +#endif + +BEGIN_FTR_SECTION + li r4,0 + mtspr SPRN_DBAT4U,r4 + mtspr SPRN_DBAT4L,r4 + mtspr SPRN_DBAT5U,r4 + mtspr SPRN_DBAT5L,r4 + mtspr SPRN_DBAT6U,r4 + mtspr SPRN_DBAT6L,r4 + mtspr SPRN_DBAT7U,r4 + mtspr SPRN_DBAT7L,r4 + mtspr SPRN_IBAT4U,r4 + mtspr SPRN_IBAT4L,r4 + mtspr SPRN_IBAT5U,r4 + mtspr SPRN_IBAT5L,r4 + mtspr SPRN_IBAT6U,r4 + mtspr SPRN_IBAT6L,r4 + mtspr SPRN_IBAT7U,r4 + mtspr SPRN_IBAT7L,r4 +END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) + + /* Flush all TLBs */ + lis r4,0x1000 +1: addic. r4,r4,-0x1000 + tlbie r4 + blt 1b + sync + + /* restore the MSR and turn on the MMU */ + lwz r3,SL_MSR(r11) + bl turn_on_mmu + tovirt(r11,r11) + + /* Restore TB */ + li r3,0 + mttbl r3 + lwz r3,SL_TB(r11) + lwz r4,SL_TB+4(r11) + mttbu r3 + mttbl r4 + + /* Kick decrementer */ + li r0,1 + mtdec r0 + + /* Restore the callee-saved registers and return */ + lwz r0,SL_CR(r11) + mtcr r0 + lwz r2,SL_R2(r11) + lmw r12,SL_R12(r11) + lwz r1,SL_SP(r11) + lwz r0,SL_LR(r11) + mtlr r0 + + // XXX Note: we don't really need to call swsusp_resume + + li r3,0 + blr + +/* FIXME:This construct is actually not useful since we don't shut + * down the instruction MMU, we could just flip back MSR-DR on. 
+ */ +turn_on_mmu: + mflr r4 + mtsrr0 r4 + mtsrr1 r3 + sync + isync + rfi + diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index ad895c99813..9b69d99a910 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -40,6 +40,7 @@ #include <asm/uaccess.h> #include <asm/ipc.h> #include <asm/semaphore.h> +#include <asm/syscalls.h> #include <asm/time.h> #include <asm/unistd.h> diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 0f0c3a9ae2e..73560ef6f80 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -65,20 +65,20 @@ static int __init smt_setup(void) unsigned int cpu; if (!cpu_has_feature(CPU_FTR_SMT)) - return 1; + return -ENODEV; options = find_path_device("/options"); if (!options) - return 1; + return -ENODEV; val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay", NULL); if (!smt_snooze_cmdline && val) { - for_each_cpu(cpu) + for_each_possible_cpu(cpu) per_cpu(smt_snooze_delay, cpu) = *val; } - return 1; + return 0; } __initcall(smt_setup); @@ -93,7 +93,7 @@ static int __init setup_smt_snooze_delay(char *str) smt_snooze_cmdline = 1; if (get_option(&str, &snooze)) { - for_each_cpu(cpu) + for_each_possible_cpu(cpu) per_cpu(smt_snooze_delay, cpu) = snooze; } @@ -347,7 +347,7 @@ static int __init topology_init(void) register_cpu_notifier(&sysfs_cpu_nb); - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); #ifdef CONFIG_NUMA diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 1ad55f0466f..a14c9640384 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -322,3 +322,5 @@ SYSCALL(spu_create) COMPAT_SYS(pselect6) COMPAT_SYS(ppoll) SYSCALL(unshare) +SYSCALL(splice) +SYSCALL(tee) diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c new file mode 100644 index 00000000000..26bd8ea35a4 --- /dev/null +++ b/arch/powerpc/kernel/tau_6xx.c @@ -0,0 +1,271 @@ +/* + * temp.c Thermal management for cpu's with Thermal Assist Units + * + * Written by Troy Benjegerdes <hozer@drgw.net> + * + * TODO: + * dynamic power management to limit peak CPU temp (using ICTC) + * calibration??? + * + * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery + * life in portables, and add a 'performance/watt' metric somewhere in /proc + */ + +#include <linux/config.h> +#include <linux/errno.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/init.h> + +#include <asm/io.h> +#include <asm/reg.h> +#include <asm/nvram.h> +#include <asm/cache.h> +#include <asm/8xx_immap.h> +#include <asm/machdep.h> + +static struct tau_temp +{ + int interrupts; + unsigned char low; + unsigned char high; + unsigned char grew; +} tau[NR_CPUS]; + +struct timer_list tau_timer; + +#undef DEBUG + +/* TODO: put these in a /proc interface, with some sanity checks, and maybe + * dynamic adjustment to minimize # of interrupts */ +/* configurable values for step size and how much to expand the window when + * we get an interrupt. 
These are based on the limit that was out of range */ +#define step_size 2 /* step size when temp goes out of range */ +#define window_expand 1 /* expand the window by this much */ +/* configurable values for shrinking the window */ +#define shrink_timer 2*HZ /* period between shrinking the window */ +#define min_window 2 /* minimum window size, degrees C */ + +void set_thresholds(unsigned long cpu) +{ +#ifdef CONFIG_TAU_INT + /* + * setup THRM1, + * threshold, valid bit, enable interrupts, interrupt when below threshold + */ + mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); + + /* setup THRM2, + * threshold, valid bit, enable interrupts, interrupt when above threshhold + */ + mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); +#else + /* same thing but don't enable interrupts */ + mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); + mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); +#endif +} + +void TAUupdate(int cpu) +{ + unsigned thrm; + +#ifdef DEBUG + printk("TAUupdate "); +#endif + + /* if both thresholds are crossed, the step_sizes cancel out + * and the window winds up getting expanded twice. */ + if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */ + if(thrm & THRM1_TIN){ /* crossed low threshold */ + if (tau[cpu].low >= step_size){ + tau[cpu].low -= step_size; + tau[cpu].high -= (step_size - window_expand); + } + tau[cpu].grew = 1; +#ifdef DEBUG + printk("low threshold crossed "); +#endif + } + } + if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */ + if(thrm & THRM1_TIN){ /* crossed high threshold */ + if (tau[cpu].high <= 127-step_size){ + tau[cpu].low += (step_size - window_expand); + tau[cpu].high += step_size; + } + tau[cpu].grew = 1; +#ifdef DEBUG + printk("high threshold crossed "); +#endif + } + } + +#ifdef DEBUG + printk("grew = %d\n", tau[cpu].grew); +#endif + +#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ + set_thresholds(cpu); +#endif + +} + +#ifdef CONFIG_TAU_INT +/* + * TAU interrupts - called when we have a thermal assist unit interrupt + * with interrupts disabled + */ + +void TAUException(struct pt_regs * regs) +{ + int cpu = smp_processor_id(); + + irq_enter(); + tau[cpu].interrupts++; + + TAUupdate(cpu); + + irq_exit(); +} +#endif /* CONFIG_TAU_INT */ + +static void tau_timeout(void * info) +{ + int cpu; + unsigned long flags; + int size; + int shrink; + + /* disabling interrupts *should* be okay */ + local_irq_save(flags); + cpu = smp_processor_id(); + +#ifndef CONFIG_TAU_INT + TAUupdate(cpu); +#endif + + size = tau[cpu].high - tau[cpu].low; + if (size > min_window && ! tau[cpu].grew) { + /* do an exponential shrink of half the amount currently over size */ + shrink = (2 + size - min_window) / 4; + if (shrink) { + tau[cpu].low += shrink; + tau[cpu].high -= shrink; + } else { /* size must have been min_window + 1 */ + tau[cpu].low += 1; +#if 1 /* debug */ + if ((tau[cpu].high - tau[cpu].low) != min_window){ + printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__); + } +#endif + } + } + + tau[cpu].grew = 0; + + set_thresholds(cpu); + + /* + * Do the enable every time, since otherwise a bunch of (relatively) + * complex sleep code needs to be added. One mtspr every time + * tau_timeout is called is probably not a big deal. + * + * Enable thermal sensor and set up sample interval timer + * need 20 us to do the compare.. until a nice 'cpu_speed' function + * call is implemented, just assume a 500 mhz clock. 
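The TAU code above never reads an absolute temperature directly; it maintains a [low, high] threshold window, nudges it by step_size whenever the hardware reports a crossing (growing by window_expand each time), and lets the periodic timer shrink it back toward min_window. A small simulation of that window tracking (the temperature value is fabricated):

	/* Simulation of the TAU threshold-window tracking above: the window
	 * chases a fabricated die temperature via threshold crossings while the
	 * periodic timer shrinks it back toward min_window. */
	#include <stdio.h>

	#define STEP_SIZE	2
	#define WINDOW_EXPAND	1
	#define MIN_WINDOW	2

	struct win { int low, high, grew; };

	static void crossings(struct win *w, int temp)
	{
		if (temp < w->low && w->low >= STEP_SIZE) {	/* below the window */
			w->low  -= STEP_SIZE;
			w->high -= STEP_SIZE - WINDOW_EXPAND;
			w->grew = 1;
		}
		if (temp > w->high && w->high <= 127 - STEP_SIZE) { /* above it */
			w->low  += STEP_SIZE - WINDOW_EXPAND;
			w->high += STEP_SIZE;
			w->grew = 1;
		}
	}

	static void timer_shrink(struct win *w)
	{
		int size = w->high - w->low;

		if (size > MIN_WINDOW && !w->grew) {
			int shrink = (2 + size - MIN_WINDOW) / 4;
			if (shrink) {
				w->low += shrink;
				w->high -= shrink;
			} else {
				w->low += 1;
			}
		}
		w->grew = 0;
	}

	int main(void)
	{
		struct win w = { 5, 120, 0 };	/* same wide-open start as TAU_init_smp */
		int temp = 47, tick;

		for (tick = 0; tick < 12; tick++) {
			crossings(&w, temp);
			timer_shrink(&w);
			printf("tick %2d: window [%3d, %3d]\n", tick, w.low, w.high);
		}
		return 0;
	}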
It doesn't really + * matter if we take too long for a compare since it's all interrupt + * driven anyway. + * + * use a extra long time.. (60 us @ 500 mhz) + */ + mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); + + local_irq_restore(flags); +} + +static void tau_timeout_smp(unsigned long unused) +{ + + /* schedule ourselves to be run again */ + mod_timer(&tau_timer, jiffies + shrink_timer) ; + on_each_cpu(tau_timeout, NULL, 1, 0); +} + +/* + * setup the TAU + * + * Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound. + * Start off at zero + */ + +int tau_initialized = 0; + +void __init TAU_init_smp(void * info) +{ + unsigned long cpu = smp_processor_id(); + + /* set these to a reasonable value and let the timer shrink the + * window */ + tau[cpu].low = 5; + tau[cpu].high = 120; + + set_thresholds(cpu); +} + +int __init TAU_init(void) +{ + /* We assume in SMP that if one CPU has TAU support, they + * all have it --BenH + */ + if (!cpu_has_feature(CPU_FTR_TAU)) { + printk("Thermal assist unit not available\n"); + tau_initialized = 0; + return 1; + } + + + /* first, set up the window shrinking timer */ + init_timer(&tau_timer); + tau_timer.function = tau_timeout_smp; + tau_timer.expires = jiffies + shrink_timer; + add_timer(&tau_timer); + + on_each_cpu(TAU_init_smp, NULL, 1, 0); + + printk("Thermal assist unit "); +#ifdef CONFIG_TAU_INT + printk("using interrupts, "); +#else + printk("using timers, "); +#endif + printk("shrink_timer: %d jiffies\n", shrink_timer); + tau_initialized = 1; + + return 0; +} + +__initcall(TAU_init); + +/* + * return current temp + */ + +u32 cpu_temp_both(unsigned long cpu) +{ + return ((tau[cpu].high << 16) | tau[cpu].low); +} + +int cpu_temp(unsigned long cpu) +{ + return ((tau[cpu].high + tau[cpu].low) / 2); +} + +int tau_interrupts(unsigned long cpu) +{ + return (tau[cpu].interrupts); +} diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 4a27218a086..24e3ad756de 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -261,7 +261,7 @@ void snapshot_timebases(void) if (!cpu_has_feature(CPU_FTR_PURR)) return; - for_each_cpu(cpu) + for_each_possible_cpu(cpu) spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock); on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); } @@ -751,7 +751,7 @@ void __init smp_space_timers(unsigned int max_cpus) * systems works better if the two threads' timebase interrupts * are staggered by half a jiffy with respect to each other. 
*/ - for_each_cpu(i) { + for_each_possible_cpu(i) { if (i == boot_cpuid) continue; if (i == (boot_cpuid ^ 1)) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 9763faab673..064a5256469 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -97,7 +97,6 @@ static DEFINE_SPINLOCK(die_lock); int die(const char *str, struct pt_regs *regs, long err) { static int die_counter, crash_dump_start = 0; - int nl = 0; if (debugger(regs)) return 1; @@ -106,7 +105,7 @@ int die(const char *str, struct pt_regs *regs, long err) spin_lock_irq(&die_lock); bust_spinlocks(1); #ifdef CONFIG_PMAC_BACKLIGHT - if (_machine == _MACH_Pmac) { + if (machine_is(powermac)) { set_backlight_enable(1); set_backlight_level(BACKLIGHT_MAX); } @@ -114,46 +113,18 @@ int die(const char *str, struct pt_regs *regs, long err) printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); #ifdef CONFIG_PREEMPT printk("PREEMPT "); - nl = 1; #endif #ifdef CONFIG_SMP printk("SMP NR_CPUS=%d ", NR_CPUS); - nl = 1; #endif #ifdef CONFIG_DEBUG_PAGEALLOC printk("DEBUG_PAGEALLOC "); - nl = 1; #endif #ifdef CONFIG_NUMA printk("NUMA "); - nl = 1; #endif -#ifdef CONFIG_PPC64 - switch (_machine) { - case PLATFORM_PSERIES: - printk("PSERIES "); - nl = 1; - break; - case PLATFORM_PSERIES_LPAR: - printk("PSERIES LPAR "); - nl = 1; - break; - case PLATFORM_ISERIES_LPAR: - printk("ISERIES LPAR "); - nl = 1; - break; - case PLATFORM_POWERMAC: - printk("POWERMAC "); - nl = 1; - break; - case PLATFORM_CELL: - printk("CELL "); - nl = 1; - break; - } -#endif - if (nl) - printk("\n"); + printk("%s\n", ppc_md.name ? ppc_md.name : ""); + print_modules(); show_regs(regs); bust_spinlocks(0); @@ -257,7 +228,7 @@ void system_reset_exception(struct pt_regs *regs) */ static inline int check_io_access(struct pt_regs *regs) { -#ifdef CONFIG_PPC_PMAC +#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) unsigned long msr = regs->msr; const struct exception_table_entry *entry; unsigned int *nip = (unsigned int *)regs->nip; @@ -290,7 +261,7 @@ static inline int check_io_access(struct pt_regs *regs) return 1; } } -#endif /* CONFIG_PPC_PMAC */ +#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */ return 0; } @@ -337,8 +308,8 @@ platform_machine_check(struct pt_regs *regs) void machine_check_exception(struct pt_regs *regs) { -#ifdef CONFIG_PPC64 int recover = 0; + unsigned long reason = get_mc_reason(regs); /* See if any machine dependent calls */ if (ppc_md.machine_check_exception) @@ -346,8 +317,6 @@ void machine_check_exception(struct pt_regs *regs) if (recover) return; -#else - unsigned long reason = get_mc_reason(regs); if (user_mode(regs)) { regs->msr |= MSR_RI; @@ -491,7 +460,6 @@ void machine_check_exception(struct pt_regs *regs) * additional info, e.g. bus error registers. 
*/ platform_machine_check(regs); -#endif /* CONFIG_PPC64 */ if (debugger_fault_handler(regs)) return; diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index ec837036842..573afb68d69 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -33,6 +33,7 @@ #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/sections.h> +#include <asm/firmware.h> #include <asm/vdso.h> #include <asm/vdso_datapage.h> @@ -667,7 +668,13 @@ void __init vdso_init(void) vdso_data->version.major = SYSTEMCFG_MAJOR; vdso_data->version.minor = SYSTEMCFG_MINOR; vdso_data->processor = mfspr(SPRN_PVR); - vdso_data->platform = _machine; + /* + * Fake the old platform number for pSeries and iSeries and add + * in LPAR bit if necessary + */ + vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100; + if (firmware_has_feature(FW_FEATURE_LPAR)) + vdso_data->platform |= 1; vdso_data->physicalMemorySize = lmb_phys_mem_size(); vdso_data->dcache_size = ppc64_caches.dsize; vdso_data->dcache_line_size = ppc64_caches.dline_size; diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S index e0464278191..0c6a37b29dd 100644 --- a/arch/powerpc/kernel/vdso32/sigtramp.S +++ b/arch/powerpc/kernel/vdso32/sigtramp.S @@ -261,7 +261,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt32) .Lcie_start: .long 0 /* CIE ID */ .byte 1 /* Version number */ - .string "zR" /* NUL-terminated augmentation string */ + .string "zRS" /* NUL-terminated augmentation string */ .uleb128 4 /* Code alignment factor */ .sleb128 -4 /* Data alignment factor */ .byte 67 /* Return address register column, ap */ diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 31b604ab56d..7479edb101b 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S @@ -263,7 +263,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt64) .Lcie_start: .long 0 /* CIE ID */ .byte 1 /* Version number */ - .string "zR" /* NUL-terminated augmentation string */ + .string "zRS" /* NUL-terminated augmentation string */ .uleb128 4 /* Code alignment factor */ .sleb128 -8 /* Data alignment factor */ .byte 67 /* Return address register column, ap */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 7fa7b15fd8e..fe79c2584cb 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -1,9 +1,11 @@ #include <linux/config.h> #ifdef CONFIG_PPC64 #include <asm/page.h> +#define PROVIDE32(x) PROVIDE(__unused__##x) #else #define PAGE_SIZE 4096 #define KERNELBASE CONFIG_KERNEL_START +#define PROVIDE32(x) PROVIDE(x) #endif #include <asm-generic/vmlinux.lds.h> @@ -18,43 +20,42 @@ jiffies = jiffies_64 + 4; #endif SECTIONS { - /* Sections to be discarded. */ - /DISCARD/ : { - *(.exitcall.exit) - *(.exit.data) - } - - . = KERNELBASE; - - /* Read-only sections, merged into text segment: */ - .text : { - *(.text .text.*) - SCHED_TEXT - LOCK_TEXT - KPROBES_TEXT - *(.fixup) -#ifdef CONFIG_PPC32 - *(.got1) - __got2_start = .; - *(.got2) - __got2_end = .; -#else - . = ALIGN(PAGE_SIZE); - _etext = .; -#endif - } -#ifdef CONFIG_PPC32 - _etext = .; - PROVIDE (etext = .); + /* Sections to be discarded. */ + /DISCARD/ : { + *(.exitcall.exit) + *(.exit.data) + } - RODATA - .fini : { *(.fini) } =0 - .ctors : { *(.ctors) } - .dtors : { *(.dtors) } + . 
= KERNELBASE; - .fixup : { *(.fixup) } -#endif +/* + * Text, read only data and other permanent read-only sections + */ + + /* Text and gots */ + .text : { + *(.text .text.*) + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + *(.fixup) +#ifdef CONFIG_PPC32 + *(.got1) + __got2_start = .; + *(.got2) + __got2_end = .; +#endif /* CONFIG_PPC32 */ + + . = ALIGN(PAGE_SIZE); + _etext = .; + PROVIDE32 (etext = .); + } + + /* Read-only data */ + RODATA + + /* Exception & bug tables */ __ex_table : { __start___ex_table = .; *(__ex_table) @@ -67,192 +68,172 @@ SECTIONS __stop___bug_table = .; } -#ifdef CONFIG_PPC64 +/* + * Init sections discarded at runtime + */ + . = ALIGN(PAGE_SIZE); + __init_begin = .; + + .init.text : { + _sinittext = .; + *(.init.text) + _einittext = .; + } + + /* .exit.text is discarded at runtime, not link time, + * to deal with references from __bug_table + */ + .exit.text : { *(.exit.text) } + + .init.data : { + *(.init.data); + __vtop_table_begin = .; + *(.vtop_fixup); + __vtop_table_end = .; + __ptov_table_begin = .; + *(.ptov_fixup); + __ptov_table_end = .; + } + + . = ALIGN(16); + .init.setup : { + __setup_start = .; + *(.init.setup) + __setup_end = .; + } + + .initcall.init : { + __initcall_start = .; + *(.initcall1.init) + *(.initcall2.init) + *(.initcall3.init) + *(.initcall4.init) + *(.initcall5.init) + *(.initcall6.init) + *(.initcall7.init) + __initcall_end = .; + } + + .con_initcall.init : { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + + SECURITY_INIT + + . = ALIGN(8); __ftr_fixup : { __start___ftr_fixup = .; *(__ftr_fixup) __stop___ftr_fixup = .; } - RODATA -#endif + . = ALIGN(PAGE_SIZE); + .init.ramfs : { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + } #ifdef CONFIG_PPC32 - /* Read-write section, merged into data segment: */ - . = ALIGN(PAGE_SIZE); - _sdata = .; - .data : - { - *(.data) - *(.data1) - *(.sdata) - *(.sdata2) - *(.got.plt) *(.got) - *(.dynamic) - CONSTRUCTORS - } - - . = ALIGN(PAGE_SIZE); - __nosave_begin = .; - .data_nosave : { *(.data.nosave) } - . = ALIGN(PAGE_SIZE); - __nosave_end = .; - - . = ALIGN(32); - .data.cacheline_aligned : { *(.data.cacheline_aligned) } - - _edata = .; - PROVIDE (edata = .); - - . = ALIGN(8192); - .data.init_task : { *(.data.init_task) } + . = ALIGN(32); +#else + . = ALIGN(128); #endif + .data.percpu : { + __per_cpu_start = .; + *(.data.percpu) + __per_cpu_end = .; + } - /* will be freed after init */ - . = ALIGN(PAGE_SIZE); - __init_begin = .; - .init.text : { - _sinittext = .; - *(.init.text) - _einittext = .; - } -#ifdef CONFIG_PPC32 - /* .exit.text is discarded at runtime, not link time, - to deal with references from __bug_table */ - .exit.text : { *(.exit.text) } -#endif - .init.data : { - *(.init.data); - __vtop_table_begin = .; - *(.vtop_fixup); - __vtop_table_end = .; - __ptov_table_begin = .; - *(.ptov_fixup); - __ptov_table_end = .; - } - - . = ALIGN(16); - .init.setup : { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - - .initcall.init : { - __initcall_start = .; - *(.initcall1.init) - *(.initcall2.init) - *(.initcall3.init) - *(.initcall4.init) - *(.initcall5.init) - *(.initcall6.init) - *(.initcall7.init) - __initcall_end = .; - } - - .con_initcall.init : { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - - SECURITY_INIT + . = ALIGN(8); + .machine.desc : { + __machine_desc_start = . ; + *(.machine.desc) + __machine_desc_end = . ; + } + + /* freed after init ends here */ + . 
= ALIGN(PAGE_SIZE); + __init_end = .; + +/* + * And now the various read/write data + */ + + . = ALIGN(PAGE_SIZE); + _sdata = .; #ifdef CONFIG_PPC32 - __start___ftr_fixup = .; - __ftr_fixup : { *(__ftr_fixup) } - __stop___ftr_fixup = .; + .data : + { + *(.data) + *(.sdata) + *(.got.plt) *(.got) + } #else - . = ALIGN(PAGE_SIZE); - .init.ramfs : { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - } -#endif + .data : { + *(.data .data.rel* .toc1) + *(.branch_lt) + } -#ifdef CONFIG_PPC32 - . = ALIGN(32); + .opd : { + *(.opd) + } + + .got : { + __toc_start = .; + *(.got) + *(.toc) + } #endif - .data.percpu : { - __per_cpu_start = .; - *(.data.percpu) - __per_cpu_end = .; - } - . = ALIGN(PAGE_SIZE); -#ifdef CONFIG_PPC64 - . = ALIGN(16384); - __init_end = .; - /* freed after init ends here */ - - /* Read/write sections */ - . = ALIGN(PAGE_SIZE); - . = ALIGN(16384); - _sdata = .; - /* The initial task and kernel stack */ - .data.init_task : { - *(.data.init_task) - } - - . = ALIGN(PAGE_SIZE); - .data.page_aligned : { - *(.data.page_aligned) - } - - .data.cacheline_aligned : { - *(.data.cacheline_aligned) - } - - .data : { - *(.data .data.rel* .toc1) - *(.branch_lt) - } - - .opd : { - *(.opd) - } - - .got : { - __toc_start = .; - *(.got) - *(.toc) - . = ALIGN(PAGE_SIZE); - _edata = .; - } - - . = ALIGN(PAGE_SIZE); + . = ALIGN(PAGE_SIZE); + _edata = .; + PROVIDE32 (edata = .); + + /* The initial task and kernel stack */ +#ifdef CONFIG_PPC32 + . = ALIGN(8192); #else - __initramfs_start = .; - .init.ramfs : { - *(.init.ramfs) - } - __initramfs_end = .; + . = ALIGN(16384); +#endif + .data.init_task : { + *(.data.init_task) + } - . = ALIGN(4096); - __init_end = .; + . = ALIGN(PAGE_SIZE); + .data.page_aligned : { + *(.data.page_aligned) + } - . = ALIGN(4096); - _sextratext = .; - _eextratext = .; + .data.cacheline_aligned : { + *(.data.cacheline_aligned) + } - __bss_start = .; -#endif + . = ALIGN(PAGE_SIZE); + __data_nosave : { + __nosave_begin = .; + *(.data.nosave) + . = ALIGN(PAGE_SIZE); + __nosave_end = .; + } - .bss : { - __bss_start = .; - *(.sbss) *(.scommon) - *(.dynbss) - *(.bss) - *(COMMON) - __bss_stop = .; - } +/* + * And finally the bss + */ + + .bss : { + __bss_start = .; + *(.sbss) *(.scommon) + *(.dynbss) + *(.bss) + *(COMMON) + __bss_stop = .; + } -#ifdef CONFIG_PPC64 - . = ALIGN(PAGE_SIZE); -#endif - _end = . ; -#ifdef CONFIG_PPC32 - PROVIDE (end = .); -#endif + . = ALIGN(PAGE_SIZE); + _end = . ; + PROVIDE32 (end = .); } |
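
For reference, the adaptive window maintained by the TAU code above (TAUupdate() plus tau_timeout()) works like this: a crossed threshold shifts the [low, high] window toward the reading and widens it by window_expand, while each timer tick shrinks a quiet, oversized window by roughly a quarter of its excess over min_window. The THRM3_SITV(500*60) sample interval programmed in tau_timeout() is simply the 60 us comparison delay from the comment expressed in cycles at the assumed 500 MHz clock (30000 cycles). The stand-alone, user-space sketch below illustrates the window behaviour only; struct window, update(), shrink() and the fake temperature readings are assumptions made for the example, not kernel interfaces, and the hardware THRM1/THRM2 TIN/TIV bits are replaced by plain comparisons.

/*
 * Minimal user-space sketch of the TAU threshold-window algorithm
 * from tau_6xx.c above.  Constants mirror the kernel defines; the
 * rest is illustrative only.
 */
#include <stdio.h>

#define step_size     2   /* step when temp goes out of range */
#define window_expand 1   /* widen window by this much on a crossing */
#define min_window    2   /* minimum window size, degrees C */

struct window { int low, high, grew; };

/* mirrors TAUupdate(): move the window toward an out-of-range reading */
static void update(struct window *w, int temp)
{
	if (temp <= w->low) {			/* crossed low threshold */
		if (w->low >= step_size) {
			w->low  -= step_size;
			w->high -= step_size - window_expand;
		}
		w->grew = 1;
	}
	if (temp >= w->high) {			/* crossed high threshold */
		if (w->high <= 127 - step_size) {
			w->low  += step_size - window_expand;
			w->high += step_size;
		}
		w->grew = 1;
	}
}

/* mirrors tau_timeout(): exponentially shrink a quiet, oversized window */
static void shrink(struct window *w)
{
	int size = w->high - w->low;

	if (size > min_window && !w->grew) {
		int s = (2 + size - min_window) / 4;

		if (s) {
			w->low  += s;
			w->high -= s;
		} else {
			w->low += 1;	/* size was min_window + 1 */
		}
	}
	w->grew = 0;
}

int main(void)
{
	/* TAU_init_smp() starting window, plus fake temperature readings */
	struct window w = { .low = 5, .high = 120, .grew = 0 };
	int temp[] = { 40, 45, 48, 47, 46, 46, 46, 46, 46, 46 };

	for (unsigned i = 0; i < sizeof(temp) / sizeof(temp[0]); i++) {
		update(&w, temp[i]);
		shrink(&w);
		printf("temp %3d -> window [%d, %d]\n", temp[i], w.low, w.high);
	}
	return 0;
}

Running the sketch shows the wide initial window from TAU_init_smp() being pulled down toward the simulated readings over successive ticks, then hovering around them as the shrink and expand steps balance out, which is the behaviour the step_size/window_expand comments describe.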