Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--  arch/ppc/kernel/Makefile             |   28
-rw-r--r--  arch/ppc/kernel/cpu_setup_6xx.S      |  474
-rw-r--r--  arch/ppc/kernel/entry.S              |   60
-rw-r--r--  arch/ppc/kernel/head.S               |  183
-rw-r--r--  arch/ppc/kernel/idle.c               |  112
-rw-r--r--  arch/ppc/kernel/idle_6xx.S           |  233
-rw-r--r--  arch/ppc/kernel/idle_power4.S        |   91
-rw-r--r--  arch/ppc/kernel/l2cr.S               |  471
-rw-r--r--  arch/ppc/kernel/module.c             |  320
-rw-r--r--  arch/ppc/kernel/pci.c                |  396
-rw-r--r--  arch/ppc/kernel/perfmon_fsl_booke.c  |  222
-rw-r--r--  arch/ppc/kernel/ppc_htab.c           |    8
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c          |   26
-rw-r--r--  arch/ppc/kernel/setup.c              |  256
-rw-r--r--  arch/ppc/kernel/smp.c                |    2
-rw-r--r--  arch/ppc/kernel/swsusp.S             |  349
-rw-r--r--  arch/ppc/kernel/temp.c               |  271
17 files changed, 22 insertions(+), 3480 deletions(-)
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index e399bbb969a..466437f4bcb 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -1,48 +1,24 @@
#
# Makefile for the linux kernel.
#
-ifneq ($(CONFIG_PPC_MERGE),y)
-
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
-extra-$(CONFIG_6xx) += idle_6xx.o
extra-y += vmlinux.lds
-obj-y := entry.o traps.o idle.o time.o misc.o \
+obj-y := entry.o traps.o time.o misc.o \
setup.o \
ppc_htab.o
-obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
-obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
+obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_RAPIDIO) += rio.o
obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
-obj-$(CONFIG_TAU) += temp.o
-ifndef CONFIG_E200
-obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
-endif
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
ifndef CONFIG_MATH_EMULATION
obj-$(CONFIG_8xx) += softemu8xx.o
endif
-
-# These are here while we do the architecture merge
-
-else
-obj-y := idle.o
-obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
-obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
-obj-$(CONFIG_KGDB) += ppc-stub.o
-obj-$(CONFIG_TAU) += temp.o
-ifndef CONFIG_E200
-obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
-endif
-endif
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
deleted file mode 100644
index 55ed7716636..00000000000
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- * This file contains low level CPU setup functions.
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/cache.h>
-
-_GLOBAL(__setup_cpu_603)
- b setup_common_caches
-_GLOBAL(__setup_cpu_604)
- mflr r4
- bl setup_common_caches
- bl setup_604_hid0
- mtlr r4
- blr
-_GLOBAL(__setup_cpu_750)
- mflr r4
- bl __init_fpu_registers
- bl setup_common_caches
- bl setup_750_7400_hid0
- mtlr r4
- blr
-_GLOBAL(__setup_cpu_750cx)
- mflr r4
- bl __init_fpu_registers
- bl setup_common_caches
- bl setup_750_7400_hid0
- bl setup_750cx
- mtlr r4
- blr
-_GLOBAL(__setup_cpu_750fx)
- mflr r4
- bl __init_fpu_registers
- bl setup_common_caches
- bl setup_750_7400_hid0
- bl setup_750fx
- mtlr r4
- blr
-_GLOBAL(__setup_cpu_7400)
- mflr r4
- bl __init_fpu_registers
- bl setup_7400_workarounds
- bl setup_common_caches
- bl setup_750_7400_hid0
- mtlr r4
- blr
-_GLOBAL(__setup_cpu_7410)
- mflr r4
- bl __init_fpu_registers
- bl setup_7410_workarounds
- bl setup_common_caches
- bl setup_750_7400_hid0
- li r3,0
- mtspr SPRN_L2CR2,r3
- mtlr r4
- blr
-_GLOBAL(__setup_cpu_745x)
- mflr r4
- bl setup_common_caches
- bl setup_745x_specifics
- mtlr r4
- blr
-
-/* Enable caches for 603's, 604, 750 & 7400 */
-setup_common_caches:
- mfspr r11,SPRN_HID0
- andi. r0,r11,HID0_DCE
- ori r11,r11,HID0_ICE|HID0_DCE
- ori r8,r11,HID0_ICFI
- bne 1f /* don't invalidate the D-cache */
- ori r8,r8,HID0_DCI /* unless it wasn't enabled */
-1: sync
- mtspr SPRN_HID0,r8 /* enable and invalidate caches */
- sync
- mtspr SPRN_HID0,r11 /* enable caches */
- sync
- isync
- blr
-
-/* 604, 604e, 604ev, ...
- * Enable superscalar execution & branch history table
- */
-setup_604_hid0:
- mfspr r11,SPRN_HID0
- ori r11,r11,HID0_SIED|HID0_BHTE
- ori r8,r11,HID0_BTCD
- sync
- mtspr SPRN_HID0,r8 /* flush branch target address cache */
- sync /* on 604e/604r */
- mtspr SPRN_HID0,r11
- sync
- isync
- blr
-
-/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
- * erratas we work around here.
- * Moto MPC710CE.pdf describes them, those are errata
- * #3, #4 and #5
- * Note that we assume the firmware didn't choose to
- * apply other workarounds (there are other ones documented
- * in the .pdf). It appear that Apple firmware only works
- * around #3 and with the same fix we use. We may want to
- * check if the CPU is using 60x bus mode in which case
- * the workaround for errata #4 is useless. Also, we may
- * want to explicitely clear HID0_NOPDST as this is not
- * needed once we have applied workaround #5 (though it's
- * not set by Apple's firmware at least).
- */
-setup_7400_workarounds:
- mfpvr r3
- rlwinm r3,r3,0,20,31
- cmpwi 0,r3,0x0207
- ble 1f
- blr
-setup_7410_workarounds:
- mfpvr r3
- rlwinm r3,r3,0,20,31
- cmpwi 0,r3,0x0100
- bnelr
-1:
- mfspr r11,SPRN_MSSSR0
- /* Errata #3: Set L1OPQ_SIZE to 0x10 */
- rlwinm r11,r11,0,9,6
- oris r11,r11,0x0100
- /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
- oris r11,r11,0x0002
- /* Errata #5: Set DRLT_SIZE to 0x01 */
- rlwinm r11,r11,0,5,2
- oris r11,r11,0x0800
- sync
- mtspr SPRN_MSSSR0,r11
- sync
- isync
- blr
-
-/* 740/750/7400/7410
- * Enable Store Gathering (SGE), Address Brodcast (ABE),
- * Branch History Table (BHTE), Branch Target ICache (BTIC)
- * Dynamic Power Management (DPM), Speculative (SPD)
- * Clear Instruction cache throttling (ICTC)
- */
-setup_750_7400_hid0:
- mfspr r11,SPRN_HID0
- ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
- oris r11,r11,HID0_DPM@h
-BEGIN_FTR_SECTION
- xori r11,r11,HID0_BTIC
-END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
-BEGIN_FTR_SECTION
- xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */
-END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
- li r3,HID0_SPD
- andc r11,r11,r3 /* clear SPD: enable speculative */
- li r3,0
- mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
- isync
- mtspr SPRN_HID0,r11
- sync
- isync
- blr
-
-/* 750cx specific
- * Looks like we have to disable NAP feature for some PLL settings...
- * (waiting for confirmation)
- */
-setup_750cx:
- mfspr r10, SPRN_HID1
- rlwinm r10,r10,4,28,31
- cmpwi cr0,r10,7
- cmpwi cr1,r10,9
- cmpwi cr2,r10,11
- cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
- bnelr
- lwz r6,CPU_SPEC_FEATURES(r5)
- li r7,CPU_FTR_CAN_NAP
- andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r5)
- blr
-
-/* 750fx specific
- */
-setup_750fx:
- blr
-
-/* MPC 745x
- * Enable Store Gathering (SGE), Branch Folding (FOLD)
- * Branch History Table (BHTE), Branch Target ICache (BTIC)
- * Dynamic Power Management (DPM), Speculative (SPD)
- * Ensure our data cache instructions really operate.
- * Timebase has to be running or we wouldn't have made it here,
- * just ensure we don't disable it.
- * Clear Instruction cache throttling (ICTC)
- * Enable L2 HW prefetch
- */
-setup_745x_specifics:
- /* We check for the presence of an L3 cache setup by
- * the firmware. If any, we disable NAP capability as
- * it's known to be bogus on rev 2.1 and earlier
- */
- mfspr r11,SPRN_L3CR
- andis. r11,r11,L3CR_L3E@h
- beq 1f
- lwz r6,CPU_SPEC_FEATURES(r5)
- andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
- beq 1f
- li r7,CPU_FTR_CAN_NAP
- andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r5)
-1:
- mfspr r11,SPRN_HID0
-
- /* All of the bits we have to set.....
- */
- ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
- ori r11,r11,HID0_LRSTK | HID0_BTIC
- oris r11,r11,HID0_DPM@h
-BEGIN_FTR_SECTION
- xori r11,r11,HID0_BTIC
-END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
-BEGIN_FTR_SECTION
- xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */
-END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
-
- /* All of the bits we have to clear....
- */
- li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
- andc r11,r11,r3 /* clear SPD: enable speculative */
- li r3,0
-
- mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
- isync
- mtspr SPRN_HID0,r11
- sync
- isync
-
- /* Enable L2 HW prefetch, if L2 is enabled
- */
- mfspr r3,SPRN_L2CR
- andis. r3,r3,L2CR_L2E@h
- beqlr
- mfspr r3,SPRN_MSSCR0
- ori r3,r3,3
- sync
- mtspr SPRN_MSSCR0,r3
- sync
- isync
- blr
-
-/*
- * Initialize the FPU registers. This is needed to work around an errata
- * in some 750 cpus where using a not yet initialized FPU register after
- * power on reset may hang the CPU
- */
-_GLOBAL(__init_fpu_registers)
- mfmsr r10
- ori r11,r10,MSR_FP
- mtmsr r11
- isync
- addis r9,r3,empty_zero_page@ha
- addi r9,r9,empty_zero_page@l
- REST_32FPRS(0,r9)
- sync
- mtmsr r10
- isync
- blr
-
-
-/* Definitions for the table use to save CPU states */
-#define CS_HID0 0
-#define CS_HID1 4
-#define CS_HID2 8
-#define CS_MSSCR0 12
-#define CS_MSSSR0 16
-#define CS_ICTRL 20
-#define CS_LDSTCR 24
-#define CS_LDSTDB 28
-#define CS_SIZE 32
-
- .data
- .balign L1_CACHE_BYTES
-cpu_state_storage:
- .space CS_SIZE
- .balign L1_CACHE_BYTES,0
- .text
-
-/* Called in normal context to backup CPU 0 state. This
- * does not include cache settings. This function is also
- * called for machine sleep. This does not include the MMU
- * setup, BATs, etc... but rather the "special" registers
- * like HID0, HID1, MSSCR0, etc...
- */
-_GLOBAL(__save_cpu_setup)
- /* Some CR fields are volatile, we back it up all */
- mfcr r7
-
- /* Get storage ptr */
- lis r5,cpu_state_storage@h
- ori r5,r5,cpu_state_storage@l
-
- /* Save HID0 (common to all CONFIG_6xx cpus) */
- mfspr r3,SPRN_HID0
- stw r3,CS_HID0(r5)
-
- /* Now deal with CPU type dependent registers */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmplwi cr0,r3,0x8000 /* 7450 */
- cmplwi cr1,r3,0x000c /* 7400 */
- cmplwi cr2,r3,0x800c /* 7410 */
- cmplwi cr3,r3,0x8001 /* 7455 */
- cmplwi cr4,r3,0x8002 /* 7457 */
- cmplwi cr5,r3,0x8003 /* 7447A */
- cmplwi cr6,r3,0x7000 /* 750FX */
- cmplwi cr7,r3,0x8004 /* 7448 */
- /* cr1 is 7400 || 7410 */
- cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
- /* cr0 is 74xx */
- cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr5+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr7+eq
- bne 1f
- /* Backup 74xx specific regs */
- mfspr r4,SPRN_MSSCR0
- stw r4,CS_MSSCR0(r5)
- mfspr r4,SPRN_MSSSR0
- stw r4,CS_MSSSR0(r5)
- beq cr1,1f
- /* Backup 745x specific registers */
- mfspr r4,SPRN_HID1
- stw r4,CS_HID1(r5)
- mfspr r4,SPRN_ICTRL
- stw r4,CS_ICTRL(r5)
- mfspr r4,SPRN_LDSTCR
- stw r4,CS_LDSTCR(r5)
- mfspr r4,SPRN_LDSTDB
- stw r4,CS_LDSTDB(r5)
-1:
- bne cr6,1f
- /* Backup 750FX specific registers */
- mfspr r4,SPRN_HID1
- stw r4,CS_HID1(r5)
- /* If rev 2.x, backup HID2 */
- mfspr r3,SPRN_PVR
- andi. r3,r3,0xff00
- cmpwi cr0,r3,0x0200
- bne 1f
- mfspr r4,SPRN_HID2
- stw r4,CS_HID2(r5)
-1:
- mtcr r7
- blr
-
-/* Called with no MMU context (typically MSR:IR/DR off) to
- * restore CPU state as backed up by the previous
- * function. This does not include cache setting
- */
-_GLOBAL(__restore_cpu_setup)
- /* Some CR fields are volatile, we back it up all */
- mfcr r7
-
- /* Get storage ptr */
- lis r5,(cpu_state_storage-KERNELBASE)@h
- ori r5,r5,cpu_state_storage@l
-
- /* Restore HID0 */
- lwz r3,CS_HID0(r5)
- sync
- isync
- mtspr SPRN_HID0,r3
- sync
- isync
-
- /* Now deal with CPU type dependent registers */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmplwi cr0,r3,0x8000 /* 7450 */
- cmplwi cr1,r3,0x000c /* 7400 */
- cmplwi cr2,r3,0x800c /* 7410 */
- cmplwi cr3,r3,0x8001 /* 7455 */
- cmplwi cr4,r3,0x8002 /* 7457 */
- cmplwi cr5,r3,0x8003 /* 7447A */
- cmplwi cr6,r3,0x7000 /* 750FX */
- cmplwi cr7,r3,0x8004 /* 7448 */
- /* cr1 is 7400 || 7410 */
- cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
- /* cr0 is 74xx */
- cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr5+eq
- cror 4*cr0+eq,4*cr0+eq,4*cr7+eq
- bne 2f
- /* Restore 74xx specific regs */
- lwz r4,CS_MSSCR0(r5)
- sync
- mtspr SPRN_MSSCR0,r4
- sync
- isync
- lwz r4,CS_MSSSR0(r5)
- sync
- mtspr SPRN_MSSSR0,r4
- sync
- isync
- bne cr2,1f
- /* Clear 7410 L2CR2 */
- li r4,0
- mtspr SPRN_L2CR2,r4
-1: beq cr1,2f
- /* Restore 745x specific registers */
- lwz r4,CS_HID1(r5)
- sync
- mtspr SPRN_HID1,r4
- isync
- sync
- lwz r4,CS_ICTRL(r5)
- sync
- mtspr SPRN_ICTRL,r4
- isync
- sync
- lwz r4,CS_LDSTCR(r5)
- sync
- mtspr SPRN_LDSTCR,r4
- isync
- sync
- lwz r4,CS_LDSTDB(r5)
- sync
- mtspr SPRN_LDSTDB,r4
- isync
- sync
-2: bne cr6,1f
- /* Restore 750FX specific registers
- * that is restore HID2 on rev 2.x and PLL config & switch
- * to PLL 0 on all
- */
- /* If rev 2.x, restore HID2 with low voltage bit cleared */
- mfspr r3,SPRN_PVR
- andi. r3,r3,0xff00
- cmpwi cr0,r3,0x0200
- bne 4f
- lwz r4,CS_HID2(r5)
- rlwinm r4,r4,0,19,17
- mtspr SPRN_HID2,r4
- sync
-4:
- lwz r4,CS_HID1(r5)
- rlwinm r5,r4,0,16,14
- mtspr SPRN_HID1,r5
- /* Wait for PLL to stabilize */
- mftbl r5
-3: mftbl r6
- sub r6,r6,r5
- cmplwi cr0,r6,10000
- ble 3b
- /* Setup final PLL */
- mtspr SPRN_HID1,r4
-1:
- mtcr r7
- blr
-
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 3a281597848..5891ecbdc70 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -135,10 +135,10 @@ transfer_to_handler:
mfspr r11,SPRN_HID0
mtcr r11
BEGIN_FTR_SECTION
- bt- 8,power_save_6xx_restore /* Check DOZE */
+ bt- 8,4f /* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
- bt- 9,power_save_6xx_restore /* Check NAP */
+ bt- 9,4f /* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
SYNC
RFI /* jump to handler, enable MMU */
+#ifdef CONFIG_6xx
+4: b power_save_6xx_restore
+#endif
+
/*
* On kernel stack overflow, load up an initial stack pointer
* and call StackOverflow(regs), which should not return.
@@ -926,55 +930,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
b 4b
.comm ee_restarts,4
-
-/*
- * PROM code for specific machines follows. Put it
- * here so it's easy to add arch-specific sections later.
- * -- Cort
- */
-#ifdef CONFIG_PPC_OF
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- */
-_GLOBAL(enter_rtas)
- stwu r1,-INT_FRAME_SIZE(r1)
- mflr r0
- stw r0,INT_FRAME_SIZE+4(r1)
- lis r4,rtas_data@ha
- lwz r4,rtas_data@l(r4)
- lis r6,1f@ha /* physical return address for rtas */
- addi r6,r6,1f@l
- tophys(r6,r6)
- tophys(r7,r1)
- lis r8,rtas_entry@ha
- lwz r8,rtas_entry@l(r8)
- mfmsr r9
- stw r9,8(r1)
- LOAD_MSR_KERNEL(r0,MSR_KERNEL)
- SYNC /* disable interrupts so SRR0/1 */
- MTMSRD(r0) /* don't get trashed */
- li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
- mtlr r6
- CLR_TOP32(r7)
- mtspr SPRN_SPRG2,r7
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- RFI
-1: tophys(r9,r1)
- lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
- lwz r9,8(r9) /* original msr value */
- FIX_SRR1(r9,r0)
- addi r1,r1,INT_FRAME_SIZE
- li r0,0
- mtspr SPRN_SPRG2,r0
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- RFI /* return to caller */
-
- .globl machine_check_in_rtas
-machine_check_in_rtas:
- twi 31,0,0
- /* XXX load up BATs and panic */
-
-#endif /* CONFIG_PPC_OF */
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 53ea845fb91..01303efedda 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -37,19 +37,6 @@
#include <asm/amigappc.h>
#endif
-#ifdef CONFIG_PPC64BRIDGE
-#define LOAD_BAT(n, reg, RA, RB) \
- ld RA,(n*32)+0(reg); \
- ld RB,(n*32)+8(reg); \
- mtspr SPRN_IBAT##n##U,RA; \
- mtspr SPRN_IBAT##n##L,RB; \
- ld RA,(n*32)+16(reg); \
- ld RB,(n*32)+24(reg); \
- mtspr SPRN_DBAT##n##U,RA; \
- mtspr SPRN_DBAT##n##L,RB; \
-
-#else /* CONFIG_PPC64BRIDGE */
-
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
/* see the comment for clear_bats() -- Cort */ \
@@ -66,7 +53,6 @@
mtspr SPRN_DBAT##n##U,RA; \
mtspr SPRN_DBAT##n##L,RB; \
1:
-#endif /* CONFIG_PPC64BRIDGE */
.text
.stabs "arch/ppc/kernel/",N_SO,0,0,0f
@@ -129,11 +115,6 @@ _start:
.globl __start
__start:
-/*
- * We have to do any OF calls before we map ourselves to KERNELBASE,
- * because OF may have I/O devices mapped into that area
- * (particularly on CHRP).
- */
mr r31,r3 /* save parameters */
mr r30,r4
mr r29,r5
@@ -148,14 +129,6 @@ __start:
*/
bl early_init
-/*
- * On POWER4, we first need to tweak some CPU configuration registers
- * like real mode cache inhibit or exception base
- */
-#ifdef CONFIG_POWER4
- bl __970_cpu_preinit
-#endif /* CONFIG_POWER4 */
-
#ifdef CONFIG_APUS
/* On APUS the __va/__pa constants need to be set to the correct
* values before continuing.
@@ -169,7 +142,6 @@ __start:
*/
bl mmu_off
__after_mmu_off:
-#ifndef CONFIG_POWER4
bl clear_bats
bl flush_tlbs
@@ -177,10 +149,6 @@ __after_mmu_off:
#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
bl setup_disp_bat
#endif
-#else /* CONFIG_POWER4 */
- bl reloc_offset
- bl initial_mm_power4
-#endif /* CONFIG_POWER4 */
/*
* Call setup_cpu for CPU 0 and initialize 6xx Idle
@@ -192,18 +160,11 @@ __after_mmu_off:
bl reloc_offset
bl init_idle_6xx
#endif /* CONFIG_6xx */
-#ifdef CONFIG_POWER4
- bl reloc_offset
- bl init_idle_power4
-#endif /* CONFIG_POWER4 */
#ifndef CONFIG_APUS
/*
* We need to run with _start at physical address 0.
- * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
- * the exception vectors at 0 (and therefore this copy
- * overwrites OF's exception vectors with our own).
* If the MMU is already turned on, we copy stuff to KERNELBASE,
* otherwise we copy it to 0.
*/
@@ -358,51 +319,19 @@ i##n: \
#endif
/* Machine check */
-/*
- * On CHRP, this is complicated by the fact that we could get a
- * machine check inside RTAS, and we have no guarantee that certain
- * critical registers will have the values we expect. The set of
- * registers that might have bad values includes all the GPRs
- * and all the BATs. We indicate that we are in RTAS by putting
- * a non-zero value, the address of the exception frame to use,
- * in SPRG2. The machine check handler checks SPRG2 and uses its
- * value if it is non-zero. If we ever needed to free up SPRG2,
- * we could use a field in the thread_info or thread_struct instead.
- * (Other exception handlers assume that r1 is a valid kernel stack
- * pointer when we take an exception from supervisor mode.)
- * -- paulus.
- */
. = 0x200
mtspr SPRN_SPRG0,r10
mtspr SPRN_SPRG1,r11
mfcr r10
-#ifdef CONFIG_PPC_CHRP
- mfspr r11,SPRN_SPRG2
- cmpwi 0,r11,0
- bne 7f
-#endif /* CONFIG_PPC_CHRP */
EXCEPTION_PROLOG_1
7: EXCEPTION_PROLOG_2
addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_CHRP
- mfspr r4,SPRN_SPRG2
- cmpwi cr1,r4,0
- bne cr1,1f
-#endif
EXC_XFER_STD(0x200, machine_check_exception)
-#ifdef CONFIG_PPC_CHRP
-1: b machine_check_in_rtas
-#endif
/* Data access exception. */
. = 0x300
-#ifdef CONFIG_PPC64BRIDGE
- b DataAccess
-DataAccessCont:
-#else
DataAccess:
EXCEPTION_PROLOG
-#endif /* CONFIG_PPC64BRIDGE */
mfspr r10,SPRN_DSISR
andis. r0,r10,0xa470 /* weird error? */
bne 1f /* if not, try to put a PTE */
@@ -414,21 +343,10 @@ DataAccess:
mfspr r4,SPRN_DAR
EXC_XFER_EE_LITE(0x300, handle_page_fault)
-#ifdef CONFIG_PPC64BRIDGE
-/* SLB fault on data access. */
- . = 0x380
- b DataSegment
-#endif /* CONFIG_PPC64BRIDGE */
-
/* Instruction access exception. */
. = 0x400
-#ifdef CONFIG_PPC64BRIDGE
- b InstructionAccess
-InstructionAccessCont:
-#else
InstructionAccess:
EXCEPTION_PROLOG
-#endif /* CONFIG_PPC64BRIDGE */
andis. r0,r9,0x4000 /* no pte found? */
beq 1f /* if so, try to put a PTE */
li r3,0 /* into the hash table */
@@ -438,12 +356,6 @@ InstructionAccess:
mr r5,r9
EXC_XFER_EE_LITE(0x400, handle_page_fault)
-#ifdef CONFIG_PPC64BRIDGE
-/* SLB fault on instruction access. */
- . = 0x480
- b InstructionSegment
-#endif /* CONFIG_PPC64BRIDGE */
-
/* External interrupt */
EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
@@ -708,15 +620,9 @@ DataStoreTLBMiss:
EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
-#ifdef CONFIG_POWER4
- EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE)
- EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
-#else /* !CONFIG_POWER4 */
EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_POWER4 */
EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
@@ -754,28 +660,6 @@ AltiVecUnavailable:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
-#ifdef CONFIG_PPC64BRIDGE
-DataAccess:
- EXCEPTION_PROLOG
- b DataAccessCont
-
-InstructionAccess:
- EXCEPTION_PROLOG
- b InstructionAccessCont
-
-DataSegment:
- EXCEPTION_PROLOG
- addi r3,r1,STACK_FRAME_OVERHEAD
- mfspr r4,SPRN_DAR
- stw r4,_DAR(r11)
- EXC_XFER_STD(0x380, unknown_exception)
-
-InstructionSegment:
- EXCEPTION_PROLOG
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x480, unknown_exception)
-#endif /* CONFIG_PPC64BRIDGE */
-
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
* support. Changes to one are likely to be applicable to the
@@ -1048,13 +932,6 @@ __secondary_start_pmac_0:
.globl __secondary_start
__secondary_start:
-#ifdef CONFIG_PPC64BRIDGE
- mfmsr r0
- clrldi r0,r0,1 /* make sure it's in 32-bit mode */
- SYNC
- MTMSRD(r0)
- isync
-#endif
/* Copy some CPU settings from CPU 0 */
bl __restore_cpu_setup
@@ -1065,10 +942,6 @@ __secondary_start:
lis r3,-KERNELBASE@h
bl init_idle_6xx
#endif /* CONFIG_6xx */
-#ifdef CONFIG_POWER4
- lis r3,-KERNELBASE@h
- bl init_idle_power4
-#endif /* CONFIG_POWER4 */
/* get current_thread_info and current */
lis r1,secondary_ti@ha
@@ -1109,12 +982,12 @@ __secondary_start:
* Those generic dummy functions are kept for CPUs not
* included in CONFIG_6xx
*/
-#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
+#if !defined(CONFIG_6xx)
_GLOBAL(__save_cpu_setup)
blr
_GLOBAL(__restore_cpu_setup)
blr
-#endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */
+#endif /* !defined(CONFIG_6xx) */
/*
@@ -1132,11 +1005,6 @@ load_up_mmu:
tophys(r6,r6)
lwz r6,_SDR1@l(r6)
mtspr SPRN_SDR1,r6
-#ifdef CONFIG_PPC64BRIDGE
- /* clear the ASR so we only use the pseudo-segment registers. */
- li r6,0
- mtasr r6
-#endif /* CONFIG_PPC64BRIDGE */
li r0,16 /* load up segment register values */
mtctr r0 /* for context 0 */
lis r3,0x2000 /* Ku = 1, VSID = 0 */
@@ -1145,7 +1013,7 @@ load_up_mmu:
addi r3,r3,0x111 /* increment VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
-#ifndef CONFIG_POWER4
+
/* Load the BAT registers with the values set up by MMU_init.
MMU_init takes care of whether we're on a 601 or not. */
mfpvr r3
@@ -1158,7 +1026,7 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
-#endif /* CONFIG_POWER4 */
+
blr
/*
@@ -1269,9 +1137,6 @@ _GLOBAL(set_context)
li r4,0
isync
3:
-#ifdef CONFIG_PPC64BRIDGE
- slbie r4
-#endif /* CONFIG_PPC64BRIDGE */
mtsrin r3,r4
addi r3,r3,0x111 /* next VSID */
rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
@@ -1358,7 +1223,6 @@ mmu_off:
sync
RFI
-#ifndef CONFIG_POWER4
/*
* Use the first pair of BAT registers to map the 1st 16MB
* of RAM to KERNELBASE. From this point on we can't safely
@@ -1366,7 +1230,6 @@ mmu_off:
*/
initial_bats:
lis r11,KERNELBASE@h
-#ifndef CONFIG_PPC64BRIDGE
mfspr r9,SPRN_PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi 0,r9,1
@@ -1381,7 +1244,6 @@ initial_bats:
mtspr SPRN_IBAT1L,r10
isync
blr
-#endif /* CONFIG_PPC64BRIDGE */
4: tophys(r8,r11)
#ifdef CONFIG_SMP
@@ -1395,11 +1257,6 @@ initial_bats:
ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
#endif /* CONFIG_APUS */
-#ifdef CONFIG_PPC64BRIDGE
- /* clear out the high 32 bits in the BAT */
- clrldi r11,r11,32
- clrldi r8,r8,32
-#endif /* CONFIG_PPC64BRIDGE */
mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
mtspr SPRN_IBAT0L,r8
@@ -1432,38 +1289,6 @@ setup_disp_bat:
#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
-#else /* CONFIG_POWER4 */
-/*
- * Load up the SDR1 and segment register values now
- * since we don't have the BATs.
- * Also make sure we are running in 32-bit mode.
- */
-
-initial_mm_power4:
- addis r14,r3,_SDR1@ha /* get the value from _SDR1 */
- lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */
- mtspr SPRN_SDR1,r14
- slbia
- lis r4,0x2000 /* set pseudo-segment reg 12 */
- ori r5,r4,0x0ccc
- mtsr 12,r5
-#if 0
- ori r5,r4,0x0888 /* set pseudo-segment reg 8 */
- mtsr 8,r5 /* (for access to serial port) */
-#endif
-#ifdef CONFIG_BOOTX_TEXT
- ori r5,r4,0x0999 /* set pseudo-segment reg 9 */
- mtsr 9,r5 /* (for access to screen) */
-#endif
- mfmsr r0
- clrldi r0,r0,1
- sync
- mtmsr r0
- isync
- blr
-
-#endif /* CONFIG_POWER4 */
-
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
* We first disable the MMU, and then jump to the ROM reset address.
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
deleted file mode 100644
index 1be3ca5bae4..00000000000
--- a/arch/ppc/kernel/idle.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Idle daemon for PowerPC. Idle daemon will handle any action
- * that needs to be taken when the system becomes idle.
- *
- * Written by Cort Dougan (cort@cs.nmt.edu). Subsequently hacked
- * on by Tom Rini, Armin Kuster, Paul Mackerras and others.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/sysctl.h>
-#include <linux/cpu.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/mmu.h>
-#include <asm/cache.h>
-#include <asm/cputable.h>
-#include <asm/machdep.h>
-#include <asm/smp.h>
-
-void default_idle(void)
-{
- void (*powersave)(void);
-
- powersave = ppc_md.power_save;
-
- if (!need_resched()) {
- if (powersave != NULL)
- powersave();
-#ifdef CONFIG_SMP
- else {
- set_thread_flag(TIF_POLLING_NRFLAG);
- while (!need_resched() &&
- !cpu_is_offline(smp_processor_id()))
- barrier();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- }
-#endif
- }
-}
-
-/*
- * The body of the idle task.
- */
-void cpu_idle(void)
-{
- int cpu = smp_processor_id();
-
- for (;;) {
- while (!need_resched()) {
- if (ppc_md.idle != NULL)
- ppc_md.idle();
- else
- default_idle();
- }
-
- if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
- cpu_die();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-}
-
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
-/*
- * Register the sysctl to set/clear powersave_nap.
- */
-extern int powersave_nap;
-
-static ctl_table powersave_nap_ctl_table[]={
- {
- .ctl_name = KERN_PPC_POWERSAVE_NAP,
- .procname = "powersave-nap",
- .data = &powersave_nap,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- { 0, },
-};
-static ctl_table powersave_nap_sysctl_root[] = {
- { 1, "kernel", NULL, 0, 0755, powersave_nap_ctl_table, },
- { 0,},
-};
-
-static int __init
-register_powersave_nap_sysctl(void)
-{
- register_sysctl_table(powersave_nap_sysctl_root, 0);
-
- return 0;
-}
-
-__initcall(register_powersave_nap_sysctl);
-#endif
diff --git a/arch/ppc/kernel/idle_6xx.S b/arch/ppc/kernel/idle_6xx.S
deleted file mode 100644
index 1a2194cf682..00000000000
--- a/arch/ppc/kernel/idle_6xx.S
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
- .text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * Make sure no rest of NAP mode remains in HID0, save default
- * values for some CPU specific registers. Called with r24
- * containing CPU number and r3 reloc offset
- */
-_GLOBAL(init_idle_6xx)
-BEGIN_FTR_SECTION
- mfspr r4,SPRN_HID0
- rlwinm r4,r4,0,10,8 /* Clear NAP */
- mtspr SPRN_HID0, r4
- b 1f
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
- blr
-1:
- slwi r5,r24,2
- add r5,r5,r3
-BEGIN_FTR_SECTION
- mfspr r4,SPRN_MSSCR0
- addis r6,r5, nap_save_msscr0@ha
- stw r4,nap_save_msscr0@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
- mfspr r4,SPRN_HID1
- addis r6,r5,nap_save_hid1@ha
- stw r4,nap_save_hid1@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
- blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
-_GLOBAL(ppc6xx_idle)
- /* Check if we can nap or doze, put HID0 mask in r3
- */
- lis r3, 0
-BEGIN_FTR_SECTION
- lis r3,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
- /* We must dynamically check for the NAP feature as it
- * can be cleared by CPU init after the fixups are done
- */
- lis r4,cur_cpu_spec@ha
- lwz r4,cur_cpu_spec@l(r4)
- lwz r4,CPU_SPEC_FEATURES(r4)
- andi. r0,r4,CPU_FTR_CAN_NAP
- beq 1f
- /* Now check if user or arch enabled NAP mode */
- lis r4,powersave_nap@ha
- lwz r4,powersave_nap@l(r4)
- cmpwi 0,r4,0
- beq 1f
- lis r3,HID0_NAP@h
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
- cmpwi 0,r3,0
- beqlr
-
- /* Clear MSR:EE */
- mfmsr r7
- rlwinm r0,r7,0,17,15
- mtmsr r0
-
- /* Check current_thread_info()->flags */
- rlwinm r4,r1,0,0,18
- lwz r4,TI_FLAGS(r4)
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- mtmsr r7 /* out of line this ? */
- blr
-1:
- /* Some pre-nap cleanups needed on some CPUs */
- andis. r0,r3,HID0_NAP@h
- beq 2f
-BEGIN_FTR_SECTION
- /* Disable L2 prefetch on some 745x and try to ensure
- * L2 prefetch engines are idle. As explained by errata
- * text, we can't be sure they are, we just hope very hard
- * that well be enough (sic !). At least I noticed Apple
- * doesn't even bother doing the dcbf's here...
- */
- mfspr r4,SPRN_MSSCR0
- rlwinm r4,r4,0,0,29
- sync
- mtspr SPRN_MSSCR0,r4
- sync
- isync
- lis r4,KERNELBASE@h
- dcbf 0,r4
- dcbf 0,r4
- dcbf 0,r4
- dcbf 0,r4
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
- lis r6,nap_enter_count@ha
- lwz r4,nap_enter_count@l(r6)
- addi r4,r4,1
- stw r4,nap_enter_count@l(r6)
-#endif
-2:
-BEGIN_FTR_SECTION
- /* Go to low speed mode on some 750FX */
- lis r4,powersave_lowspeed@ha
- lwz r4,powersave_lowspeed@l(r4)
- cmpwi 0,r4,0
- beq 1f
- mfspr r4,SPRN_HID1
- oris r4,r4,0x0001
- mtspr SPRN_HID1,r4
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-
- /* Go to NAP or DOZE now */
- mfspr r4,SPRN_HID0
- lis r5,(HID0_NAP|HID0_SLEEP)@h
-BEGIN_FTR_SECTION
- oris r5,r5,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
- andc r4,r4,r5
- or r4,r4,r3
-BEGIN_FTR_SECTION
- oris r4,r4,HID0_DPM@h /* that should be done once for all */
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
- mtspr SPRN_HID0,r4
-BEGIN_FTR_SECTION
- DSSALL
- sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- ori r7,r7,MSR_EE /* Could be ommited (already set) */
- oris r7,r7,MSR_POW@h
- sync
- isync
- mtmsr r7
- isync
- sync
- blr
-
-/*
- * Return from NAP/DOZE mode, restore some CPU specific registers,
- * we are called with DR/IR still off and r2 containing physical
- * address of current.
- */
-_GLOBAL(power_save_6xx_restore)
- mfspr r11,SPRN_HID0
- rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
- cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
- rlwinm r11,r11,0,9,7 /* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
- mtspr SPRN_HID0, r11
-
-#ifdef DEBUG
- beq cr1,1f
- lis r11,(nap_return_count-KERNELBASE)@ha
- lwz r9,nap_return_count@l(r11)
- addi r9,r9,1
- stw r9,nap_return_count@l(r11)
-1:
-#endif
-
- rlwinm r9,r1,0,0,18
- tophys(r9,r9)
- lwz r11,TI_CPU(r9)
- slwi r11,r11,2
- /* Todo make sure all these are in the same page
- * and load r22 (@ha part + CPU offset) only once
- */
-BEGIN_FTR_SECTION
- beq cr1,1f
- addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
- lwz r9,nap_save_msscr0@l(r9)
- mtspr SPRN_MSSCR0, r9
- sync
- isync
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
- addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
- lwz r9,nap_save_hid1@l(r9)
- mtspr SPRN_HID1, r9
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
- b transfer_to_handler_cont
-
- .data
-
-_GLOBAL(nap_save_msscr0)
- .space 4*NR_CPUS
-
-_GLOBAL(nap_save_hid1)
- .space 4*NR_CPUS
-
-_GLOBAL(powersave_nap)
- .long 0
-_GLOBAL(powersave_lowspeed)
- .long 0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
- .space 4
-_GLOBAL(nap_return_count)
- .space 4
-#endif
diff --git a/arch/ppc/kernel/idle_power4.S b/arch/ppc/kernel/idle_power4.S
deleted file mode 100644
index cc0d535365c..00000000000
--- a/arch/ppc/kernel/idle_power4.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
- .text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * So nothing for now. Called with r24 containing CPU number and r3
- * reloc offset
- */
- .globl init_idle_power4
-init_idle_power4:
- blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
- .globl power4_idle
-power4_idle:
-BEGIN_FTR_SECTION
- blr
-END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
- /* We must dynamically check for the NAP feature as it
- * can be cleared by CPU init after the fixups are done
- */
- lis r4,cur_cpu_spec@ha
- lwz r4,cur_cpu_spec@l(r4)
- lwz r4,CPU_SPEC_FEATURES(r4)
- andi. r0,r4,CPU_FTR_CAN_NAP
- beqlr
- /* Now check if user or arch enabled NAP mode */
- lis r4,powersave_nap@ha
- lwz r4,powersave_nap@l(r4)
- cmpwi 0,r4,0
- beqlr
-
- /* Clear MSR:EE */
- mfmsr r7
- rlwinm r0,r7,0,17,15
- mtmsr r0
-
- /* Check current_thread_info()->flags */
- rlwinm r4,r1,0,0,18
- lwz r4,TI_FLAGS(r4)
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- mtmsr r7 /* out of line this ? */
- blr
-1:
- /* Go to NAP now */
-BEGIN_FTR_SECTION
- DSSALL
- sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- ori r7,r7,MSR_EE /* Could be ommited (already set) */
- oris r7,r7,MSR_POW@h
- sync
- isync
- mtmsr r7
- isync
- sync
- blr
-
- .globl powersave_nap
-powersave_nap:
- .long 0
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
deleted file mode 100644
index d7f4e982b53..00000000000
--- a/arch/ppc/kernel/l2cr.S
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- L2CR functions
- Copyright © 1997-1998 by PowerLogix R & D, Inc.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-/*
- Thur, Dec. 12, 1998.
- - First public release, contributed by PowerLogix.
- ***********
- Sat, Aug. 7, 1999.
- - Terry: Made sure code disabled interrupts before running. (Previously
- it was assumed interrupts were already disabled).
- - Terry: Updated for tentative G4 support. 4MB of memory is now flushed
- instead of 2MB. (Prob. only 3 is necessary).
- - Terry: Updated for workaround to HID0[DPM] processor bug
- during global invalidates.
- ***********
- Thu, July 13, 2000.
- - Terry: Added isync to correct for an errata.
-
- 22 August 2001.
- - DanM: Finally added the 7450 patch I've had for the past
- several months. The L2CR is similar, but I'm going
- to assume the user of this functions knows what they
- are doing.
-
- Author: Terry Greeniaus (tgree@phys.ualberta.ca)
- Please e-mail updates to this file to me, thanks!
-*/
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/ppc_asm.h>
-#include <asm/cache.h>
-#include <asm/page.h>
-
-/* Usage:
-
- When setting the L2CR register, you must do a few special
- things. If you are enabling the cache, you must perform a
- global invalidate. If you are disabling the cache, you must
- flush the cache contents first. This routine takes care of
- doing these things. When first enabling the cache, make sure
- you pass in the L2CR you want, as well as passing in the
- global invalidate bit set. A global invalidate will only be
- performed if the L2I bit is set in applyThis. When enabling
- the cache, you should also set the L2E bit in applyThis. If
- you want to modify the L2CR contents after the cache has been
- enabled, the recommended procedure is to first call
- __setL2CR(0) to disable the cache and then call it again with
- the new values for L2CR. Examples:
-
- _setL2CR(0) - disables the cache
- _setL2CR(0xB3A04000) - enables my G3 upgrade card:
- - L2E set to turn on the cache
- - L2SIZ set to 1MB
- - L2CLK set to 1:1
- - L2RAM set to pipelined synchronous late-write
- - L2I set to perform a global invalidation
- - L2OH set to 0.5 nS
- - L2DF set because this upgrade card
- requires it
-
- A similar call should work for your card. You need to know
- the correct setting for your card and then place them in the
- fields I have outlined above. Other fields support optional
- features, such as L2DO which caches only data, or L2TS which
- causes cache pushes from the L1 cache to go to the L2 cache
- instead of to main memory.
-
-IMPORTANT:
- Starting with the 7450, the bits in this register have moved
- or behave differently. The Enable, Parity Enable, Size,
- and L2 Invalidate are the only bits that have not moved.
- The size is read-only for these processors with internal L2
- cache, and the invalidate is a control as well as status.
- -- Dan
-
-*/
-/*
- * Summary: this procedure ignores the L2I bit in the value passed in,
- * flushes the cache if it was already enabled, always invalidates the
- * cache, then enables the cache if the L2E bit is set in the value
- * passed in.
- * -- paulus.
- */
-_GLOBAL(_set_L2CR)
- /* Make sure this is a 750 or 7400 chip */
-BEGIN_FTR_SECTION
- li r3,-1
- blr
-END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
-
- mflr r9
-
- /* Stop DST streams */
-BEGIN_FTR_SECTION
- DSSALL
- sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-
- /* Turn off interrupts and data relocation. */
- mfmsr r7 /* Save MSR in r7 */
- rlwinm r4,r7,0,17,15
- rlwinm r4,r4,0,28,26 /* Turn off DR bit */
- sync
- mtmsr r4
- isync
-
- /* Before we perform the global invalidation, we must disable dynamic
- * power management via HID0[DPM] to work around a processor bug where
- * DPM can possibly interfere with the state machine in the processor
- * that invalidates the L2 cache tags.
- */
- mfspr r8,SPRN_HID0 /* Save HID0 in r8 */
- rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
- sync
- mtspr SPRN_HID0,r4 /* Disable DPM */
- sync
-
- /* Get the current enable bit of the L2CR into r4 */
- mfspr r4,SPRN_L2CR
-
- /* Tweak some bits */
- rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
- rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
- rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
-
- /* Check to see if we need to flush */
- rlwinm. r4,r4,0,0,0
- beq 2f
-
- /* Flush the cache. First, read the first 4MB of memory (physical) to
- * put new data in the cache. (Actually we only need
- * the size of the L2 cache plus the size of the L1 cache, but 4MB will
- * cover everything just to be safe).
- */
-
- /**** Might be a good idea to set L2DO here - to prevent instructions
- from getting into the cache. But since we invalidate
- the next time we enable the cache it doesn't really matter.
- Don't do this unless you accomodate all processor variations.
- The bit moved on the 7450.....
- ****/
-
-BEGIN_FTR_SECTION
- /* Disable L2 prefetch on some 745x and try to ensure
- * L2 prefetch engines are idle. As explained by errata
- * text, we can't be sure they are, we just hope very hard
- * that well be enough (sic !). At least I noticed Apple
- * doesn't even bother doing the dcbf's here...
- */
- mfspr r4,SPRN_MSSCR0
- rlwinm r4,r4,0,0,29
- sync
- mtspr SPRN_MSSCR0,r4
- sync
- isync
- lis r4,KERNELBASE@h
- dcbf 0,r4
- dcbf 0,r4
- dcbf 0,r4
- dcbf 0,r4
-END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
-
- /* TODO: use HW flush assist when available */
-
- lis r4,0x0002
- mtctr r4
- li r4,0
-1:
- lwzx r0,r0,r4
- addi r4,r4,32 /* Go to start of next cache line */
- bdnz 1b
- isync
-
- /* Now, flush the first 4MB of memory */
- lis r4,0x0002
- mtctr r4
- li r4,0
- sync
-1:
- dcbf 0,r4
- addi r4,r4,32 /* Go to start of next cache line */
- bdnz 1b
-
-2:
- /* Set up the L2CR configuration bits (and switch L2 off) */
- /* CPU errata: Make sure the mtspr below is already in the
- * L1 icache
- */
- b 20f
- .balign L1_CACHE_BYTES
-22:
- sync
- mtspr SPRN_L2CR,r3
- sync
- b 23f
-20:
- b 21f
-21: sync
- isync
- b 22b
-
-23:
- /* Perform a global invalidation */
- oris r3,r3,0x0020
- sync
- mtspr SPRN_L2CR,r3
- sync
- isync /* For errata */
-
-BEGIN_FTR_SECTION
- /* On the 7450, we wait for the L2I bit to clear......
- */
-10: mfspr r3,SPRN_L2CR
- andis. r4,r3,0x0020
- bne 10b
- b 11f
-END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
-
- /* Wait for the invalidation to complete */
-3: mfspr r3,SPRN_L2CR
- rlwinm. r4,r3,0,31,31
- bne 3b
-
-11: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */
- sync
- mtspr SPRN_L2CR,r3
- sync
-
- /* See if we need to enable the cache */
- cmplwi r5,0
- beq 4f
-
- /* Enable the cache */
- oris r3,r3,0x8000
- mtspr SPRN_L2CR,r3
- sync
-
- /* Enable L2 HW prefetch on 744x/745x */
-BEGIN_FTR_SECTION
- mfspr r3,SPRN_MSSCR0
- ori r3,r3,3
- sync
- mtspr SPRN_MSSCR0,r3
- sync
- isync
-END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
-4:
-
- /* Restore HID0[DPM] to whatever it was before */
- sync
- mtspr 1008,r8
- sync
-
- /* Restore MSR (restores EE and DR bits to original state) */
- SYNC
- mtmsr r7
- isync
-
- mtlr r9
- blr
-
-_GLOBAL(_get_L2CR)
- /* Return the L2CR contents */
- li r3,0
-BEGIN_FTR_SECTION
- mfspr r3,SPRN_L2CR
-END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
- blr
-
-
-/*
- * Here is a similar routine for dealing with the L3 cache
- * on the 745x family of chips
- */
-
-_GLOBAL(_set_L3CR)
- /* Make sure this is a 745x chip */
-BEGIN_FTR_SECTION
- li r3,-1
- blr
-END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
-
- /* Turn off interrupts and data relocation. */
- mfmsr r7 /* Save MSR in r7 */
- rlwinm r4,r7,0,17,15
- rlwinm r4,r4,0,28,26 /* Turn off DR bit */
- sync
- mtmsr r4
- isync
-
- /* Stop DST streams */
- DSSALL
- sync
-
- /* Get the current enable bit of the L3CR into r4 */
- mfspr r4,SPRN_L3CR
-
- /* Tweak some bits */
- rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
- rlwinm r3,r3,0,22,20 /* Turn off the invalidate bit */
- rlwinm r3,r3,0,2,31 /* Turn off the enable & PE bits */
- rlwinm r3,r3,0,5,3 /* Turn off the clken bit */
- /* Check to see if we need to flush */
- rlwinm. r4,r4,0,0,0
- beq 2f
-
- /* Flush the cache.
- */
-
- /* TODO: use HW flush assist */
-
- lis r4,0x0008
- mtctr r4
- li r4,0
-1:
- lwzx r0,r0,r4
- dcbf 0,r4
- addi r4,r4,32 /* Go to start of next cache line */
- bdnz 1b
-
-2:
- /* Set up the L3CR configuration bits (and switch L3 off) */
- sync
- mtspr SPRN_L3CR,r3
- sync
-
- oris r3,r3,L3CR_L3RES@h /* Set reserved bit 5 */
- mtspr SPRN_L3CR,r3
- sync
- oris r3,r3,L3CR_L3CLKEN@h /* Set clken */
- mtspr SPRN_L3CR,r3
- sync
-
- /* Wait for stabilize */
- li r0,256
- mtctr r0
-1: bdnz 1b
-
- /* Perform a global invalidation */
- ori r3,r3,0x0400
- sync
- mtspr SPRN_L3CR,r3
- sync
- isync
-
- /* We wait for the L3I bit to clear...... */
-10: mfspr r3,SPRN_L3CR
- andi. r4,r3,0x0400
- bne 10b
-
- /* Clear CLKEN */
- rlwinm r3,r3,0,5,3 /* Turn off the clken bit */
- mtspr SPRN_L3CR,r3
- sync
-
- /* Wait for stabilize */
- li r0,256
- mtctr r0
-1: bdnz 1b
-
- /* See if we need to enable the cache */
- cmplwi r5,0
- beq 4f
-
- /* Enable the cache */
- oris r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
- mtspr SPRN_L3CR,r3
- sync
-
- /* Wait for stabilize */
- li r0,256
- mtctr r0
-1: bdnz 1b
-
- /* Restore MSR (restores EE and DR bits to original state) */
-4: SYNC
- mtmsr r7
- isync
- blr
-
-_GLOBAL(_get_L3CR)
- /* Return the L3CR contents */
- li r3,0
-BEGIN_FTR_SECTION
- mfspr r3,SPRN_L3CR
-END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
- blr
-
-/* --- End of PowerLogix code ---
- */
-
-
-/* flush_disable_L1() - Flush and disable L1 cache
- *
- * clobbers r0, r3, ctr, cr0
- * Must be called with interrupts disabled and MMU enabled.
- */
-_GLOBAL(__flush_disable_L1)
- /* Stop pending alitvec streams and memory accesses */
-BEGIN_FTR_SECTION
- DSSALL
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- sync
-
- /* Load counter to 0x4000 cache lines (512k) and
- * load cache with datas
- */
- li r3,0x4000 /* 512kB / 32B */
- mtctr r3
- lis r3,KERNELBASE@h
-1:
- lwz r0,0(r3)
- addi r3,r3,0x0020 /* Go to start of next cache line */
- bdnz 1b
- isync
- sync
-
- /* Now flush those cache lines */
- li r3,0x4000 /* 512kB / 32B */
- mtctr r3
- lis r3,KERNELBASE@h
-1:
- dcbf 0,r3
- addi r3,r3,0x0020 /* Go to start of next cache line */
- bdnz 1b
- sync
-
- /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
- mfspr r3,SPRN_HID0
- rlwinm r3,r3,0,18,15
- mtspr SPRN_HID0,r3
- sync
- isync
- blr
-
-/* inval_enable_L1 - Invalidate and enable L1 cache
- *
- * Assumes L1 is already disabled and MSR:EE is off
- *
- * clobbers r3
- */
-_GLOBAL(__inval_enable_L1)
- /* Enable and then Flash inval the instruction & data cache */
- mfspr r3,SPRN_HID0
- ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
- sync
- isync
- mtspr SPRN_HID0,r3
- xori r3,r3, HID0_ICFI|HID0_DCI
- mtspr SPRN_HID0,r3
- sync
-
- blr
-
-
diff --git a/arch/ppc/kernel/module.c b/arch/ppc/kernel/module.c
deleted file mode 100644
index 92f4e5f64f0..00000000000
--- a/arch/ppc/kernel/module.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/* Kernel module help for PPC.
- Copyright (C) 2001 Rusty Russell.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-#include <linux/module.h>
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/cache.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt , ...)
-#endif
-
-LIST_HEAD(module_bug_list);
-
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
-}
-
-/* Count how many different relocations (different symbol, different
- addend) */
-static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
-{
- unsigned int i, j, ret = 0;
-
- /* Sure, this is order(n^2), but it's usually short, and not
- time critical */
- for (i = 0; i < num; i++) {
- for (j = 0; j < i; j++) {
- /* If this addend appeared before, it's
- already been counted */
- if (ELF32_R_SYM(rela[i].r_info)
- == ELF32_R_SYM(rela[j].r_info)
- && rela[i].r_addend == rela[j].r_addend)
- break;
- }
- if (j == i) ret++;
- }
- return ret;
-}
-
-/* Get the potential trampolines size required of the init and
- non-init sections */
-static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
- const Elf32_Shdr *sechdrs,
- const char *secstrings,
- int is_init)
-{
- unsigned long ret = 0;
- unsigned i;
-
- /* Everything marked ALLOC (this includes the exported
- symbols) */
- for (i = 1; i < hdr->e_shnum; i++) {
- /* If it's called *.init*, and we're not init, we're
- not interested */
- if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
- != is_init)
- continue;
-
- /* We don't want to look at debug sections. */
- if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != 0)
- continue;
-
- if (sechdrs[i].sh_type == SHT_RELA) {
- DEBUGP("Found relocations in section %u\n", i);
- DEBUGP("Ptr: %p. Number: %u\n",
- (void *)hdr + sechdrs[i].sh_offset,
- sechdrs[i].sh_size / sizeof(Elf32_Rela));
- ret += count_relocs((void *)hdr
- + sechdrs[i].sh_offset,
- sechdrs[i].sh_size
- / sizeof(Elf32_Rela))
- * sizeof(struct ppc_plt_entry);
- }
- }
-
- return ret;
-}
-
-int module_frob_arch_sections(Elf32_Ehdr *hdr,
- Elf32_Shdr *sechdrs,
- char *secstrings,
- struct module *me)
-{
- unsigned int i;
-
- /* Find .plt and .init.plt sections */
- for (i = 0; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
- me->arch.init_plt_section = i;
- else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
- me->arch.core_plt_section = i;
- }
- if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
- printk("Module doesn't contain .plt or .init.plt sections.\n");
- return -ENOEXEC;
- }
-
- /* Override their sizes */
- sechdrs[me->arch.core_plt_section].sh_size
- = get_plt_size(hdr, sechdrs, secstrings, 0);
- sechdrs[me->arch.init_plt_section].sh_size
- = get_plt_size(hdr, sechdrs, secstrings, 1);
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *module)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
-static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
-{
- if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
- && entry->jump[1] == 0x396b0000 + (val & 0xffff))
- return 1;
- return 0;
-}
-
-/* Set up a trampoline in the PLT to bounce us to the distant function */
-static uint32_t do_plt_call(void *location,
- Elf32_Addr val,
- Elf32_Shdr *sechdrs,
- struct module *mod)
-{
- struct ppc_plt_entry *entry;
-
- DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
- /* Init, or core PLT? */
- if (location >= mod->module_core
- && location < mod->module_core + mod->core_size)
- entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
- else
- entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
-
- /* Find this entry, or if that fails, the next avail. entry */
- while (entry->jump[0]) {
- if (entry_matches(entry, val)) return (uint32_t)entry;
- entry++;
- }
-
- /* Stolen from Paul Mackerras as well... */
- entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
- entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/
- entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
- entry->jump[3] = 0x4e800420; /* bctr */
-
- DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
- return (uint32_t)entry;
-}
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *module)
-{
- unsigned int i;
- Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
- Elf32_Sym *sym;
- uint32_t *location;
- uint32_t value;
-
- DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rela[i].r_offset;
- /* This is the symbol it is referring to. Note that all
- undefined symbols have been resolved. */
- sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
- + ELF32_R_SYM(rela[i].r_info);
- /* `Everything is relative'. */
- value = sym->st_value + rela[i].r_addend;
-
- switch (ELF32_R_TYPE(rela[i].r_info)) {
- case R_PPC_ADDR32:
- /* Simply set it */
- *(uint32_t *)location = value;
- break;
-
- case R_PPC_ADDR16_LO:
- /* Low half of the symbol */
- *(uint16_t *)location = value;
- break;
-
- case R_PPC_ADDR16_HA:
- /* Sign-adjusted lower 16 bits: PPC ELF ABI says:
- (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
- This is the same, only sane.
- */
- *(uint16_t *)location = (value + 0x8000) >> 16;
- break;
-
- case R_PPC_REL24:
- if ((int)(value - (uint32_t)location) < -0x02000000
- || (int)(value - (uint32_t)location) >= 0x02000000)
- value = do_plt_call(location, value,
- sechdrs, module);
-
- /* Only replace bits 2 through 26 */
- DEBUGP("REL24 value = %08X. location = %08X\n",
- value, (uint32_t)location);
- DEBUGP("Location before: %08X.\n",
- *(uint32_t *)location);
- *(uint32_t *)location
- = (*(uint32_t *)location & ~0x03fffffc)
- | ((value - (uint32_t)location)
- & 0x03fffffc);
- DEBUGP("Location after: %08X.\n",
- *(uint32_t *)location);
- DEBUGP("ie. jump to %08X+%08X = %08X\n",
- *(uint32_t *)location & 0x03fffffc,
- (uint32_t)location,
- (*(uint32_t *)location & 0x03fffffc)
- + (uint32_t)location);
- break;
-
- case R_PPC_REL32:
- /* 32-bit relative jump. */
- *(uint32_t *)location = value - (uint32_t)location;
- break;
-
- default:
- printk("%s: unknown ADD relocation: %u\n",
- module->name,
- ELF32_R_TYPE(rela[i].r_info));
- return -ENOEXEC;
- }
- }
- return 0;
-}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- char *secstrings;
- unsigned int i;
-
- me->arch.bug_table = NULL;
- me->arch.num_bugs = 0;
-
- /* Find the __bug_table section, if present */
- secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
- continue;
- me->arch.bug_table = (void *) sechdrs[i].sh_addr;
- me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
- break;
- }
-
- /*
- * Strictly speaking this should have a spinlock to protect against
- * traversals, but since we only traverse on BUG()s, a spinlock
- * could potentially lead to deadlock and thus be counter-productive.
- */
- list_add(&me->arch.bug_list, &module_bug_list);
-
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
- list_del(&mod->arch.bug_list);
-}
-
-struct bug_entry *module_find_bug(unsigned long bugaddr)
-{
- struct mod_arch_specific *mod;
- unsigned int i;
- struct bug_entry *bug;
-
- list_for_each_entry(mod, &module_bug_list, bug_list) {
- bug = mod->bug_table;
- for (i = 0; i < mod->num_bugs; ++i, ++bug)
- if (bugaddr == bug->bug_addr)
- return bug;
- }
- return NULL;
-}
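
The removed do_plt_call()/apply_relocate_add() above turn on two arithmetic details: the R_PPC_ADDR16_HA half is (value + 0x8000) >> 16 so that it pairs with the sign-extended R_PPC_ADDR16_LO half, and an R_PPC_REL24 branch only reaches +/-32 MB of its own address, which is what forces the detour through a PLT stub. A minimal standalone sketch of both computations (plain C, illustrative names, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* High-adjusted upper half: pre-compensates for the sign extension that the
 * matching R_PPC_ADDR16_LO half gets when it is added back by addi. */
static unsigned ha16(uint32_t value)
{
	return (value + 0x8000) >> 16;
}

/* An R_PPC_REL24 branch encodes a signed 26-bit byte displacement, so the
 * target must lie within +/-32 MB of the branch itself; otherwise the
 * relocation code above bounces through a PLT stub. */
static int rel24_in_range(uint32_t location, uint32_t value)
{
	int32_t delta = (int32_t)(value - location);
	return delta >= -0x02000000 && delta < 0x02000000;
}

int main(void)
{
	uint32_t sym = 0x12349876;

	printf("lis r11,0x%04x ; addi r11,r11,0x%04x\n",
	       ha16(sym), (unsigned)(sym & 0xffff));
	printf("16 MB branch in range: %d\n",
	       rel24_in_range(0xc0000000u, 0xc1000000u));
	return 0;
}

With sym = 0x12349876 the high half comes out as 0x1235, i.e. the +1 adjustment the ABI formula in the comment above describes.
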
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 04d04c5bfdd..809673a36f7 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -46,9 +46,6 @@ static void pcibios_fixup_resources(struct pci_dev* dev);
static void fixup_broken_pcnet32(struct pci_dev* dev);
static int reparent_resources(struct resource *parent, struct resource *res);
static void fixup_cpc710_pci64(struct pci_dev* dev);
-#ifdef CONFIG_PPC_OF
-static u8* pci_to_OF_bus_map;
-#endif
/* By default, we don't re-assign bus numbers.
*/
@@ -625,406 +622,13 @@ pcibios_alloc_controller(void)
return hose;
}
-#ifdef CONFIG_PPC_OF
-/*
- * Functions below are used on OpenFirmware machines.
- */
-static void
-make_one_node_map(struct device_node* node, u8 pci_bus)
-{
- int *bus_range;
- int len;
-
- if (pci_bus >= pci_bus_count)
- return;
- bus_range = (int *) get_property(node, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, "
- "assuming it starts at 0\n", node->full_name);
- pci_to_OF_bus_map[pci_bus] = 0;
- } else
- pci_to_OF_bus_map[pci_bus] = bus_range[0];
-
- for (node=node->child; node != 0;node = node->sibling) {
- struct pci_dev* dev;
- unsigned int *class_code, *reg;
-
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
- if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
- continue;
- reg = (unsigned int *)get_property(node, "reg", NULL);
- if (!reg)
- continue;
- dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
- if (!dev || !dev->subordinate)
- continue;
- make_one_node_map(node, dev->subordinate->number);
- }
-}
-
-void
-pcibios_make_OF_bus_map(void)
-{
- int i;
- struct pci_controller* hose;
- u8* of_prop_map;
-
- pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL);
- if (!pci_to_OF_bus_map) {
- printk(KERN_ERR "Can't allocate OF bus map !\n");
- return;
- }
-
- /* We fill the bus map with invalid values, that helps
- * debugging.
- */
- for (i=0; i<pci_bus_count; i++)
- pci_to_OF_bus_map[i] = 0xff;
-
- /* For each hose, we begin searching bridges */
- for(hose=hose_head; hose; hose=hose->next) {
- struct device_node* node;
- node = (struct device_node *)hose->arch_data;
- if (!node)
- continue;
- make_one_node_map(node, hose->first_busno);
- }
- of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
- if (of_prop_map)
- memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
-#ifdef DEBUG
- printk("PCI->OF bus map:\n");
- for (i=0; i<pci_bus_count; i++) {
- if (pci_to_OF_bus_map[i] == 0xff)
- continue;
- printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
- }
-#endif
-}
-
-typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
-
-static struct device_node*
-scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
-{
- struct device_node* sub_node;
-
- for (; node != 0;node = node->sibling) {
- unsigned int *class_code;
-
- if (filter(node, data))
- return node;
-
-		/* For PCI<->PCI bridges or CardBus bridges, we go down.
-		 * Note: some OFs create a parent node "multifunc-device" as
-		 * a fake root for all functions of a multi-function device;
-		 * we go down those as well.
- */
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
- if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
- strcmp(node->name, "multifunc-device"))
- continue;
- sub_node = scan_OF_pci_childs(node->child, filter, data);
- if (sub_node)
- return sub_node;
- }
- return NULL;
-}
-
-static int
-scan_OF_pci_childs_iterator(struct device_node* node, void* data)
-{
- unsigned int *reg;
- u8* fdata = (u8*)data;
-
- reg = (unsigned int *) get_property(node, "reg", NULL);
- if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
- && ((reg[0] >> 16) & 0xff) == fdata[0])
- return 1;
- return 0;
-}
-
-static struct device_node*
-scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
-{
- u8 filter_data[2] = {bus, dev_fn};
-
- return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
-}
-
-/*
- * Scans the OF tree for a device node matching a PCI device
- */
-struct device_node *
-pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
-{
- struct pci_controller *hose;
- struct device_node *node;
- int busnr;
-
- if (!have_of)
- return NULL;
-
- /* Lookup the hose */
- busnr = bus->number;
- hose = pci_bus_to_hose(busnr);
- if (!hose)
- return NULL;
-
- /* Check it has an OF node associated */
- node = (struct device_node *) hose->arch_data;
- if (!node)
- return NULL;
-
-	/* Fix up the bus number according to what OF thinks it is. */
- if (pci_to_OF_bus_map)
- busnr = pci_to_OF_bus_map[busnr];
- if (busnr == 0xff)
- return NULL;
-
-	/* Now, look up the children of the hose */
- return scan_OF_childs_for_device(node->child, busnr, devfn);
-}
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-
-struct device_node*
-pci_device_to_OF_node(struct pci_dev *dev)
-{
- return pci_busdev_to_OF_node(dev->bus, dev->devfn);
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
-/* This routine is meant to be used early during boot, when the
- * PCI bus numbers have not yet been assigned, and you need to
- * issue PCI config cycles to an OF device.
- * It could also be used to "fix" RTAS config cycles if you want
- * to set pci_assign_all_buses to 1 and still use RTAS for PCI
- * config cycles.
- */
-struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
-{
- if (!have_of)
- return NULL;
- while(node) {
- struct pci_controller* hose;
- for (hose=hose_head;hose;hose=hose->next)
- if (hose->arch_data == node)
- return hose;
- node=node->parent;
- }
- return NULL;
-}
-
-static int
-find_OF_pci_device_filter(struct device_node* node, void* data)
-{
- return ((void *)node == data);
-}
-
-/*
- * Returns the PCI device matching a given OF node
- */
-int
-pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
-{
- unsigned int *reg;
- struct pci_controller* hose;
- struct pci_dev* dev = NULL;
-
- if (!have_of)
- return -ENODEV;
- /* Make sure it's really a PCI device */
- hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->arch_data)
- return -ENODEV;
- if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
- find_OF_pci_device_filter, (void *)node))
- return -ENODEV;
- reg = (unsigned int *) get_property(node, "reg", NULL);
- if (!reg)
- return -ENODEV;
- *bus = (reg[0] >> 16) & 0xff;
- *devfn = ((reg[0] >> 8) & 0xff);
-
- /* Ok, here we need some tweak. If we have already renumbered
- * all busses, we can't rely on the OF bus number any more.
- * the pci_to_OF_bus_map is not enough as several PCI busses
- * may match the same OF bus number.
- */
- if (!pci_to_OF_bus_map)
- return 0;
-
- for_each_pci_dev(dev)
- if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
- dev->devfn == *devfn) {
- *bus = dev->bus->number;
- pci_dev_put(dev);
- return 0;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL(pci_device_from_OF_node);
-
-void __init
-pci_process_bridge_OF_ranges(struct pci_controller *hose,
- struct device_node *dev, int primary)
-{
- static unsigned int static_lc_ranges[256] __initdata;
- unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
- unsigned int size;
- int rlen = 0, orig_rlen;
- int memno = 0;
- struct resource *res;
- int np, na = prom_n_addr_cells(dev);
- np = na + 5;
-
- /* First we try to merge ranges to fix a problem with some pmacs
- * that can have more than 3 ranges, fortunately using contiguous
- * addresses -- BenH
- */
- dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
- if (!dt_ranges)
- return;
- /* Sanity check, though hopefully that never happens */
- if (rlen > sizeof(static_lc_ranges)) {
- printk(KERN_WARNING "OF ranges property too large !\n");
- rlen = sizeof(static_lc_ranges);
- }
- lc_ranges = static_lc_ranges;
- memcpy(lc_ranges, dt_ranges, rlen);
- orig_rlen = rlen;
-
- /* Let's work on a copy of the "ranges" property instead of damaging
- * the device-tree image in memory
- */
- ranges = lc_ranges;
- prev = NULL;
- while ((rlen -= np * sizeof(unsigned int)) >= 0) {
- if (prev) {
- if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
- (prev[2] + prev[na+4]) == ranges[2] &&
- (prev[na+2] + prev[na+4]) == ranges[na+2]) {
- prev[na+4] += ranges[na+4];
- ranges[0] = 0;
- ranges += np;
- continue;
- }
- }
- prev = ranges;
- ranges += np;
- }
-
- /*
- * The ranges property is laid out as an array of elements,
- * each of which comprises:
- * cells 0 - 2: a PCI address
- * cells 3 or 3+4: a CPU physical address
- * (size depending on dev->n_addr_cells)
- * cells 4+5 or 5+6: the size of the range
- */
- ranges = lc_ranges;
- rlen = orig_rlen;
- while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
- res = NULL;
- size = ranges[na+4];
- switch ((ranges[0] >> 24) & 0x3) {
- case 1: /* I/O space */
- if (ranges[2] != 0)
- break;
- hose->io_base_phys = ranges[na+2];
- /* limit I/O space to 16MB */
- if (size > 0x01000000)
- size = 0x01000000;
- hose->io_base_virt = ioremap(ranges[na+2], size);
- if (primary)
- isa_io_base = (unsigned long) hose->io_base_virt;
- res = &hose->io_resource;
- res->flags = IORESOURCE_IO;
- res->start = ranges[2];
- DBG("PCI: IO 0x%lx -> 0x%lx\n",
- res->start, res->start + size - 1);
- break;
- case 2: /* memory space */
- memno = 0;
- if (ranges[1] == 0 && ranges[2] == 0
- && ranges[na+4] <= (16 << 20)) {
- /* 1st 16MB, i.e. ISA memory area */
- if (primary)
- isa_mem_base = ranges[na+2];
- memno = 1;
- }
- while (memno < 3 && hose->mem_resources[memno].flags)
- ++memno;
- if (memno == 0)
- hose->pci_mem_offset = ranges[na+2] - ranges[2];
- if (memno < 3) {
- res = &hose->mem_resources[memno];
- res->flags = IORESOURCE_MEM;
- if(ranges[0] & 0x40000000)
- res->flags |= IORESOURCE_PREFETCH;
- res->start = ranges[na+2];
- DBG("PCI: MEM[%d] 0x%lx -> 0x%lx\n", memno,
- res->start, res->start + size - 1);
- }
- break;
- }
- if (res != NULL) {
- res->name = dev->full_name;
- res->end = res->start + size - 1;
- res->parent = NULL;
- res->sibling = NULL;
- res->child = NULL;
- }
- ranges += np;
- }
-}
-
-/* We create the "pci-OF-bus-map" property now so it appears in the
- * /proc device tree
- */
-void __init
-pci_create_OF_bus_map(void)
-{
- struct property* of_prop;
-
- of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
- if (of_prop && find_path_device("/")) {
- memset(of_prop, -1, sizeof(struct property) + 256);
- of_prop->name = "pci-OF-bus-map";
- of_prop->length = 256;
- of_prop->value = (unsigned char *)&of_prop[1];
- prom_add_property(find_path_device("/"), of_prop);
- }
-}
-
-static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev;
- struct device_node *np;
-
- pdev = to_pci_dev (dev);
- np = pci_device_to_OF_node(pdev);
- if (np == NULL || np->full_name == NULL)
- return 0;
- return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
-#else /* CONFIG_PPC_OF */
void pcibios_make_OF_bus_map(void)
{
}
-#endif /* CONFIG_PPC_OF */
/* Add sysfs properties */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
-#ifdef CONFIG_PPC_OF
- device_create_file(&pdev->dev, &dev_attr_devspec);
-#endif /* CONFIG_PPC_OF */
}
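
The deleted pci_process_bridge_OF_ranges() above walks the OF "ranges" property as (PCI address, CPU address, size) tuples and keys off bits 24-25 of the first PCI address cell (1 = I/O space, 2 = memory space) plus bit 30 for prefetchable memory. A standalone sketch of that decoding under the same cell layout; the entry in main() is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Decode one OF "ranges" entry: the top byte of the first PCI address cell
 * carries the space code in bits 24-25 and the prefetchable flag in bit 30,
 * exactly the bits tested by the removed code above.  'na' is the number of
 * CPU-side address cells, so the CPU address sits at r[na+2] and the low
 * word of the size at r[na+4]. */
static void decode_range(const uint32_t *r, int na)
{
	unsigned space = (r[0] >> 24) & 0x3;
	unsigned size  = r[na + 4];

	switch (space) {
	case 1:
		printf("IO : PCI 0x%08x -> CPU 0x%08x, 0x%x bytes\n",
		       (unsigned)r[2], (unsigned)r[na + 2], size);
		break;
	case 2:
		printf("MEM%s: PCI 0x%08x -> CPU 0x%08x, 0x%x bytes\n",
		       (r[0] & 0x40000000) ? " (prefetchable)" : "",
		       (unsigned)r[2], (unsigned)r[na + 2], size);
		break;
	default:
		printf("config or other space, ignored\n");
	}
}

int main(void)
{
	/* A made-up 3+1+2 cell entry: 256 MB of 32-bit memory space. */
	uint32_t entry[] = { 0x02000000, 0x0, 0x80000000,
			     0xc0000000, 0x0, 0x10000000 };

	decode_range(entry, 1);
	return 0;
}
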
diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c
deleted file mode 100644
index 32455dfcc36..00000000000
--- a/arch/ppc/kernel/perfmon_fsl_booke.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/* kernel/perfmon_fsl_booke.c
- * Freescale Book-E Performance Monitor code
- *
- * Author: Andy Fleming
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/prctl.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/xmon.h>
-#include <asm/pmc.h>
-
-static inline u32 get_pmlca(int ctr);
-static inline void set_pmlca(int ctr, u32 pmlca);
-
-static inline u32 get_pmlca(int ctr)
-{
- u32 pmlca;
-
- switch (ctr) {
- case 0:
- pmlca = mfpmr(PMRN_PMLCA0);
- break;
- case 1:
- pmlca = mfpmr(PMRN_PMLCA1);
- break;
- case 2:
- pmlca = mfpmr(PMRN_PMLCA2);
- break;
- case 3:
- pmlca = mfpmr(PMRN_PMLCA3);
- break;
- default:
- panic("Bad ctr number\n");
- }
-
- return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- break;
- default:
- panic("Bad ctr number\n");
- }
-}
-
-void init_pmc_stop(int ctr)
-{
- u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
- PMLCA_FCM1 | PMLCA_FCM0);
- u32 pmlcb = 0;
-
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- mtpmr(PMRN_PMLCB0, pmlcb);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- mtpmr(PMRN_PMLCB1, pmlcb);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- mtpmr(PMRN_PMLCB2, pmlcb);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- mtpmr(PMRN_PMLCB3, pmlcb);
- break;
- default:
- panic("Bad ctr number!\n");
- }
-}
-
-void set_pmc_event(int ctr, int event)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
- ((event << PMLCA_EVENT_SHIFT) &
- PMLCA_EVENT_MASK);
-
- set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_user_kernel(int ctr, int user, int kernel)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- if(user)
- pmlca &= ~PMLCA_FCU;
- else
- pmlca |= PMLCA_FCU;
-
- if(kernel)
- pmlca &= ~PMLCA_FCS;
- else
- pmlca |= PMLCA_FCS;
-
- set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_marked(int ctr, int mark0, int mark1)
-{
- u32 pmlca = get_pmlca(ctr);
-
- if(mark0)
- pmlca &= ~PMLCA_FCM0;
- else
- pmlca |= PMLCA_FCM0;
-
- if(mark1)
- pmlca &= ~PMLCA_FCM1;
- else
- pmlca |= PMLCA_FCM1;
-
- set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctr(int ctr, int enable)
-{
- u32 pmlca = get_pmlca(ctr);
-
- pmlca &= ~PMLCA_FC;
-
- if (enable)
- pmlca |= PMLCA_CE;
- else
- pmlca &= ~PMLCA_CE;
-
- set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctrs(int enable)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 &= ~PMGC0_FAC;
- pmgc0 |= PMGC0_FCECE;
-
- if (enable)
- pmgc0 |= PMGC0_PMIE;
- else
- pmgc0 &= ~PMGC0_PMIE;
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void pmc_stop_ctrs(void)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 |= PMGC0_FAC;
-
- pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void dump_pmcs(void)
-{
- printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
- printk("pmc\t\tpmlca\t\tpmlcb\n");
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
- mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
- mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
- mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
- mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
-}
-
-EXPORT_SYMBOL(init_pmc_stop);
-EXPORT_SYMBOL(set_pmc_event);
-EXPORT_SYMBOL(set_pmc_user_kernel);
-EXPORT_SYMBOL(set_pmc_marked);
-EXPORT_SYMBOL(pmc_start_ctr);
-EXPORT_SYMBOL(pmc_start_ctrs);
-EXPORT_SYMBOL(pmc_stop_ctrs);
-EXPORT_SYMBOL(dump_pmcs);
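
The removed perfmon_fsl_booke.c controls counting by setting or clearing per-mode freeze bits in the PMLCAn registers. The sketch below reproduces only that read-modify-write pattern, with placeholder bit positions; the real PMLCA_FCU/PMLCA_FCS masks live in <asm/reg.h> and are not quoted here:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions standing in for PMLCA_FCU/PMLCA_FCS; only the
 * pattern matters, not these particular values. */
#define FCU	(1u << 26)	/* freeze counter in user mode   (placeholder) */
#define FCS	(1u << 27)	/* freeze counter in kernel mode (placeholder) */

/* Same logic as the removed set_pmc_user_kernel(): counting in a given mode
 * is enabled by clearing that mode's freeze bit, disabled by setting it. */
static uint32_t set_user_kernel(uint32_t pmlca, int user, int kernel)
{
	if (user)
		pmlca &= ~FCU;
	else
		pmlca |= FCU;

	if (kernel)
		pmlca &= ~FCS;
	else
		pmlca |= FCS;

	return pmlca;
}

int main(void)
{
	uint32_t pmlca = FCU | FCS;		/* fully frozen, as after init_pmc_stop() */

	pmlca = set_user_kernel(pmlca, 1, 0);	/* count user mode only */
	printf("pmlca = 0x%08x\n", (unsigned)pmlca);
	return 0;
}
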
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c
index 9b84bffdefc..75c64504374 100644
--- a/arch/ppc/kernel/ppc_htab.c
+++ b/arch/ppc/kernel/ppc_htab.c
@@ -104,7 +104,7 @@ static char *pmc2_lookup(unsigned long mmcr0)
static int ppc_htab_show(struct seq_file *m, void *v)
{
unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0;
-#if defined(CONFIG_PPC_STD_MMU) && !defined(CONFIG_PPC64BRIDGE)
+#if defined(CONFIG_PPC_STD_MMU)
unsigned int kptes = 0, uptes = 0;
PTE *ptr;
#endif /* CONFIG_PPC_STD_MMU */
@@ -133,7 +133,6 @@ static int ppc_htab_show(struct seq_file *m, void *v)
return 0;
}
-#ifndef CONFIG_PPC64BRIDGE
for (ptr = Hash; ptr < Hash_end; ptr++) {
unsigned int mctx, vsid;
@@ -147,7 +146,6 @@ static int ppc_htab_show(struct seq_file *m, void *v)
else
uptes++;
}
-#endif
seq_printf(m,
"PTE Hash Table Information\n"
@@ -155,20 +153,16 @@ static int ppc_htab_show(struct seq_file *m, void *v)
"Buckets\t\t: %lu\n"
"Address\t\t: %08lx\n"
"Entries\t\t: %lu\n"
-#ifndef CONFIG_PPC64BRIDGE
"User ptes\t: %u\n"
"Kernel ptes\t: %u\n"
"Percent full\t: %lu%%\n"
-#endif
, (unsigned long)(Hash_size>>10),
(Hash_size/(sizeof(PTE)*8)),
(unsigned long)Hash,
Hash_size/sizeof(PTE)
-#ifndef CONFIG_PPC64BRIDGE
, uptes,
kptes,
((kptes+uptes)*100) / (Hash_size/sizeof(PTE))
-#endif
);
seq_printf(m,
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 82adb460134..865ba74991a 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -18,7 +18,6 @@
#include <linux/bitops.h>
#include <asm/page.h>
-#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -30,7 +29,6 @@
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
-#include <asm/prom.h>
#include <asm/system.h>
#include <asm/pci-bridge.h>
#include <asm/irq.h>
@@ -208,27 +206,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_PPC_OF
-EXPORT_SYMBOL(find_devices);
-EXPORT_SYMBOL(find_type_devices);
-EXPORT_SYMBOL(find_compatible_devices);
-EXPORT_SYMBOL(find_path_device);
-EXPORT_SYMBOL(device_is_compatible);
-EXPORT_SYMBOL(machine_is_compatible);
-EXPORT_SYMBOL(find_all_nodes);
-EXPORT_SYMBOL(get_property);
-EXPORT_SYMBOL(request_OF_resource);
-EXPORT_SYMBOL(release_OF_resource);
-EXPORT_SYMBOL(of_find_node_by_name);
-EXPORT_SYMBOL(of_find_node_by_type);
-EXPORT_SYMBOL(of_find_compatible_node);
-EXPORT_SYMBOL(of_find_node_by_path);
-EXPORT_SYMBOL(of_find_all_nodes);
-EXPORT_SYMBOL(of_get_parent);
-EXPORT_SYMBOL(of_get_next_child);
-EXPORT_SYMBOL(of_node_get);
-EXPORT_SYMBOL(of_node_put);
-#endif /* CONFIG_PPC_OF */
#if defined(CONFIG_BOOTX_TEXT)
EXPORT_SYMBOL(btext_update_display);
#endif
@@ -262,9 +239,6 @@ EXPORT_SYMBOL(console_drivers);
EXPORT_SYMBOL(xmon);
EXPORT_SYMBOL(xmon_printf);
#endif
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
extern void (*debugger)(struct pt_regs *regs);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 53e9deacee8..1f79e84ab46 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -1,5 +1,5 @@
/*
- * Common prep/chrp boot and setup code.
+ * Common prep boot and setup code.
*/
#include <linux/config.h>
@@ -72,17 +72,12 @@ unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
-#ifdef CONFIG_PPC_MULTIPLATFORM
-int _machine = 0;
-EXPORT_SYMBOL(_machine);
-
+#ifdef CONFIG_PPC_PREP
extern void prep_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);
-extern void chrp_init(unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, unsigned long r7);
dev_t boot_dev;
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC_PREP */
int have_of;
EXPORT_SYMBOL(have_of);
@@ -319,72 +314,12 @@ early_init(int r3, int r4, int r5)
identify_cpu(offset, 0);
do_cpu_ftr_fixups(offset);
-#if defined(CONFIG_PPC_OF)
- reloc_got2(offset);
-
- /*
- * don't do anything on prep
- * for now, don't use bootinfo because it breaks yaboot 0.5
- * and assume that if we didn't find a magic number, we have OF
- */
- if (*(unsigned long *)(0) != 0xdeadc0de)
- phys = prom_init(r3, r4, (prom_entry)r5);
-
- reloc_got2(-offset);
-#endif
-
return phys;
}
-#ifdef CONFIG_PPC_OF
-/*
- * Assume here that all clock rates are the same in a
- * smp system. -- Cort
- */
-int
-of_show_percpuinfo(struct seq_file *m, int i)
-{
- struct device_node *cpu_node;
- u32 *fp;
- int s;
-
- cpu_node = find_type_devices("cpu");
- if (!cpu_node)
- return 0;
- for (s = 0; s < i && cpu_node->next; s++)
- cpu_node = cpu_node->next;
- fp = (u32 *)get_property(cpu_node, "clock-frequency", NULL);
- if (fp)
- seq_printf(m, "clock\t\t: %dMHz\n", *fp / 1000000);
- return 0;
-}
-
-void __init
-intuit_machine_type(void)
-{
- char *model;
- struct device_node *root;
-
- /* ask the OF info if we're a chrp or pmac */
- root = find_path_device("/");
- if (root != 0) {
- /* assume pmac unless proven to be chrp -- Cort */
- _machine = _MACH_Pmac;
- model = get_property(root, "device_type", NULL);
- if (model && !strncmp("chrp", model, 4))
- _machine = _MACH_chrp;
- else {
- model = get_property(root, "model", NULL);
- if (model && !strncmp(model, "IBM", 3))
- _machine = _MACH_chrp;
- }
- }
-}
-#endif
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_PPC_PREP
/*
- * The PPC_MULTIPLATFORM version of platform_init...
+ * The PPC_PREP version of platform_init...
*/
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
@@ -399,161 +334,9 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
parse_bootinfo(find_bootinfo());
- /* if we didn't get any bootinfo telling us what we are... */
- if (_machine == 0) {
- /* prep boot loader tells us if we're prep or not */
- if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
- _machine = _MACH_prep;
- }
-
-#ifdef CONFIG_PPC_PREP
- /* not much more to do here, if prep */
- if (_machine == _MACH_prep) {
- prep_init(r3, r4, r5, r6, r7);
- return;
- }
-#endif
-
-#ifdef CONFIG_PPC_OF
- have_of = 1;
-
- /* prom_init has already been called from __start */
- if (boot_infos)
- relocate_nodes();
-
- /* If we aren't PReP, we can find out if we're Pmac
- * or CHRP with this. */
- if (_machine == 0)
- intuit_machine_type();
-
- /* finish_device_tree may need _machine defined. */
- finish_device_tree();
-
- /*
- * If we were booted via quik, r3 points to the physical
- * address of the command-line parameters.
- * If we were booted from an xcoff image (i.e. netbooted or
- * booted from floppy), we get the command line from the
- * bootargs property of the /chosen node.
- * If an initial ramdisk is present, r3 and r4
- * are used for initrd_start and initrd_size,
- * otherwise they contain 0xdeadbeef.
- */
- if (r3 >= 0x4000 && r3 < 0x800000 && r4 == 0) {
- strlcpy(cmd_line, (char *)r3 + KERNELBASE,
- sizeof(cmd_line));
- } else if (boot_infos != 0) {
- /* booted by BootX - check for ramdisk */
- if (boot_infos->kernelParamsOffset != 0)
- strlcpy(cmd_line, (char *) boot_infos
- + boot_infos->kernelParamsOffset,
- sizeof(cmd_line));
-#ifdef CONFIG_BLK_DEV_INITRD
- if (boot_infos->ramDisk) {
- initrd_start = (unsigned long) boot_infos
- + boot_infos->ramDisk;
- initrd_end = initrd_start + boot_infos->ramDiskSize;
- initrd_below_start_ok = 1;
- }
-#endif
- } else {
- struct device_node *chosen;
- char *p;
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (r3 && r4 && r4 != 0xdeadbeef) {
- if (r3 < KERNELBASE)
- r3 += KERNELBASE;
- initrd_start = r3;
- initrd_end = r3 + r4;
- ROOT_DEV = Root_RAM0;
- initrd_below_start_ok = 1;
- }
-#endif
- chosen = find_devices("chosen");
- if (chosen != NULL) {
- p = get_property(chosen, "bootargs", NULL);
- if (p && *p) {
- strlcpy(cmd_line, p, sizeof(cmd_line));
- }
- }
- }
-#ifdef CONFIG_ADB
- if (strstr(cmd_line, "adb_sync")) {
- extern int __adb_probe_sync;
- __adb_probe_sync = 1;
- }
-#endif /* CONFIG_ADB */
-
- switch (_machine) {
-#ifdef CONFIG_PPC_CHRP
- case _MACH_chrp:
- chrp_init(r3, r4, r5, r6, r7);
- break;
-#endif
- }
-#endif /* CONFIG_PPC_OF */
+ prep_init(r3, r4, r5, r6, r7);
}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
-#ifdef CONFIG_PPC_OF
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-extern char *of_stdout_device;
-
-static int __init set_preferred_console(void)
-{
- struct device_node *prom_stdout;
- char *name;
- int offset = 0;
-
- if (of_stdout_device == NULL)
- return -ENODEV;
-
- /* The user has requested a console so this is already set up. */
- if (strstr(saved_command_line, "console="))
- return -EBUSY;
-
- prom_stdout = find_path_device(of_stdout_device);
- if (!prom_stdout)
- return -ENODEV;
-
- name = (char *)get_property(prom_stdout, "name", NULL);
- if (!name)
- return -ENODEV;
-
- if (strcmp(name, "serial") == 0) {
- int i;
- u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
- if (i > 8) {
- switch (reg[1]) {
- case 0x3f8:
- offset = 0;
- break;
- case 0x2f8:
- offset = 1;
- break;
- case 0x898:
- offset = 2;
- break;
- case 0x890:
- offset = 3;
- break;
- default:
-					/* We don't recognise the serial port */
- return -ENODEV;
- }
- }
- } else if (strcmp(name, "ch-a") == 0)
- offset = 0;
- else if (strcmp(name, "ch-b") == 0)
- offset = 1;
- else
- return -ENODEV;
- return add_preferred_console("ttyS", offset, NULL);
-}
-console_initcall(set_preferred_console);
-#endif /* CONFIG_SERIAL_CORE_CONSOLE */
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_PREP */
struct bi_record *find_bootinfo(void)
{
@@ -589,23 +372,6 @@ void parse_bootinfo(struct bi_record *rec)
initrd_end = data[0] + data[1] + KERNELBASE;
break;
#endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_PPC_MULTIPLATFORM
- case BI_MACHTYPE:
- /* Machine types changed with the merge. Since the
-		 * bootinfo records are now deprecated, we can just hard-code
- * the appropriate conversion here for when we are
- * called with yaboot which passes us a machine type
- * this way.
- */
- switch(data[0]) {
- case 1: _machine = _MACH_prep; break;
- case 2: _machine = _MACH_Pmac; break;
- case 4: _machine = _MACH_chrp; break;
- default:
- _machine = data[0];
- }
- break;
-#endif
case BI_MEMSIZE:
boot_mem_size = data[0];
break;
@@ -631,9 +397,6 @@ machine_init(unsigned long r3, unsigned long r4, unsigned long r5,
#ifdef CONFIG_6xx
ppc_md.power_save = ppc6xx_idle;
#endif
-#ifdef CONFIG_POWER4
- ppc_md.power_save = power4_idle;
-#endif
platform_init(r3, r4, r5, r6, r7);
@@ -711,7 +474,7 @@ int __init ppc_init(void)
if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
/* register CPU devices */
- for_each_cpu(i)
+ for_each_possible_cpu(i)
register_cpu(&cpu_devices[i], i, NULL);
/* call platform init */
@@ -799,7 +562,4 @@ void __init setup_arch(char **cmdline_p)
if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
paging_init();
-
- /* this is for modules since _machine can be a define -- Cort */
- ppc_md.ppc_machine = _machine;
}
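
The removed set_preferred_console() picked a ttyS index from the legacy UART I/O port found in the stdout node's "reg" property. Restated as a trivial standalone lookup (illustrative only, not a drop-in for the kernel code):

#include <stdio.h>

/* Port-to-index mapping used by the removed set_preferred_console(): the
 * classic ISA UART bases map onto ttyS0..ttyS3; anything else was rejected
 * with -ENODEV. */
static int serial_port_to_ttyS(unsigned int port)
{
	switch (port) {
	case 0x3f8: return 0;
	case 0x2f8: return 1;
	case 0x898: return 2;
	case 0x890: return 3;
	default:    return -1;	/* unrecognised port */
	}
}

int main(void)
{
	printf("port 0x3f8 -> ttyS%d\n", serial_port_to_ttyS(0x3f8));
	return 0;
}
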
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index e55cdda6149..f77795a64da 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -311,7 +311,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* Backup CPU 0 state */
__save_cpu_setup();
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
/* create a process for the processor */
diff --git a/arch/ppc/kernel/swsusp.S b/arch/ppc/kernel/swsusp.S
deleted file mode 100644
index 69773cc1a85..00000000000
--- a/arch/ppc/kernel/swsusp.S
+++ /dev/null
@@ -1,349 +0,0 @@
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-
-/*
- * Structure for storing CPU registers on the save area.
- */
-#define SL_SP 0
-#define SL_PC 4
-#define SL_MSR 8
-#define SL_SDR1 0xc
-#define SL_SPRG0 0x10 /* 4 sprg's */
-#define SL_DBAT0 0x20
-#define SL_IBAT0 0x28
-#define SL_DBAT1 0x30
-#define SL_IBAT1 0x38
-#define SL_DBAT2 0x40
-#define SL_IBAT2 0x48
-#define SL_DBAT3 0x50
-#define SL_IBAT3 0x58
-#define SL_TB 0x60
-#define SL_R2 0x68
-#define SL_CR 0x6c
-#define SL_LR 0x70
-#define SL_R12 0x74 /* r12 to r31 */
-#define SL_SIZE (SL_R12 + 80)
-
- .section .data
- .align 5
-
-_GLOBAL(swsusp_save_area)
- .space SL_SIZE
-
-
- .section .text
- .align 5
-
-_GLOBAL(swsusp_arch_suspend)
-
- lis r11,swsusp_save_area@h
- ori r11,r11,swsusp_save_area@l
-
- mflr r0
- stw r0,SL_LR(r11)
- mfcr r0
- stw r0,SL_CR(r11)
- stw r1,SL_SP(r11)
- stw r2,SL_R2(r11)
- stmw r12,SL_R12(r11)
-
- /* Save MSR & SDR1 */
- mfmsr r4
- stw r4,SL_MSR(r11)
- mfsdr1 r4
- stw r4,SL_SDR1(r11)
-
- /* Get a stable timebase and save it */
-1: mftbu r4
- stw r4,SL_TB(r11)
- mftb r5
- stw r5,SL_TB+4(r11)
- mftbu r3
- cmpw r3,r4
- bne 1b
-
- /* Save SPRGs */
- mfsprg r4,0
- stw r4,SL_SPRG0(r11)
- mfsprg r4,1
- stw r4,SL_SPRG0+4(r11)
- mfsprg r4,2
- stw r4,SL_SPRG0+8(r11)
- mfsprg r4,3
- stw r4,SL_SPRG0+12(r11)
-
- /* Save BATs */
- mfdbatu r4,0
- stw r4,SL_DBAT0(r11)
- mfdbatl r4,0
- stw r4,SL_DBAT0+4(r11)
- mfdbatu r4,1
- stw r4,SL_DBAT1(r11)
- mfdbatl r4,1
- stw r4,SL_DBAT1+4(r11)
- mfdbatu r4,2
- stw r4,SL_DBAT2(r11)
- mfdbatl r4,2
- stw r4,SL_DBAT2+4(r11)
- mfdbatu r4,3
- stw r4,SL_DBAT3(r11)
- mfdbatl r4,3
- stw r4,SL_DBAT3+4(r11)
- mfibatu r4,0
- stw r4,SL_IBAT0(r11)
- mfibatl r4,0
- stw r4,SL_IBAT0+4(r11)
- mfibatu r4,1
- stw r4,SL_IBAT1(r11)
- mfibatl r4,1
- stw r4,SL_IBAT1+4(r11)
- mfibatu r4,2
- stw r4,SL_IBAT2(r11)
- mfibatl r4,2
- stw r4,SL_IBAT2+4(r11)
- mfibatu r4,3
- stw r4,SL_IBAT3(r11)
- mfibatl r4,3
- stw r4,SL_IBAT3+4(r11)
-
-#if 0
- /* Backup various CPU config stuffs */
- bl __save_cpu_setup
-#endif
- /* Call the low level suspend stuff (we should probably have made
-	 * a stackframe...)
- */
- bl swsusp_save
-
- /* Restore LR from the save area */
- lis r11,swsusp_save_area@h
- ori r11,r11,swsusp_save_area@l
- lwz r0,SL_LR(r11)
- mtlr r0
-
- blr
-
-
-/* Resume code */
-_GLOBAL(swsusp_arch_resume)
-
-	/* Stop pending AltiVec streams and memory accesses */
-BEGIN_FTR_SECTION
- DSSALL
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- sync
-
- /* Disable MSR:DR to make sure we don't take a TLB or
- * hash miss during the copy, as our hash table will
-	 * for a while be unusable. For .text, we assume we are
- * covered by a BAT. This works only for non-G5 at this
- * point. G5 will need a better approach, possibly using
- * a small temporary hash table filled with large mappings,
- * disabling the MMU completely isn't a good option for
- * performance reasons.
- * (Note that 750's may have the same performance issue as
- * the G5 in this case, we should investigate using moving
- * BATs for these CPUs)
- */
- mfmsr r0
- sync
- rlwinm r0,r0,0,28,26 /* clear MSR_DR */
- mtmsr r0
- sync
- isync
-
- /* Load ptr the list of pages to copy in r3 */
- lis r11,(pagedir_nosave - KERNELBASE)@h
- ori r11,r11,pagedir_nosave@l
- lwz r10,0(r11)
-
- /* Copy the pages. This is a very basic implementation, to
- * be replaced by something more cache efficient */
-1:
- tophys(r3,r10)
- li r0,256
- mtctr r0
- lwz r11,pbe_address(r3) /* source */
- tophys(r5,r11)
- lwz r10,pbe_orig_address(r3) /* destination */
- tophys(r6,r10)
-2:
- lwz r8,0(r5)
- lwz r9,4(r5)
- lwz r10,8(r5)
- lwz r11,12(r5)
- addi r5,r5,16
- stw r8,0(r6)
- stw r9,4(r6)
- stw r10,8(r6)
- stw r11,12(r6)
- addi r6,r6,16
- bdnz 2b
- lwz r10,pbe_next(r3)
- cmpwi 0,r10,0
- bne 1b
-
- /* Do a very simple cache flush/inval of the L1 to ensure
- * coherency of the icache
- */
- lis r3,0x0002
- mtctr r3
- li r3, 0
-1:
- lwz r0,0(r3)
- addi r3,r3,0x0020
- bdnz 1b
- isync
- sync
-
- /* Now flush those cache lines */
- lis r3,0x0002
- mtctr r3
- li r3, 0
-1:
- dcbf 0,r3
- addi r3,r3,0x0020
- bdnz 1b
- sync
-
- /* Ok, we are now running with the kernel data of the old
- * kernel fully restored. We can get to the save area
- * easily now. As for the rest of the code, it assumes the
- * loader kernel and the booted one are exactly identical
- */
- lis r11,swsusp_save_area@h
- ori r11,r11,swsusp_save_area@l
- tophys(r11,r11)
-
-#if 0
- /* Restore various CPU config stuffs */
- bl __restore_cpu_setup
-#endif
- /* Restore the BATs, and SDR1. Then we can turn on the MMU.
- * This is a bit hairy as we are running out of those BATs,
- * but first, our code is probably in the icache, and we are
- * writing the same value to the BAT, so that should be fine,
- * though a better solution will have to be found long-term
- */
- lwz r4,SL_SDR1(r11)
- mtsdr1 r4
- lwz r4,SL_SPRG0(r11)
- mtsprg 0,r4
- lwz r4,SL_SPRG0+4(r11)
- mtsprg 1,r4
- lwz r4,SL_SPRG0+8(r11)
- mtsprg 2,r4
- lwz r4,SL_SPRG0+12(r11)
- mtsprg 3,r4
-
-#if 0
- lwz r4,SL_DBAT0(r11)
- mtdbatu 0,r4
- lwz r4,SL_DBAT0+4(r11)
- mtdbatl 0,r4
- lwz r4,SL_DBAT1(r11)
- mtdbatu 1,r4
- lwz r4,SL_DBAT1+4(r11)
- mtdbatl 1,r4
- lwz r4,SL_DBAT2(r11)
- mtdbatu 2,r4
- lwz r4,SL_DBAT2+4(r11)
- mtdbatl 2,r4
- lwz r4,SL_DBAT3(r11)
- mtdbatu 3,r4
- lwz r4,SL_DBAT3+4(r11)
- mtdbatl 3,r4
- lwz r4,SL_IBAT0(r11)
- mtibatu 0,r4
- lwz r4,SL_IBAT0+4(r11)
- mtibatl 0,r4
- lwz r4,SL_IBAT1(r11)
- mtibatu 1,r4
- lwz r4,SL_IBAT1+4(r11)
- mtibatl 1,r4
- lwz r4,SL_IBAT2(r11)
- mtibatu 2,r4
- lwz r4,SL_IBAT2+4(r11)
- mtibatl 2,r4
- lwz r4,SL_IBAT3(r11)
- mtibatu 3,r4
- lwz r4,SL_IBAT3+4(r11)
- mtibatl 3,r4
-#endif
-
-BEGIN_FTR_SECTION
- li r4,0
- mtspr SPRN_DBAT4U,r4
- mtspr SPRN_DBAT4L,r4
- mtspr SPRN_DBAT5U,r4
- mtspr SPRN_DBAT5L,r4
- mtspr SPRN_DBAT6U,r4
- mtspr SPRN_DBAT6L,r4
- mtspr SPRN_DBAT7U,r4
- mtspr SPRN_DBAT7L,r4
- mtspr SPRN_IBAT4U,r4
- mtspr SPRN_IBAT4L,r4
- mtspr SPRN_IBAT5U,r4
- mtspr SPRN_IBAT5L,r4
- mtspr SPRN_IBAT6U,r4
- mtspr SPRN_IBAT6L,r4
- mtspr SPRN_IBAT7U,r4
- mtspr SPRN_IBAT7L,r4
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
-
- /* Flush all TLBs */
- lis r4,0x1000
-1: addic. r4,r4,-0x1000
- tlbie r4
- blt 1b
- sync
-
- /* restore the MSR and turn on the MMU */
- lwz r3,SL_MSR(r11)
- bl turn_on_mmu
- tovirt(r11,r11)
-
- /* Restore TB */
- li r3,0
- mttbl r3
- lwz r3,SL_TB(r11)
- lwz r4,SL_TB+4(r11)
- mttbu r3
- mttbl r4
-
- /* Kick decrementer */
- li r0,1
- mtdec r0
-
- /* Restore the callee-saved registers and return */
- lwz r0,SL_CR(r11)
- mtcr r0
- lwz r2,SL_R2(r11)
- lmw r12,SL_R12(r11)
- lwz r1,SL_SP(r11)
- lwz r0,SL_LR(r11)
- mtlr r0
-
- // XXX Note: we don't really need to call swsusp_resume
-
- li r3,0
- blr
-
-/* FIXME: This construct is not actually useful since we don't shut
- * down the instruction MMU; we could just flip MSR:DR back on.
- */
-turn_on_mmu:
- mflr r4
- mtsrr0 r4
- mtsrr1 r3
- sync
- isync
- rfi
-
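
swsusp_arch_suspend above captures the 64-bit timebase with the usual read-upper, read-lower, re-read-upper loop so that both halves belong to the same instant. The same idea in C, with the mftbu/mftb reads simulated by a software counter so the sketch runs anywhere:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the mftbu/mftb instructions, simulated here so the sketch
 * is self-contained; on real hardware these would be the SPR reads used in
 * the assembly above. */
static uint64_t fake_tb;
static uint32_t read_tbu(void) { return fake_tb >> 32; }
static uint32_t read_tbl(void) { fake_tb += 7; return (uint32_t)fake_tb; }

/* Same retry loop as the assembly: if the upper half changed while we were
 * reading the lower half, the lower word wrapped, so read again until both
 * halves belong to the same 64-bit value. */
static uint64_t read_timebase(void)
{
	uint32_t hi, lo;

	do {
		hi = read_tbu();
		lo = read_tbl();
	} while (read_tbu() != hi);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	fake_tb = 0xfffffff0ull;	/* start near a low-word wrap */
	printf("tb = 0x%llx\n", (unsigned long long)read_timebase());
	return 0;
}
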
diff --git a/arch/ppc/kernel/temp.c b/arch/ppc/kernel/temp.c
deleted file mode 100644
index 26bd8ea35a4..00000000000
--- a/arch/ppc/kernel/temp.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * temp.c	Thermal management for CPUs with Thermal Assist Units
- *
- * Written by Troy Benjegerdes <hozer@drgw.net>
- *
- * TODO:
- * dynamic power management to limit peak CPU temp (using ICTC)
- * calibration???
- *
- * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery
- * life in portables, and add a 'performance/watt' metric somewhere in /proc
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/nvram.h>
-#include <asm/cache.h>
-#include <asm/8xx_immap.h>
-#include <asm/machdep.h>
-
-static struct tau_temp
-{
- int interrupts;
- unsigned char low;
- unsigned char high;
- unsigned char grew;
-} tau[NR_CPUS];
-
-struct timer_list tau_timer;
-
-#undef DEBUG
-
-/* TODO: put these in a /proc interface, with some sanity checks, and maybe
- * dynamic adjustment to minimize # of interrupts */
-/* configurable values for step size and how much to expand the window when
- * we get an interrupt. These are based on the limit that was out of range */
-#define step_size 2 /* step size when temp goes out of range */
-#define window_expand 1 /* expand the window by this much */
-/* configurable values for shrinking the window */
-#define shrink_timer 2*HZ /* period between shrinking the window */
-#define min_window 2 /* minimum window size, degrees C */
-
-void set_thresholds(unsigned long cpu)
-{
-#ifdef CONFIG_TAU_INT
- /*
- * setup THRM1,
- * threshold, valid bit, enable interrupts, interrupt when below threshold
- */
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
-
- /* setup THRM2,
-	 * threshold, valid bit, enable interrupts, interrupt when above threshold
- */
- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
-#else
- /* same thing but don't enable interrupts */
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
-#endif
-}
-
-void TAUupdate(int cpu)
-{
- unsigned thrm;
-
-#ifdef DEBUG
- printk("TAUupdate ");
-#endif
-
- /* if both thresholds are crossed, the step_sizes cancel out
- * and the window winds up getting expanded twice. */
- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
- if(thrm & THRM1_TIN){ /* crossed low threshold */
- if (tau[cpu].low >= step_size){
- tau[cpu].low -= step_size;
- tau[cpu].high -= (step_size - window_expand);
- }
- tau[cpu].grew = 1;
-#ifdef DEBUG
- printk("low threshold crossed ");
-#endif
- }
- }
- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
- if(thrm & THRM1_TIN){ /* crossed high threshold */
- if (tau[cpu].high <= 127-step_size){
- tau[cpu].low += (step_size - window_expand);
- tau[cpu].high += step_size;
- }
- tau[cpu].grew = 1;
-#ifdef DEBUG
- printk("high threshold crossed ");
-#endif
- }
- }
-
-#ifdef DEBUG
- printk("grew = %d\n", tau[cpu].grew);
-#endif
-
-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
- set_thresholds(cpu);
-#endif
-
-}
-
-#ifdef CONFIG_TAU_INT
-/*
- * TAU interrupts - called when we have a thermal assist unit interrupt
- * with interrupts disabled
- */
-
-void TAUException(struct pt_regs * regs)
-{
- int cpu = smp_processor_id();
-
- irq_enter();
- tau[cpu].interrupts++;
-
- TAUupdate(cpu);
-
- irq_exit();
-}
-#endif /* CONFIG_TAU_INT */
-
-static void tau_timeout(void * info)
-{
- int cpu;
- unsigned long flags;
- int size;
- int shrink;
-
- /* disabling interrupts *should* be okay */
- local_irq_save(flags);
- cpu = smp_processor_id();
-
-#ifndef CONFIG_TAU_INT
- TAUupdate(cpu);
-#endif
-
- size = tau[cpu].high - tau[cpu].low;
- if (size > min_window && ! tau[cpu].grew) {
- /* do an exponential shrink of half the amount currently over size */
- shrink = (2 + size - min_window) / 4;
- if (shrink) {
- tau[cpu].low += shrink;
- tau[cpu].high -= shrink;
- } else { /* size must have been min_window + 1 */
- tau[cpu].low += 1;
-#if 1 /* debug */
- if ((tau[cpu].high - tau[cpu].low) != min_window){
- printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__);
- }
-#endif
- }
- }
-
- tau[cpu].grew = 0;
-
- set_thresholds(cpu);
-
- /*
- * Do the enable every time, since otherwise a bunch of (relatively)
- * complex sleep code needs to be added. One mtspr every time
- * tau_timeout is called is probably not a big deal.
- *
- * Enable thermal sensor and set up sample interval timer
- * need 20 us to do the compare.. until a nice 'cpu_speed' function
-	 * need 20 us to do the compare; until a nice 'cpu_speed' function
-	 * call is implemented, just assume a 500 MHz clock. It doesn't really
- * driven anyway.
- *
-	 * use an extra long time (60 us @ 500 MHz)
- */
- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
-
- local_irq_restore(flags);
-}
-
-static void tau_timeout_smp(unsigned long unused)
-{
-
- /* schedule ourselves to be run again */
- mod_timer(&tau_timer, jiffies + shrink_timer) ;
- on_each_cpu(tau_timeout, NULL, 1, 0);
-}
-
-/*
- * setup the TAU
- *
- * Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound.
- * Start off at zero
- */
-
-int tau_initialized = 0;
-
-void __init TAU_init_smp(void * info)
-{
- unsigned long cpu = smp_processor_id();
-
- /* set these to a reasonable value and let the timer shrink the
- * window */
- tau[cpu].low = 5;
- tau[cpu].high = 120;
-
- set_thresholds(cpu);
-}
-
-int __init TAU_init(void)
-{
- /* We assume in SMP that if one CPU has TAU support, they
- * all have it --BenH
- */
- if (!cpu_has_feature(CPU_FTR_TAU)) {
- printk("Thermal assist unit not available\n");
- tau_initialized = 0;
- return 1;
- }
-
-
- /* first, set up the window shrinking timer */
- init_timer(&tau_timer);
- tau_timer.function = tau_timeout_smp;
- tau_timer.expires = jiffies + shrink_timer;
- add_timer(&tau_timer);
-
- on_each_cpu(TAU_init_smp, NULL, 1, 0);
-
- printk("Thermal assist unit ");
-#ifdef CONFIG_TAU_INT
- printk("using interrupts, ");
-#else
- printk("using timers, ");
-#endif
- printk("shrink_timer: %d jiffies\n", shrink_timer);
- tau_initialized = 1;
-
- return 0;
-}
-
-__initcall(TAU_init);
-
-/*
- * return current temp
- */
-
-u32 cpu_temp_both(unsigned long cpu)
-{
- return ((tau[cpu].high << 16) | tau[cpu].low);
-}
-
-int cpu_temp(unsigned long cpu)
-{
- return ((tau[cpu].high + tau[cpu].low) / 2);
-}
-
-int tau_interrupts(unsigned long cpu)
-{
- return (tau[cpu].interrupts);
-}
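
tau_timeout() in the removed temp.c keeps a [low, high] temperature window that widens by step_size when a threshold trips and otherwise shrinks exponentially toward min_window. A standalone sketch of just the shrink step, reusing the constants from the file above (initial 5..120 window, min_window of 2):

#include <stdio.h>

#define MIN_WINDOW 2	/* same min_window as the removed temp.c */

/* One shrink step from the removed tau_timeout(): pull both edges in by
 * roughly a quarter of the excess over the minimum window (half overall),
 * falling back to a one-degree nudge when the excess is tiny. */
static void shrink_window(int *low, int *high)
{
	int size = *high - *low;
	int shrink;

	if (size <= MIN_WINDOW)
		return;

	shrink = (2 + size - MIN_WINDOW) / 4;
	if (shrink) {
		*low += shrink;
		*high -= shrink;
	} else {
		*low += 1;	/* size was MIN_WINDOW + 1 */
	}
}

int main(void)
{
	int low = 5, high = 120;	/* the initial window from TAU_init_smp() */

	for (int i = 0; i < 5; i++) {
		shrink_window(&low, &high);
		printf("window: [%d, %d]\n", low, high);
	}
	return 0;
}

Starting from 5..120 the window closes to within a few degrees of min_window after a handful of timer ticks, which matches the "exponential shrink of half the amount currently over size" comment above.
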