-rw-r--r--  include/asm-powerpc/processor.h (renamed from include/asm-ppc/processor.h) | 185
-rw-r--r--  include/asm-powerpc/reg.h | 281
-rw-r--r--  include/asm-ppc64/processor.h | 567
-rw-r--r--  include/asm-ppc64/system.h | 7
4 files changed, 353 insertions(+), 687 deletions(-)
diff --git a/include/asm-ppc/processor.h b/include/asm-powerpc/processor.h
index b05b5d9cae2..9592f533e05 100644
--- a/include/asm-ppc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -1,21 +1,28 @@
-#ifdef __KERNEL__
-#ifndef __ASM_PPC_PROCESSOR_H
-#define __ASM_PPC_PROCESSOR_H
+#ifndef _ASM_POWERPC_PROCESSOR_H
+#define _ASM_POWERPC_PROCESSOR_H
/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
*/
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
#include <linux/config.h>
-#include <linux/stringify.h>
+#include <asm/reg.h>
+#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
-#include <asm/mpc8xx.h>
-#include <asm/reg.h>
+#ifdef CONFIG_PPC64
+#include <asm/systemcfg.h>
+#endif
+#ifdef CONFIG_PPC32
+/* 32-bit platform types */
/* We only need to define a new _MACH_xxx for machines which are part of
* a configuration which supports more than one type of different machine.
* This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac.
@@ -36,20 +43,6 @@
#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
-#define _GLOBAL(n)\
- .stabs __stringify(n:F-1),N_FUN,0,0,n;\
- .globl n;\
-n:
-
-/*
- * this is the minimum allowable io space due to the location
- * of the io areas on prep (first one at 0x80000000) but
- * as soon as I get around to remapping the io areas with the BATs
- * to match the mac we can raise this. -- Cort
- */
-#define TASK_SIZE (CONFIG_TASK_SIZE)
-
-#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC_MULTIPLATFORM
extern int _machine;
@@ -67,17 +60,49 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
#else
#define _machine 0
#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+/* Platforms supported by PPC64 */
+#define PLATFORM_PSERIES 0x0100
+#define PLATFORM_PSERIES_LPAR 0x0101
+#define PLATFORM_ISERIES_LPAR 0x0201
+#define PLATFORM_LPAR 0x0001
+#define PLATFORM_POWERMAC 0x0400
+#define PLATFORM_MAPLE 0x0500
+#define PLATFORM_BPA 0x1000
+
+/* Compatibility with drivers coming from PPC32 world */
+#define _machine (systemcfg->platform)
+#define _MACH_Pmac PLATFORM_POWERMAC
+#endif
+
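The PPC64 compatibility define above maps _machine onto systemcfg->platform, so a driver written against the 32-bit _machine convention compiles unchanged on 64-bit. A minimal sketch of how such a check reads (the helper name is illustrative, not part of this patch):

/*
 * Illustrative only: on 32-bit this compares the boot-time _MACH_* value,
 * on 64-bit it expands to systemcfg->platform == PLATFORM_POWERMAC.
 */
static int example_running_on_powermac(void)
{
	return _machine == _MACH_Pmac;
}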
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
+#define HMT_low() asm volatile("or 1,1,1 # low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
+#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
+#define HMT_high() asm volatile("or 3,3,3 # high priority")
+
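The HMT_* macros above encode the special "or rX,rX,rX" no-ops that SMT-capable processors interpret as hardware-thread priority hints; on CPUs without SMT they are plain no-ops. A hedged sketch of the usual pattern, dropping priority while polling and restoring it afterwards (the flag being polled is hypothetical):

static inline void example_poll_flag(volatile int *flag)
{
	while (!*flag)
		HMT_low();	/* give the sibling thread more issue slots */
	HMT_medium();		/* back to normal priority once the flag is set */
}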
+#ifdef __KERNEL__
+
+extern int have_of;
struct task_struct;
-void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);
-/*
- * Create a new kernel thread.
- */
+/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Lazy FPU handling on uni-processor */
@@ -85,10 +110,37 @@ extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
extern struct task_struct *last_task_used_spe;
+#ifdef CONFIG_PPC32
+#define TASK_SIZE (CONFIG_TASK_SIZE)
+
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+#endif
+
+#ifdef CONFIG_PPC64
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
+
+/*
+ * 32-bit user address space is 4GB - 1 page
+ * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
+ */
+#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
+
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_USER32 : TASK_SIZE_USER64)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
+
+#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
+ TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
+#endif
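For reference, these 64-bit limits work out to 2^44 bytes (16TB) of user address space and an mmap search base at one quarter of the respective address-space size. A standalone sketch (assuming 4K pages and LP64) that just prints the resulting constants:

/*
 * Standalone sketch: the numbers the TASK_SIZE/TASK_UNMAPPED_BASE macros
 * produce, computed in userspace with an assumed 4K PAGE_SIZE.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long user64 = 0x0000100000000000UL;
	unsigned long user32 = 0x0000000100000000UL - PAGE_SIZE;

	printf("TASK_SIZE_USER64          = %#lx (16TB)\n", user64);
	printf("TASK_SIZE_USER32          = %#lx\n", user32);
	printf("TASK_UNMAPPED_BASE_USER64 = %#lx (4TB)\n", PAGE_ALIGN(user64 / 4));
	printf("TASK_UNMAPPED_BASE_USER32 = %#lx (1GB)\n", PAGE_ALIGN(user32 / 4));
	return 0;
}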
typedef struct {
unsigned long seg;
@@ -96,18 +148,31 @@ typedef struct {
struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
+#ifdef CONFIG_PPC64
+ unsigned long ksp_vsid;
+#endif
struct pt_regs *regs; /* Pointer to saved register state */
mm_segment_t fs; /* for get_fs() validation */
+#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
- int fpexc_mode; /* floating-point exception mode */
signed long last_syscall;
+#endif
#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
unsigned long dbcr0; /* debug control register values */
unsigned long dbcr1;
#endif
double fpr[32]; /* Complete floating point set */
+#ifdef CONFIG_PPC32
unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */
+#endif
unsigned long fpscr; /* Floating point status */
+ int fpexc_mode; /* floating-point exception mode */
+#ifdef CONFIG_PPC64
+ unsigned long start_tb; /* Start purr when proc switched in */
+ unsigned long accum_tb; /* Total accumulated purr for process */
+ unsigned long vdso_base; /* base of the vDSO library */
+#endif
+ unsigned long dabr; /* Data address breakpoint register */
#ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */
vector128 vr[32] __attribute((aligned(16)));
@@ -128,51 +193,58 @@ struct thread_struct {
#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+
+#ifdef CONFIG_PPC32
#define INIT_THREAD { \
.ksp = INIT_SP, \
.fs = KERNEL_DS, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
}
+#else
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
+ .fs = KERNEL_DS, \
+ .fpr = {0}, \
+ .fpscr = 0, \
+ .fpexc_mode = MSR_FE0|MSR_FE1, \
+}
+#endif
/*
* Return saved PC of a blocked thread. For now, this is the "user" PC
*/
-#define thread_saved_pc(tsk) \
- ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define thread_saved_pc(tsk) \
+ ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
+#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
/* Get/set floating-point exception mode */
-#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
-#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
+#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
+#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
-static inline unsigned int __unpack_fe01(unsigned int msr_bits)
+static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}
-static inline unsigned int __pack_fe01(unsigned int fpmode)
+static inline unsigned long __pack_fe01(unsigned int fpmode)
{
return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
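__unpack_fe01()/__pack_fe01() translate between the two-bit fpexc mode used by the prctl interface and the MSR_FE0 (bit 11) / MSR_FE1 (bit 8) bits in the MSR. A standalone sketch that mirrors the helpers and checks the round trip (constants copied from this header; the test harness itself is illustrative):

#include <assert.h>

#define MSR_FE0	(1UL << 11)
#define MSR_FE1	(1UL << 8)

static unsigned int unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static unsigned long pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}

int main(void)
{
	unsigned int mode;

	/* all four fpexc modes survive pack/unpack unchanged */
	for (mode = 0; mode < 4; mode++)
		assert(unpack_fe01(pack_fe01(mode)) == mode);
	return 0;
}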
-/* in process.c - for early bootup debug -- Cort */
-int ll_printk(const char *, ...);
-void ll_puts(const char *);
-
-/* In misc.c */
-void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-
-#define have_of (_machine == _MACH_chrp || _machine == _MACH_Pmac)
-
+#ifdef CONFIG_PPC64
+#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
+#else
#define cpu_relax() barrier()
+#endif
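cpu_relax() is what generic spin-wait loops call between polls; on PPC64 it briefly drops and restores SMT priority so a spinning thread does not starve its sibling, while on 32-bit it is only a compiler barrier. The canonical usage shape, sketched with a hypothetical shared counter:

static inline void example_wait_for(volatile unsigned long *seq,
				    unsigned long target)
{
	while (*seq != target)
		cpu_relax();	/* polite busy-wait on either word size */
}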
/*
* Prefetch macros.
@@ -181,21 +253,28 @@ void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
{
- __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
{
- __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
#define spin_lock_prefetch(x) prefetchw(x)
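prefetch() issues dcbt (data cache block touch) and prefetchw() issues dcbtst (touch for store). One common reason for the added NULL guard is list traversal, where the next pointer prefetched on the last iteration is legitimately NULL; dcbt/dcbtst are hints and do not fault, but prefetching from NULL still costs a useless translation. A sketch with an illustrative node type:

/*
 * Sketch only: prefetch the next node while the current one is processed.
 * "struct item" and the commented-out process() call are not from this patch.
 */
struct item {
	struct item *next;
	int payload;
};

static void example_walk(struct item *head)
{
	struct item *p;

	for (p = head; p; p = p->next) {
		prefetch(p->next);	/* may legitimately be NULL at the tail */
		/* process(p); */
	}
}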
-extern int emulate_altivec(struct pt_regs *regs);
-
-#endif /* !__ASSEMBLY__ */
+#ifdef CONFIG_PPC64
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#endif
-#endif /* __ASM_PPC_PROCESSOR_H */
#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 1402a2dedff..ff619630dff 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -6,53 +6,95 @@
* Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
*/
-#ifndef _ASM_POWERPC_REGS_H
-#define _ASM_POWERPC_REGS_H
+#ifndef _ASM_POWERPC_REG_H
+#define _ASM_POWERPC_REG_H
#ifdef __KERNEL__
#include <linux/stringify.h>
+#include <asm/cputable.h>
/* Pickup Book E specific registers. */
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include <asm/reg_booke.h>
#endif
-/* Machine State Register (MSR) Fields */
-#define MSR_SF (1<<63)
-#define MSR_ISF (1<<61)
-#define MSR_VEC (1<<25) /* Enable AltiVec */
-#define MSR_POW (1<<18) /* Enable Power Management */
-#define MSR_WE (1<<18) /* Wait State Enable */
-#define MSR_TGPR (1<<17) /* TLB Update registers in use */
-#define MSR_CE (1<<17) /* Critical Interrupt Enable */
-#define MSR_ILE (1<<16) /* Interrupt Little Endian */
-#define MSR_EE (1<<15) /* External Interrupt Enable */
-#define MSR_PR (1<<14) /* Problem State / Privilege Level */
-#define MSR_FP (1<<13) /* Floating Point enable */
-#define MSR_ME (1<<12) /* Machine Check Enable */
-#define MSR_FE0 (1<<11) /* Floating Exception mode 0 */
-#define MSR_SE (1<<10) /* Single Step */
-#define MSR_BE (1<<9) /* Branch Trace */
-#define MSR_DE (1<<9) /* Debug Exception Enable */
-#define MSR_FE1 (1<<8) /* Floating Exception mode 1 */
-#define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */
-#define MSR_IR (1<<5) /* Instruction Relocate */
-#define MSR_DR (1<<4) /* Data Relocate */
-#define MSR_PE (1<<3) /* Protection Enable */
-#define MSR_PX (1<<2) /* Protection Exclusive Mode */
-#define MSR_RI (1<<1) /* Recoverable Exception */
-#define MSR_LE (1<<0) /* Little Endian */
+#define MSR_SF_LG 63 /* Enable 64 bit mode */
+#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
+#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_VEC_LG 25 /* Enable AltiVec */
+#define MSR_POW_LG 18 /* Enable Power Management */
+#define MSR_WE_LG 18 /* Wait State Enable */
+#define MSR_TGPR_LG 17 /* TLB Update registers in use */
+#define MSR_CE_LG 17 /* Critical Interrupt Enable */
+#define MSR_ILE_LG 16 /* Interrupt Little Endian */
+#define MSR_EE_LG 15 /* External Interrupt Enable */
+#define MSR_PR_LG 14 /* Problem State / Privilege Level */
+#define MSR_FP_LG 13 /* Floating Point enable */
+#define MSR_ME_LG 12 /* Machine Check Enable */
+#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
+#define MSR_SE_LG 10 /* Single Step */
+#define MSR_BE_LG 9 /* Branch Trace */
+#define MSR_DE_LG 9 /* Debug Exception Enable */
+#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
+#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
+#define MSR_IR_LG 5 /* Instruction Relocate */
+#define MSR_DR_LG 4 /* Data Relocate */
+#define MSR_PE_LG 3 /* Protection Enable */
+#define MSR_PX_LG 2 /* Protection Exclusive Mode */
+#define MSR_PMM_LG 2 /* Performance monitor */
+#define MSR_RI_LG 1 /* Recoverable Exception */
+#define MSR_LE_LG 0 /* Little Endian */
+#ifdef __ASSEMBLY__
+#define __MASK(X) (1<<(X))
+#else
+#define __MASK(X) (1UL<<(X))
+#endif
+
+#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
+#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
+#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
+#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
+#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
+#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
+#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
+#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
+#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
+#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
+#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
+#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
+#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
+#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
+#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
+#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
+#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
+#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
+#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
+#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
+#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
+#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
+#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
+#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
+#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
+#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
+
+#ifdef CONFIG_PPC64
+#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
+#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
+
+#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
+#define MSR_USER64 MSR_USER32 | MSR_SF
+
+#else /* 32-bit */
/* Default MSR for kernel mode. */
#ifdef CONFIG_APUS_FAST_EXCEPT
#define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR)
-#endif
-
-#ifndef MSR_KERNEL
+#else
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
#endif
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
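The *_LG defines give MSR bit numbers and __MASK() turns them into masks; the 1UL form is needed on the C side so 64-bit-only bits such as MSR_SF (bit 63) do not overflow a 32-bit int, while the assembler variant can use a plain 1 because assembler arithmetic is wide enough. A standalone sketch (LP64 assumed, constants mirrored from this header, the sample MSR value is made up):

#include <stdio.h>

#define __MASK(X)	(1UL << (X))
#define MSR_SF		__MASK(63)	/* 64-bit mode */
#define MSR_EE		__MASK(15)	/* external interrupts enabled */
#define MSR_PR		__MASK(14)	/* problem (user) state */

int main(void)
{
	unsigned long msr = MSR_SF | MSR_EE;	/* hypothetical saved MSR */

	printf("MSR_EE = %#lx, MSR_SF = %#lx\n", MSR_EE, MSR_SF);
	printf("interrupts %s, %s state\n",
	       (msr & MSR_EE) ? "on" : "off",
	       (msr & MSR_PR) ? "user" : "kernel");
	return 0;
}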
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
@@ -85,6 +127,9 @@
/* Special Purpose Registers (SPRNs)*/
#define SPRN_CTR 0x009 /* Count Register */
+#define SPRN_CTRLF 0x088
+#define SPRN_CTRLT 0x098
+#define CTRL_RUNLATCH 0x1
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
#define SPRN_DAR 0x013 /* Data Address Register */
@@ -193,6 +238,16 @@
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
+#define SPRN_HID6 0x3F9 /* BE HID 6 */
+#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
+#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
+#define SPRN_TSCR 0x399 /* Thread switch control on BE */
+#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
+#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
+#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
+#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
+#define SPRN_TSC 0x3FD /* Thread switch control on others */
+#define SPRN_TST 0x3FC /* Thread switch timeout on others */
#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
@@ -276,22 +331,18 @@
#define L3CR_L3DO 0x00000040 /* L3 data only mode */
#define L3CR_PMEN 0x00000004 /* L3 private memory enable */
#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */
+
#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */
#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 1 */
#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */
#define SPRN_LDSTDB 0x3f4 /* */
#define SPRN_LR 0x008 /* Link Register */
-#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
-#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
#ifndef SPRN_PIR
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
-#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
-#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
-#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
-#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
+#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
#define SPRN_PVR 0x11F /* Processor Version Register */
#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
@@ -333,6 +384,52 @@
#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
+/* Performance monitor SPRs */
+#ifdef CONFIG_PPC64
+#define SPRN_MMCR0 795
+#define MMCR0_FC 0x80000000UL /* freeze counters */
+#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
+#define MMCR0_KERNEL_DISABLE MMCR0_FCS
+#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
+#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
+#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
+#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
+#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
+#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
+#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
+#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
+#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
+#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
+#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
+#define SPRN_MMCR1 798
+#define SPRN_MMCRA 0x312
+#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
+#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
+#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define SPRN_PMC1 787
+#define SPRN_PMC2 788
+#define SPRN_PMC3 789
+#define SPRN_PMC4 790
+#define SPRN_PMC5 791
+#define SPRN_PMC6 792
+#define SPRN_PMC7 793
+#define SPRN_PMC8 794
+#define SPRN_SIAR 780
+#define SPRN_SDAR 781
+
+#else /* 32-bit */
+#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
+#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
+#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
+#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
+#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
+#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
+
/* Bit definitions for MMCR0 and PMC1 / PMC2. */
#define MMCR0_PMC1_CYCLES (1 << 7)
#define MMCR0_PMC1_ICACHEMISS (5 << 7)
@@ -342,14 +439,15 @@
#define MMCR0_PMC2_ITLB 0x7
#define MMCR0_PMC2_LOADMISSTIME 0x5
#define MMCR0_PMXE (1 << 26)
-
-/* Processor Version Register */
+#endif
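With the performance-monitor SPRs now defined for both word sizes, a perfmon user reads the counters through the mfspr()/mtspr() macros defined later in this header. A hedged sketch using the PPC64 numbering (PMU ownership, interrupt masking and error handling are all omitted; the helper name is illustrative):

static inline unsigned long example_read_pmc1_frozen(void)
{
	unsigned long mmcr0 = mfspr(SPRN_MMCR0);
	unsigned long count;

	mtspr(SPRN_MMCR0, mmcr0 | MMCR0_FC);	/* freeze the counters */
	count = mfspr(SPRN_PMC1);
	mtspr(SPRN_MMCR0, mmcr0);		/* restore previous state */
	return count;
}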
/* Processor Version Register (PVR) field extraction */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
+#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv))
+
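PVR_VER()/PVR_REV() split the Processor Version Register into its 16-bit version and revision fields, and __is_processor() (moved here from asm-ppc64/system.h, see the removal at the end of this diff) compares the live version field against one of the PV_* constants defined below. A standalone decode sketch with a made-up 970FX-style value:

#include <stdio.h>

#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)	/* Version field */
#define PVR_REV(pvr)	(((pvr) >> 0) & 0xFFFF)		/* Revision field */

int main(void)
{
	unsigned int pvr = 0x003C0300;	/* hypothetical 970FX (PV_970FX) revision */

	printf("version %#06x revision %#06x\n", PVR_VER(pvr), PVR_REV(pvr));
	return 0;
}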
/*
* IBM has further subdivided the standard PowerPC 16-bit version and
* revision subfields of the PVR for the PowerPC 403s into the following:
@@ -405,42 +503,105 @@
#define PVR_8245 0x80811014
#define PVR_8260 PVR_8240
-#if 0
-/* Segment Registers */
-#define SR0 0
-#define SR1 1
-#define SR2 2
-#define SR3 3
-#define SR4 4
-#define SR5 5
-#define SR6 6
-#define SR7 7
-#define SR8 8
-#define SR9 9
-#define SR10 10
-#define SR11 11
-#define SR12 12
-#define SR13 13
-#define SR14 14
-#define SR15 15
-#endif
+/* 64-bit processors */
+/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
+#define PV_NORTHSTAR 0x0033
+#define PV_PULSAR 0x0034
+#define PV_POWER4 0x0035
+#define PV_ICESTAR 0x0036
+#define PV_SSTAR 0x0037
+#define PV_POWER4p 0x0038
+#define PV_970 0x0039
+#define PV_POWER5 0x003A
+#define PV_POWER5p 0x003B
+#define PV_970FX 0x003C
+#define PV_630 0x0040
+#define PV_630p 0x0041
+#define PV_970MP 0x0044
+#define PV_BE 0x0070
+
+/*
+ * Number of entries in the SLB. If this ever changes we should handle
+ * it with a cpu feature fixup.
+ */
+#define SLB_NUM_ENTRIES 64
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
-#define mfmsr() ({unsigned int rval; \
+#define mfmsr() ({unsigned long rval; \
asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+#ifdef CONFIG_PPC64
+#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+ : : "r" (v))
+#define mtmsrd(v) __mtmsrd((v), 0)
+#else
#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
+#endif
-#define mfspr(rn) ({unsigned int rval; \
+#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
+#define mftb() ({unsigned long rval; \
+ asm volatile("mftb %0" : "=r" (rval)); rval;})
+#define mftbl() ({unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); rval;})
+
+#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
+#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
+
+#ifdef CONFIG_PPC32
#define mfsrin(v) ({unsigned int rval; \
asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
rval;})
+#endif
#define proc_trap() asm volatile("trap")
+
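Each accessor macro above expands to a single instruction, so, for example, timing a code sequence in timebase ticks is just two mftb() reads around it. A sketch (kernel context; the callee is a hypothetical function pointer):

static inline unsigned long example_time_in_tb_ticks(void (*do_something)(void))
{
	unsigned long start = mftb();	/* timebase before */

	do_something();
	return mftb() - start;		/* elapsed timebase ticks */
}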
+#ifdef CONFIG_PPC64
+static inline void ppc64_runlatch_on(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL)) {
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl |= CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+}
+
+static inline void ppc64_runlatch_off(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL)) {
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+}
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+ mttbl(0);
+ mttbu(upper);
+ mttbl(lower);
+}
+#endif
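The run latch tells the platform whether a hardware thread is doing useful work, so idle code typically clears it while waiting and sets it again on exit; set_tb() zeroes TBL first so that a carry out of the low word cannot bump TBU between the two writes. A hedged sketch of the idle-loop shape (the resched flag is illustrative; real code would also nap or lower SMT priority while waiting):

static inline void example_idle_loop(volatile int *need_resched)
{
	ppc64_runlatch_off();		/* tell the platform we are idle */
	while (!*need_resched)
		;			/* placeholder wait */
	ppc64_runlatch_on();		/* back to useful work */
}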
+
+#define __get_SP() ({unsigned long sp; \
+ asm volatile("mr %0,1": "=r" (sp)); sp;})
+
+#else /* __ASSEMBLY__ */
+
+#define RUNLATCH_ON(REG) \
+BEGIN_FTR_SECTION \
+ mfspr (REG),SPRN_CTRLF; \
+ ori (REG),(REG),CTRL_RUNLATCH; \
+ mtspr SPRN_CTRLT,(REG); \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_REGS_H */
+#endif /* _ASM_POWERPC_REG_H */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
deleted file mode 100644
index 6447fbee7d6..00000000000
--- a/include/asm-ppc64/processor.h
+++ /dev/null
@@ -1,567 +0,0 @@
-#ifndef __ASM_PPC64_PROCESSOR_H
-#define __ASM_PPC64_PROCESSOR_H
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/stringify.h>
-#ifndef __ASSEMBLY__
-#include <linux/config.h>
-#include <asm/atomic.h>
-#include <asm/ppcdebug.h>
-#include <asm/a.out.h>
-#endif
-#include <asm/ptrace.h>
-#include <asm/types.h>
-#include <asm/systemcfg.h>
-#include <asm/cputable.h>
-
-/* Machine State Register (MSR) Fields */
-#define MSR_SF_LG 63 /* Enable 64 bit mode */
-#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
-#define MSR_HV_LG 60 /* Hypervisor state */
-#define MSR_VEC_LG 25 /* Enable AltiVec */
-#define MSR_POW_LG 18 /* Enable Power Management */
-#define MSR_WE_LG 18 /* Wait State Enable */
-#define MSR_TGPR_LG 17 /* TLB Update registers in use */
-#define MSR_CE_LG 17 /* Critical Interrupt Enable */
-#define MSR_ILE_LG 16 /* Interrupt Little Endian */
-#define MSR_EE_LG 15 /* External Interrupt Enable */
-#define MSR_PR_LG 14 /* Problem State / Privilege Level */
-#define MSR_FP_LG 13 /* Floating Point enable */
-#define MSR_ME_LG 12 /* Machine Check Enable */
-#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
-#define MSR_SE_LG 10 /* Single Step */
-#define MSR_BE_LG 9 /* Branch Trace */
-#define MSR_DE_LG 9 /* Debug Exception Enable */
-#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
-#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
-#define MSR_IR_LG 5 /* Instruction Relocate */
-#define MSR_DR_LG 4 /* Data Relocate */
-#define MSR_PE_LG 3 /* Protection Enable */
-#define MSR_PX_LG 2 /* Protection Exclusive Mode */
-#define MSR_PMM_LG 2 /* Performance monitor */
-#define MSR_RI_LG 1 /* Recoverable Exception */
-#define MSR_LE_LG 0 /* Little Endian */
-
-#ifdef __ASSEMBLY__
-#define __MASK(X) (1<<(X))
-#else
-#define __MASK(X) (1UL<<(X))
-#endif
-
-#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
-#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
-#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
-#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
-#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
-#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
-#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
-#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
-#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
-#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
-#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
-#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
-#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
-#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
-#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
-#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
-#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
-#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
-#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
-#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
-#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
-#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
-#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
-#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
-#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
-#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
-
-#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
-#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
-
-#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64 MSR_USER32 | MSR_SF
-
-/* Floating Point Status and Control Register (FPSCR) Fields */
-
-#define FPSCR_FX 0x80000000 /* FPU exception summary */
-#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
-#define FPSCR_VX 0x20000000 /* Invalid operation summary */
-#define FPSCR_OX 0x10000000 /* Overflow exception summary */
-#define FPSCR_UX 0x08000000 /* Underflow exception summary */
-#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
-#define FPSCR_XX 0x02000000 /* Inexact exception summary */
-#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
-#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
-#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
-#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
-#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
-#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
-#define FPSCR_FR 0x00040000 /* Fraction rounded */
-#define FPSCR_FI 0x00020000 /* Fraction inexact */
-#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
-#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
-#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
-#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
-#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
-#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
-#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
-#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
-#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
-#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
-#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
-#define FPSCR_RN 0x00000003 /* FPU rounding control */
-
-/* Special Purpose Registers (SPRNs)*/
-
-#define SPRN_CTR 0x009 /* Count Register */
-#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
-#define DABR_TRANSLATION (1UL << 2)
-#define SPRN_DAR 0x013 /* Data Address Register */
-#define SPRN_DEC 0x016 /* Decrement Register */
-#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
-#define DSISR_NOHPTE 0x40000000 /* no translation found */
-#define DSISR_PROTFAULT 0x08000000 /* protection fault */
-#define DSISR_ISSTORE 0x02000000 /* access was a store */
-#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
-#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
-#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
-#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
-#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
-#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
-#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
-#define SPRN_HID4 0x3F4 /* 970 HID4 */
-#define SPRN_HID5 0x3F6 /* 970 HID5 */
-#define SPRN_HID6 0x3F9 /* BE HID 6 */
-#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
-#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
-#define SPRN_TSCR 0x399 /* Thread switch control on BE */
-#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
-#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
-#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
-#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
-#define SPRN_TSC 0x3FD /* Thread switch control on others */
-#define SPRN_TST 0x3FC /* Thread switch timeout on others */
-#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Regsiter */
-#define SPRN_LR 0x008 /* Link Register */
-#define SPRN_PIR 0x3FF /* Processor Identification Register */
-#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
-#define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */
-#define SPRN_PVR 0x11F /* Processor Version Register */
-#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
-#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
-#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
-#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
-#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
-#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
-#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
-#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
-#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
-#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
-#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
-#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
-#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, W/O) */
-#define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */
-#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
-#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
-#define SPRN_XER 0x001 /* Fixed Point Exception Register */
-#define SPRN_VRSAVE 0x100 /* Vector save */
-#define SPRN_CTRLF 0x088
-#define SPRN_CTRLT 0x098
-#define CTRL_RUNLATCH 0x1
-
-/* Performance monitor SPRs */
-#define SPRN_SIAR 780
-#define SPRN_SDAR 781
-#define SPRN_MMCRA 786
-#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
-#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
-#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
-#define SPRN_PMC1 787
-#define SPRN_PMC2 788
-#define SPRN_PMC3 789
-#define SPRN_PMC4 790
-#define SPRN_PMC5 791
-#define SPRN_PMC6 792
-#define SPRN_PMC7 793
-#define SPRN_PMC8 794
-#define SPRN_MMCR0 795
-#define MMCR0_FC 0x80000000UL /* freeze counters. set to 1 on a perfmon exception */
-#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
-#define MMCR0_KERNEL_DISABLE MMCR0_FCS
-#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
-#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
-#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
-#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
-#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
-#define MMCR0_FCECE 0x02000000UL /* freeze counters on enabled condition or event */
-/* time base exception enable */
-#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
-#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
-#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
-#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
-#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
-#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
-#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
-#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
-#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
-#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
-#define SPRN_MMCR1 798
-
-/* Short-hand versions for a number of the above SPRNs */
-
-#define CTR SPRN_CTR /* Counter Register */
-#define DAR SPRN_DAR /* Data Address Register */
-#define DABR SPRN_DABR /* Data Address Breakpoint Register */
-#define DEC SPRN_DEC /* Decrement Register */
-#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
-#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
-#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
-#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
-#define TSC SPRN_TSC /* Thread switch control */
-#define TST SPRN_TST /* Thread switch timeout */
-#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
-#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
-#define __LR SPRN_LR
-#define PVR SPRN_PVR /* Processor Version */
-#define PIR SPRN_PIR /* Processor ID */
-#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
-#define SDR1 SPRN_SDR1 /* MMU hash base register */
-#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
-#define SPR1 SPRN_SPRG1
-#define SPR2 SPRN_SPRG2
-#define SPR3 SPRN_SPRG3
-#define SPRG0 SPRN_SPRG0
-#define SPRG1 SPRN_SPRG1
-#define SPRG2 SPRN_SPRG2
-#define SPRG3 SPRN_SPRG3
-#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
-#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
-#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
-#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
-#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
-#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
-#define XER SPRN_XER
-
-/* Processor Version Register (PVR) field extraction */
-
-#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
-#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
-
-/* Processor Version Numbers */
-#define PV_NORTHSTAR 0x0033
-#define PV_PULSAR 0x0034
-#define PV_POWER4 0x0035
-#define PV_ICESTAR 0x0036
-#define PV_SSTAR 0x0037
-#define PV_POWER4p 0x0038
-#define PV_970 0x0039
-#define PV_POWER5 0x003A
-#define PV_POWER5p 0x003B
-#define PV_970FX 0x003C
-#define PV_630 0x0040
-#define PV_630p 0x0041
-#define PV_970MP 0x0044
-#define PV_BE 0x0070
-
-/* Platforms supported by PPC64 */
-#define PLATFORM_PSERIES 0x0100
-#define PLATFORM_PSERIES_LPAR 0x0101
-#define PLATFORM_ISERIES_LPAR 0x0201
-#define PLATFORM_LPAR 0x0001
-#define PLATFORM_POWERMAC 0x0400
-#define PLATFORM_MAPLE 0x0500
-#define PLATFORM_BPA 0x1000
-
-/* Compatibility with drivers coming from PPC32 world */
-#define _machine (systemcfg->platform)
-#define _MACH_Pmac PLATFORM_POWERMAC
-
-/*
- * List of interrupt controllers.
- */
-#define IC_INVALID 0
-#define IC_OPEN_PIC 1
-#define IC_PPC_XIC 2
-#define IC_BPA_IIC 3
-#define IC_ISERIES 4
-
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
-
-#ifdef __ASSEMBLY__
-
-#define _GLOBAL(name) \
- .section ".text"; \
- .align 2 ; \
- .globl name; \
- .globl GLUE(.,name); \
- .section ".opd","aw"; \
-name: \
- .quad GLUE(.,name); \
- .quad .TOC.@tocbase; \
- .quad 0; \
- .previous; \
- .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#define _KPROBE(name) \
- .section ".kprobes.text","a"; \
- .align 2 ; \
- .globl name; \
- .globl GLUE(.,name); \
- .section ".opd","aw"; \
-name: \
- .quad GLUE(.,name); \
- .quad .TOC.@tocbase; \
- .quad 0; \
- .previous; \
- .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#define _STATIC(name) \
- .section ".text"; \
- .align 2 ; \
- .section ".opd","aw"; \
-name: \
- .quad GLUE(.,name); \
- .quad .TOC.@tocbase; \
- .quad 0; \
- .previous; \
- .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#else /* __ASSEMBLY__ */
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-/* Macros for setting and retrieving special purpose registers */
-
-#define mfmsr() ({unsigned long rval; \
- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
-
-#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
- : : "r" (v))
-#define mtmsrd(v) __mtmsrd((v), 0)
-
-#define mfspr(rn) ({unsigned long rval; \
- asm volatile("mfspr %0," __stringify(rn) \
- : "=r" (rval)); rval;})
-#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
-
-#define mftb() ({unsigned long rval; \
- asm volatile("mftb %0" : "=r" (rval)); rval;})
-
-#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
-#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
-
-#define mfasr() ({unsigned long rval; \
- asm volatile("mfasr %0" : "=r" (rval)); rval;})
-
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
-#define HMT_low() asm volatile("or 1,1,1 # low priority")
-#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
-#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
-#define HMT_high() asm volatile("or 3,3,3 # high priority")
-
-static inline void set_tb(unsigned int upper, unsigned int lower)
-{
- mttbl(0);
- mttbu(upper);
- mttbl(lower);
-}
-
-#define __get_SP() ({unsigned long sp; \
- asm volatile("mr %0,1": "=r" (sp)); sp;})
-
-#ifdef __KERNEL__
-
-extern int have_of;
-extern u64 ppc64_interrupt_controller;
-
-struct task_struct;
-void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
-void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/* Create a new kernel thread. */
-extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
-/* Lazy FPU handling on uni-processor */
-extern struct task_struct *last_task_used_math;
-extern struct task_struct *last_task_used_altivec;
-
-/* 64-bit user address space is 44-bits (16TB user VM) */
-#define TASK_SIZE_USER64 (0x0000100000000000UL)
-
-/*
- * 32-bit user address space is 4GB - 1 page
- * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
- */
-#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
-
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
- TASK_SIZE_USER32 : TASK_SIZE_USER64)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
-
-#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
- TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-struct thread_struct {
- unsigned long ksp; /* Kernel stack pointer */
- unsigned long ksp_vsid;
- struct pt_regs *regs; /* Pointer to saved register state */
- mm_segment_t fs; /* for get_fs() validation */
- double fpr[32]; /* Complete floating point set */
- unsigned long fpscr; /* Floating point status (plus pad) */
- unsigned long fpexc_mode; /* Floating-point exception mode */
- unsigned long start_tb; /* Start purr when proc switched in */
- unsigned long accum_tb; /* Total accumilated purr for process */
- unsigned long vdso_base; /* base of the vDSO library */
- unsigned long dabr; /* Data address breakpoint register */
-#ifdef CONFIG_ALTIVEC
- /* Complete AltiVec register set */
- vector128 vr[32] __attribute((aligned(16)));
- /* AltiVec status */
- vector128 vscr __attribute((aligned(16)));
- unsigned long vrsave;
- int used_vr; /* set if process has used altivec */
-#endif /* CONFIG_ALTIVEC */
-};
-
-#define ARCH_MIN_TASKALIGN 16
-
-#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
-
-#define INIT_THREAD { \
- .ksp = INIT_SP, \
- .regs = (struct pt_regs *)INIT_SP - 1, \
- .fs = KERNEL_DS, \
- .fpr = {0}, \
- .fpscr = 0, \
- .fpexc_mode = MSR_FE0|MSR_FE1, \
-}
-
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk) \
- ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
-unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
-
-/* Get/set floating-point exception mode */
-#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
-#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
-
-extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
-extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
-
-static inline unsigned int __unpack_fe01(unsigned long msr_bits)
-{
- return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
-}
-
-static inline unsigned long __pack_fe01(unsigned int fpmode)
-{
- return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
-}
-
-#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
-
-/*
- * Prefetch macros.
- */
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-static inline void prefetch(const void *x)
-{
- if (unlikely(!x))
- return;
-
- __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
-}
-
-static inline void prefetchw(const void *x)
-{
- if (unlikely(!x))
- return;
-
- __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
-}
-
-#define spin_lock_prefetch(x) prefetchw(x)
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
-static inline void ppc64_runlatch_on(void)
-{
- unsigned long ctrl;
-
- if (cpu_has_feature(CPU_FTR_CTRL)) {
- ctrl = mfspr(SPRN_CTRLF);
- ctrl |= CTRL_RUNLATCH;
- mtspr(SPRN_CTRLT, ctrl);
- }
-}
-
-static inline void ppc64_runlatch_off(void)
-{
- unsigned long ctrl;
-
- if (cpu_has_feature(CPU_FTR_CTRL)) {
- ctrl = mfspr(SPRN_CTRLF);
- ctrl &= ~CTRL_RUNLATCH;
- mtspr(SPRN_CTRLT, ctrl);
- }
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#define RUNLATCH_ON(REG) \
-BEGIN_FTR_SECTION \
- mfspr (REG),SPRN_CTRLF; \
- ori (REG),(REG),CTRL_RUNLATCH; \
- mtspr SPRN_CTRLT,(REG); \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-#endif
-
-/*
- * Number of entries in the SLB. If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES 64
-
-#endif /* __ASM_PPC64_PROCESSOR_H */
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 1fbdc9f0590..c04d62c592a 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -144,13 +144,6 @@ struct thread_struct;
extern struct task_struct * _switch(struct thread_struct *prev,
struct thread_struct *next);
-static inline int __is_processor(unsigned long pv)
-{
- unsigned long pvr;
- asm("mfspr %0, 0x11F" : "=r" (pvr));
- return(PVR_VER(pvr) == pv);
-}
-
/*
* Atomic exchange
*