Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/atomic.h          38
-rw-r--r--  include/asm-powerpc/cputable.h        38
-rw-r--r--  include/asm-powerpc/cputime.h        202
-rw-r--r--  include/asm-powerpc/firmware.h        16
-rw-r--r--  include/asm-powerpc/irq.h              6
-rw-r--r--  include/asm-powerpc/iseries/mf.h       7
-rw-r--r--  include/asm-powerpc/lmb.h             19
-rw-r--r--  include/asm-powerpc/mmu.h              1
-rw-r--r--  include/asm-powerpc/paca.h             7
-rw-r--r--  include/asm-powerpc/percpu.h           7
-rw-r--r--  include/asm-powerpc/pgtable-4k.h      11
-rw-r--r--  include/asm-powerpc/pgtable.h          9
-rw-r--r--  include/asm-powerpc/ppc_asm.h         42
-rw-r--r--  include/asm-powerpc/processor.h        1
-rw-r--r--  include/asm-powerpc/prom.h             6
-rw-r--r--  include/asm-powerpc/rwsem.h            2
-rw-r--r--  include/asm-powerpc/synch.h            2
-rw-r--r--  include/asm-powerpc/system.h           6
-rw-r--r--  include/asm-powerpc/time.h            15
19 files changed, 358 insertions(+), 77 deletions(-)
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 147a38dcc76..bb3c0ab7e66 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -8,6 +8,7 @@
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
+#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
@@ -176,20 +177,29 @@ static __inline__ int atomic_dec_return(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int t;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # atomic_add_unless\n\
+ cmpw 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%1 \n\
+ bne- 1b \n"
+ ISYNC_ON_SMP
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
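
For reference, the new lwarx/stwcx. implementation above has exactly the semantics of the cmpxchg loop it replaces; a minimal C sketch of those semantics, using the atomic_read()/atomic_cmpxchg() helpers defined elsewhere in atomic.h:

/* Add @a to @v unless @v == @u; return non-zero iff the add happened. */
static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u) {
		old = atomic_cmpxchg(v, c, c + a);	/* try to install c + a */
		if (old == c)
			break;				/* our store won the race */
		c = old;				/* v changed under us; retry */
	}
	return c != u;
}
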
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 5638518968c..fe45f6f3a4b 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -102,38 +102,40 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#ifdef __powerpc64__
/* Add the 64b processor unique features in the top half of the word */
-#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
-#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
-#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
-#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
-#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
+#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
+#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
+#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
+#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
+#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
#define CPU_FTR_CTRL ASM_CONST(0x0000008000000000)
-#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
-#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
+#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0000100000000000)
#define CPU_FTR_PAUSE_ZERO ASM_CONST(0x0000200000000000)
+#define CPU_FTR_PURR ASM_CONST(0x0000400000000000)
#else
/* ensure on 32b processors the flags are available for compiling but
* don't do anything */
-#define CPU_FTR_SLB ASM_CONST(0x0)
-#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
-#define CPU_FTR_TLBIEL ASM_CONST(0x0)
-#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
-#define CPU_FTR_IABR ASM_CONST(0x0)
-#define CPU_FTR_MMCRA ASM_CONST(0x0)
+#define CPU_FTR_SLB ASM_CONST(0x0)
+#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
+#define CPU_FTR_TLBIEL ASM_CONST(0x0)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
+#define CPU_FTR_IABR ASM_CONST(0x0)
+#define CPU_FTR_MMCRA ASM_CONST(0x0)
#define CPU_FTR_CTRL ASM_CONST(0x0)
-#define CPU_FTR_SMT ASM_CONST(0x0)
-#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
+#define CPU_FTR_SMT ASM_CONST(0x0)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0)
#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0)
#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0)
+#define CPU_FTR_PURR ASM_CONST(0x0)
#endif
#ifndef __ASSEMBLY__
@@ -318,7 +320,7 @@ enum {
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
CPU_FTR_MMCRA | CPU_FTR_SMT |
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
- CPU_FTR_MMCRA_SIHV,
+ CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR,
CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT |
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index 6d68ad7e0ea..a21185d4788 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -1 +1,203 @@
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * the same units as the timebase. Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#include <asm-generic/cputime.h>
+#else
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero ((cputime_t)0)
+#define cputime_max ((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+
+#define cputime64_zero ((cputime64_t)0)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime_to_cputime64(__ct) (__ct)
+
+#ifdef __KERNEL__
+
+/*
+ * Convert cputime <-> jiffies
+ */
+extern u64 __cputime_jiffies_factor;
+
+static inline unsigned long cputime_to_jiffies(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned long jif)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = jif % HZ;
+ sec = jif / HZ;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, HZ);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+extern u64 __cputime_msec_factor;
+
+static inline unsigned long cputime_to_msecs(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_msec_factor);
+}
+
+static inline cputime_t msecs_to_cputime(const unsigned long ms)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = ms % 1000;
+ sec = ms / 1000;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, 1000);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+/*
+ * Convert cputime <-> seconds
+ */
+extern u64 __cputime_sec_factor;
+
+static inline unsigned long cputime_to_secs(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_sec_factor);
+}
+
+static inline cputime_t secs_to_cputime(const unsigned long sec)
+{
+ return (cputime_t) sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timespec
+ */
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
+{
+ u64 x = ct;
+ unsigned int frac;
+
+ frac = do_div(x, tb_ticks_per_sec);
+ p->tv_sec = x;
+ x = (u64) frac * 1000000000;
+ do_div(x, tb_ticks_per_sec);
+ p->tv_nsec = x;
+}
+
+static inline cputime_t timespec_to_cputime(const struct timespec *p)
+{
+ cputime_t ct;
+
+ ct = (u64) p->tv_nsec * tb_ticks_per_sec;
+ do_div(ct, 1000000000);
+ return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timeval
+ */
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
+{
+ u64 x = ct;
+ unsigned int frac;
+
+ frac = do_div(x, tb_ticks_per_sec);
+ p->tv_sec = x;
+ x = (u64) frac * 1000000;
+ do_div(x, tb_ticks_per_sec);
+ p->tv_usec = x;
+}
+
+static inline cputime_t timeval_to_cputime(const struct timeval *p)
+{
+ cputime_t ct;
+
+ ct = (u64) p->tv_usec * tb_ticks_per_sec;
+ do_div(ct, 1000000);
+ return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
+ */
+extern u64 __cputime_clockt_factor;
+
+static inline unsigned long cputime_to_clock_t(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_clockt_factor);
+}
+
+static inline cputime_t clock_t_to_cputime(const unsigned long clk)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = clk % USER_HZ;
+ sec = clk / USER_HZ;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, USER_HZ);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* __POWERPC_CPUTIME_H */
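
The jiffies, msecs and clock_t conversion helpers above all use the same overflow-careful pattern: convert the whole-second part and the sub-second remainder separately, so the intermediate multiply by tb_ticks_per_sec cannot overflow for large inputs. A standalone sketch of that pattern; the timebase frequency and HZ below are made-up example values, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define HZ 250						/* example jiffy rate */
static const uint64_t tb_ticks_per_sec = 512000000ULL;	/* example 512 MHz timebase */

static uint64_t jiffies_to_tb_ticks(unsigned long jif)
{
	uint64_t ct = jif % HZ;				/* sub-second jiffies */
	unsigned long sec = jif / HZ;			/* whole seconds */

	if (ct)
		ct = ct * tb_ticks_per_sec / HZ;	/* remainder cannot overflow */
	if (sec)
		ct += (uint64_t)sec * tb_ticks_per_sec;	/* whole seconds scale exactly */
	return ct;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)jiffies_to_tb_ticks(3 * HZ + 1));
	return 0;
}
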
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index f804b34cf06..ce3788224ed 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -41,6 +41,7 @@
#define FW_FEATURE_MULTITCE (1UL<<19)
#define FW_FEATURE_SPLPAR (1UL<<20)
#define FW_FEATURE_ISERIES (1UL<<21)
+#define FW_FEATURE_LPAR (1UL<<22)
enum {
#ifdef CONFIG_PPC64
@@ -51,10 +52,10 @@ enum {
FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
- FW_FEATURE_SPLPAR,
+ FW_FEATURE_SPLPAR | FW_FEATURE_LPAR,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES,
- FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES,
+ FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
+ FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
FW_FEATURE_POSSIBLE =
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
@@ -89,15 +90,6 @@ static inline unsigned long firmware_has_feature(unsigned long feature)
(FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
}
-#ifdef CONFIG_PPC_PSERIES
-typedef struct {
- unsigned long val;
- char * name;
-} firmware_feature_t;
-
-extern firmware_feature_t firmware_features_table[];
-#endif
-
extern void system_reset_fwnmi(void);
extern void machine_check_fwnmi(void);
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 8eb7e857ec4..51f87d9993b 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -479,6 +479,10 @@ extern int distribute_irqs;
struct irqaction;
struct pt_regs;
+#define __ARCH_HAS_DO_SOFTIRQ
+
+extern void __do_softirq(void);
+
#ifdef CONFIG_IRQSTACKS
/*
* Per-cpu stacks for handling hard and soft interrupts.
@@ -491,8 +495,6 @@ extern void call_do_softirq(struct thread_info *tp);
extern int call___do_IRQ(int irq, struct pt_regs *regs,
struct thread_info *tp);
-#define __ARCH_HAS_DO_SOFTIRQ
-
#else
#define irq_ctx_init()
diff --git a/include/asm-powerpc/iseries/mf.h b/include/asm-powerpc/iseries/mf.h
index 857e5202fc7..eb851a9c9e5 100644
--- a/include/asm-powerpc/iseries/mf.h
+++ b/include/asm-powerpc/iseries/mf.h
@@ -41,16 +41,11 @@ extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
unsigned count, MFCompleteHandler hdlr, void *userToken);
extern void mf_power_off(void);
-extern void mf_reboot(void);
+extern void mf_reboot(char *cmd);
extern void mf_display_src(u32 word);
extern void mf_display_progress(u16 value);
-extern void mf_clear_src(void);
extern void mf_init(void);
-extern int mf_get_rtc(struct rtc_time *tm);
-extern int mf_get_boot_rtc(struct rtc_time *tm);
-extern int mf_set_rtc(struct rtc_time *tm);
-
#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
index d3546c4c9f4..0c5880f7022 100644
--- a/include/asm-powerpc/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -19,8 +19,6 @@
#define MAX_LMB_REGIONS 128
-#define LMB_ALLOC_ANYWHERE 0
-
struct lmb_property {
unsigned long base;
unsigned long size;
@@ -43,20 +41,19 @@ extern struct lmb lmb;
extern void __init lmb_init(void);
extern void __init lmb_analyze(void);
-extern long __init lmb_add(unsigned long, unsigned long);
-extern long __init lmb_reserve(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
- unsigned long);
+extern long __init lmb_add(unsigned long base, unsigned long size);
+extern long __init lmb_reserve(unsigned long base, unsigned long size);
+extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
+extern unsigned long __init lmb_alloc_base(unsigned long size,
+ unsigned long align, unsigned long max_addr);
+extern unsigned long __init __lmb_alloc_base(unsigned long size,
+ unsigned long align, unsigned long max_addr);
extern unsigned long __init lmb_phys_mem_size(void);
extern unsigned long __init lmb_end_of_DRAM(void);
-extern unsigned long __init lmb_abs_to_phys(unsigned long);
-extern void __init lmb_enforce_memory_limit(unsigned long);
+extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
extern void lmb_dump_all(void);
-extern unsigned long io_hole_start;
-
static inline unsigned long
lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
{
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index b0b9a3f8cdc..31f721994bd 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -236,7 +236,6 @@ extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
-extern void mm_init_ppc64(void);
extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn,
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c9add8f1ad9..4465b95ebef 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -54,7 +54,7 @@ struct paca_struct {
#endif /* CONFIG_PPC_ISERIES */
/*
- * MAGIC: the spinlock functions in arch/ppc64/lib/locks.c
+ * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
* load lock_token and paca_index with a single lwz
* instruction. They must travel together and be properly
* aligned.
@@ -96,6 +96,11 @@ struct paca_struct {
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u8 proc_enabled; /* irq soft-enable flag */
+
+ /* Stuff for accurate time accounting */
+ u64 user_time; /* accumulated usermode TB ticks */
+ u64 system_time; /* accumulated system TB ticks */
+ u64 startpurr; /* PURR/TB value snapshot */
};
extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index e31922c50e5..464301cd0d0 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -27,10 +27,9 @@
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for (__i = 0; __i < NR_CPUS; __i++) \
- if (cpu_possible(__i)) \
- memcpy((pcpudst)+__per_cpu_offset(__i), \
- (src), (size)); \
+ for_each_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset(__i), \
+ (src), (size)); \
} while (0)
extern void setup_per_cpu_areas(void);
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index 80a7832d272..b2e18629932 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -62,9 +62,14 @@
/* shift to put page number into pte */
#define PTE_RPN_SHIFT (17)
-#define __real_pte(e,p) ((real_pte_t)(e))
-#define __rpte_to_pte(r) (r)
-#define __rpte_to_hidx(r,index) (pte_val((r)) >> 12)
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p) ((real_pte_t){(e)})
+#define __rpte_to_pte(r) ((r).pte)
+#else
+#define __real_pte(e,p) (e)
+#define __rpte_to_pte(r) (__pte(r))
+#endif
+#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
do { \
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 185ee15963a..e9f1f4627e6 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -188,9 +188,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
+
#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (pmd_val(pmd) == 0)
+#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
+ || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd) (pmd_val(pmd) != 0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
@@ -198,7 +202,8 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) ((pud_val(pud)) == 0)
+#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
+ || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud) (pud_val(pud) != 0)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
#define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index ab8688d3902..dd1c0a913d5 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -15,6 +15,48 @@
#define SZL (BITS_PER_LONG/8)
/*
+ * Stuff for accurate CPU time accounting.
+ * These macros handle transitions between user and system state
+ * in exception entry and exit and accumulate time to the
+ * user_time and system_time fields in the paca.
+ */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#else
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
+ beq 2f; /* if from kernel mode */ \
+BEGIN_FTR_SECTION; \
+ mfspr ra,SPRN_PURR; /* get processor util. reg */ \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
+BEGIN_FTR_SECTION; \
+ mftb ra; /* or get TB if no PURR */ \
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
+ ld rb,PACA_STARTPURR(r13); \
+ std ra,PACA_STARTPURR(r13); \
+ subf rb,rb,ra; /* subtract start value */ \
+ ld ra,PACA_USER_TIME(r13); \
+ add ra,ra,rb; /* add on to user time */ \
+ std ra,PACA_USER_TIME(r13); \
+2:
+
+#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
+BEGIN_FTR_SECTION; \
+ mfspr ra,SPRN_PURR; /* get processor util. reg */ \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
+BEGIN_FTR_SECTION; \
+ mftb ra; /* or get TB if no PURR */ \
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
+ ld rb,PACA_STARTPURR(r13); \
+ std ra,PACA_STARTPURR(r13); \
+ subf rb,rb,ra; /* subtract start value */ \
+ ld ra,PACA_SYSTEM_TIME(r13); \
+ add ra,ra,rb; /* add on to system time */ \
+ std ra,PACA_SYSTEM_TIME(r13);
+#endif
+
+/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
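
In C terms, the ACCOUNT_CPU_USER_ENTRY/EXIT macros above snapshot the PURR (or the timebase, when CPU_FTR_PURR is not set) at each user/kernel transition and accumulate the elapsed ticks into the paca's user_time or system_time field. A self-contained sketch of that bookkeeping; the struct and the now parameter are illustrative stand-ins, the real state lives in struct paca_struct and the real read is the mfspr SPRN_PURR / mftb feature section:

#include <stdint.h>

struct acct_state {			/* stand-in for the new paca fields */
	uint64_t startpurr;		/* PURR/TB snapshot at last transition */
	uint64_t user_time;		/* accumulated user-mode ticks */
	uint64_t system_time;		/* accumulated kernel-mode ticks */
};

/* Entry from user mode: close out the interval spent in user mode. */
static void account_entry(struct acct_state *s, uint64_t now)
{
	uint64_t delta = now - s->startpurr;

	s->startpurr = now;
	s->user_time += delta;
}

/* Exit back to user mode: close out the interval spent in the kernel. */
static void account_exit(struct acct_state *s, uint64_t now)
{
	uint64_t delta = now - s->startpurr;

	s->startpurr = now;
	s->system_time += delta;
}
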
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 415fa393b00..1c64a211cf1 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -52,7 +52,6 @@
#ifdef __KERNEL__
#define platform_is_pseries() (_machine == PLATFORM_PSERIES || \
_machine == PLATFORM_PSERIES_LPAR)
-#define platform_is_lpar() (!!(_machine & PLATFORM_LPAR))
#if defined(CONFIG_PPC_MULTIPLATFORM)
extern int _machine;
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index cbd297f44cc..782e13a070a 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -126,8 +126,14 @@ extern struct device_node *find_all_nodes(void);
/* New style node lookup */
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
+#define for_each_node_by_name(dn, name) \
+ for (dn = of_find_node_by_name(NULL, name); dn; \
+ dn = of_find_node_by_name(dn, name))
extern struct device_node *of_find_node_by_type(struct device_node *from,
const char *type);
+#define for_each_node_by_type(dn, type) \
+ for (dn = of_find_node_by_type(NULL, type); dn; \
+ dn = of_find_node_by_type(dn, type))
extern struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compat);
extern struct device_node *of_find_node_by_path(const char *path);
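
A usage sketch for the new for_each_node_by_name()/for_each_node_by_type() iterators (kernel-context fragment; "memory" is just an example node name):

static int count_memory_nodes(void)
{
	struct device_node *dn;
	int n = 0;

	for_each_node_by_name(dn, "memory")	/* walks every node named "memory" */
		n++;
	return n;
}
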
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
index 79bae4933b7..2c2fe964759 100644
--- a/include/asm-powerpc/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -4,7 +4,7 @@
#ifdef __KERNEL__
/*
- * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
+ * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
* in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
* by Paul Mackerras <paulus@samba.org>.
*/
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index c90d9d9aae7..2cda3c38a9f 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -15,7 +15,7 @@
#endif
#ifdef CONFIG_SMP
-#define ISYNC_ON_SMP "\n\tisync"
+#define ISYNC_ON_SMP "\n\tisync\n"
#define LWSYNC_ON_SMP __stringify(LWSYNC) "\n"
#else
#define ISYNC_ON_SMP
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d9bf53653b1..65f5a7b2646 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -171,6 +171,8 @@ extern u32 booke_wdt_period;
/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);
+extern unsigned char* strne2a(unsigned char *dest,
+ const unsigned char *src, size_t n);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
@@ -424,5 +426,9 @@ static inline void create_function_call(unsigned long addr, void * func)
create_branch(addr, func_addr, BRANCH_SET_LINK);
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index baddc9ab57a..912118db13a 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -41,6 +41,7 @@ extern time_t last_rtc_update;
extern void generic_calibrate_decr(void);
extern void wakeup_decrementer(void);
+extern void snapshot_timebase(void);
/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
@@ -221,5 +222,19 @@ struct cpu_usage {
DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_process_vtime(struct task_struct *tsk);
+#else
+#define account_process_vtime(tsk) do { } while (0)
+#endif
+
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+extern void calculate_steal_time(void);
+extern void snapshot_timebases(void);
+#else
+#define calculate_steal_time() do { } while (0)
+#define snapshot_timebases() do { } while (0)
+#endif
+
#endif /* __KERNEL__ */
#endif /* __PPC64_TIME_H */