Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--   include/asm-powerpc/atomic.h     |  38
-rw-r--r--   include/asm-powerpc/cputable.h   |   4
-rw-r--r--   include/asm-powerpc/cputime.h    | 202
-rw-r--r--   include/asm-powerpc/firmware.h   |   9
-rw-r--r--   include/asm-powerpc/irq.h        |   6
-rw-r--r--   include/asm-powerpc/lmb.h        |  17
-rw-r--r--   include/asm-powerpc/paca.h       |   7
-rw-r--r--   include/asm-powerpc/pgtable-4k.h |  11
-rw-r--r--   include/asm-powerpc/ppc_asm.h    |  42
-rw-r--r--   include/asm-powerpc/rwsem.h      |   2
-rw-r--r--   include/asm-powerpc/synch.h      |   2
-rw-r--r--   include/asm-powerpc/system.h     |   4
-rw-r--r--   include/asm-powerpc/time.h       |  15
13 files changed, 318 insertions, 41 deletions
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 147a38dcc76..bb3c0ab7e66 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -8,6 +8,7 @@ typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
 
@@ -176,20 +177,29 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)			\
-({							\
-	int c, old;					\
-	c = atomic_read(v);				\
-	for (;;) {					\
-		if (unlikely(c == (u)))			\
-			break;				\
-		old = atomic_cmpxchg((v), c, c + (a));	\
-		if (likely(old == c))			\
-			break;				\
-		c = old;				\
-	}						\
-	c != (u);					\
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int t;
+
+	__asm__ __volatile__ (
+	LWSYNC_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
+	cmpw	0,%0,%3 \n\
+	beq-	2f \n\
+	add	%0,%2,%0 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%1 \n\
+	bne-	1b \n"
+	ISYNC_ON_SMP
+"	subf	%0,%2,%0 \n\
+2:"
+	: "=&r" (t)
+	: "r" (&v->counter), "r" (a), "r" (u)
+	: "cc", "memory");
+
+	return t != u;
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 5638518968c..aaaeb452770 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -119,6 +119,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0000080000000000)
 #define CPU_FTR_CI_LARGE_PAGE		ASM_CONST(0x0000100000000000)
 #define CPU_FTR_PAUSE_ZERO		ASM_CONST(0x0000200000000000)
+#define CPU_FTR_PURR			ASM_CONST(0x0000400000000000)
 #else	/* ensure on 32b processors the flags are available for compiling but
 	 * don't do anything */
@@ -134,6 +135,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x0)
 #define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0)
 #define CPU_FTR_CI_LARGE_PAGE		ASM_CONST(0x0)
+#define CPU_FTR_PURR			ASM_CONST(0x0)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -318,7 +320,7 @@ enum {
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 	    CPU_FTR_MMCRA | CPU_FTR_SMT |
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-	    CPU_FTR_MMCRA_SIHV,
+	    CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR,
     CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT |
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index 6d68ad7e0ea..a21185d4788 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -1 +1,203 @@
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * the same units as the timebase.  Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#else
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero			((cputime_t)0)
+#define cputime_max			((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b)		((__a) + (__b))
+#define cputime_sub(__a, __b)		((__a) - (__b))
+#define cputime_div(__a, __n)		((__a) / (__n))
+#define cputime_halve(__a)		((__a) >> 1)
+#define cputime_eq(__a, __b)		((__a) == (__b))
+#define cputime_gt(__a, __b)		((__a) > (__b))
+#define cputime_ge(__a, __b)		((__a) >= (__b))
+#define cputime_lt(__a, __b)		((__a) < (__b))
+#define cputime_le(__a, __b)		((__a) <= (__b))
+
+#define cputime64_zero			((cputime64_t)0)
+#define cputime64_add(__a, __b)		((__a) + (__b))
+#define cputime_to_cputime64(__ct)	(__ct)
+
+#ifdef __KERNEL__
+
+/*
+ * Convert cputime <-> jiffies
+ */
+extern u64 __cputime_jiffies_factor;
+
+static inline unsigned long cputime_to_jiffies(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned long jif)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = jif % HZ;
+	sec = jif / HZ;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, HZ);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+extern u64 __cputime_msec_factor;
+
+static inline unsigned long cputime_to_msecs(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_msec_factor);
+}
+
+static inline cputime_t msecs_to_cputime(const unsigned long ms)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = ms % 1000;
+	sec = ms / 1000;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, 1000);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+/*
+ * Convert cputime <-> seconds
+ */
+extern u64 __cputime_sec_factor;
+
+static inline unsigned long cputime_to_secs(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_sec_factor);
+}
+
+static inline cputime_t secs_to_cputime(const unsigned long sec)
+{
+	return (cputime_t) sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timespec
+ */
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
+{
+	u64 x = ct;
+	unsigned int frac;
+
+	frac = do_div(x, tb_ticks_per_sec);
+	p->tv_sec = x;
+	x = (u64) frac * 1000000000;
+	do_div(x, tb_ticks_per_sec);
+	p->tv_nsec = x;
+}
+
+static inline cputime_t timespec_to_cputime(const struct timespec *p)
+{
+	cputime_t ct;
+
+	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
+	do_div(ct, 1000000000);
+	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timeval
+ */
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
+{
+	u64 x = ct;
+	unsigned int frac;
+
+	frac = do_div(x, tb_ticks_per_sec);
+	p->tv_sec = x;
+	x = (u64) frac * 1000000;
+	do_div(x, tb_ticks_per_sec);
+	p->tv_usec = x;
+}
+
+static inline cputime_t timeval_to_cputime(const struct timeval *p)
+{
+	cputime_t ct;
+
+	ct = (u64) p->tv_usec * tb_ticks_per_sec;
+	do_div(ct, 1000000);
+	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
+ */
+extern u64 __cputime_clockt_factor;
+
+static inline unsigned long cputime_to_clock_t(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_clockt_factor);
+}
+
+static inline cputime_t clock_t_to_cputime(const unsigned long clk)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = clk % USER_HZ;
+	sec = clk / USER_HZ;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, USER_HZ);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+#define cputime64_to_clock_t(ct)	cputime_to_clock_t((cputime_t)(ct))
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* __POWERPC_CPUTIME_H */
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index f804b34cf06..b7791a1b05d 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -89,15 +89,6 @@ static inline unsigned long firmware_has_feature(unsigned long feature)
 		(FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
 }
 
-#ifdef CONFIG_PPC_PSERIES
-typedef struct {
-	unsigned long val;
-	char * name;
-} firmware_feature_t;
-
-extern firmware_feature_t firmware_features_table[];
-#endif
-
 extern void system_reset_fwnmi(void);
 extern void machine_check_fwnmi(void);
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 8eb7e857ec4..51f87d9993b 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -479,6 +479,10 @@ extern int distribute_irqs;
 struct irqaction;
 struct pt_regs;
 
+#define __ARCH_HAS_DO_SOFTIRQ
+
+extern void __do_softirq(void);
+
 #ifdef CONFIG_IRQSTACKS
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
@@ -491,8 +495,6 @@ extern void call_do_softirq(struct thread_info *tp);
 extern int call___do_IRQ(int irq, struct pt_regs *regs,
 			 struct thread_info *tp);
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 #else
 #define irq_ctx_init()
diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
index d3546c4c9f4..377ac1b23aa 100644
--- a/include/asm-powerpc/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -19,8 +19,6 @@
 
 #define MAX_LMB_REGIONS 128
 
-#define LMB_ALLOC_ANYWHERE	0
-
 struct lmb_property {
 	unsigned long base;
 	unsigned long size;
@@ -43,15 +41,16 @@ extern struct lmb lmb;
 
 extern void __init lmb_init(void);
 extern void __init lmb_analyze(void);
-extern long __init lmb_add(unsigned long, unsigned long);
-extern long __init lmb_reserve(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
-		unsigned long);
+extern long __init lmb_add(unsigned long base, unsigned long size);
+extern long __init lmb_reserve(unsigned long base, unsigned long size);
+extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
+extern unsigned long __init lmb_alloc_base(unsigned long size,
+		unsigned long align, unsigned long max_addr);
+extern unsigned long __init __lmb_alloc_base(unsigned long size,
+		unsigned long align, unsigned long max_addr);
 extern unsigned long __init lmb_phys_mem_size(void);
 extern unsigned long __init lmb_end_of_DRAM(void);
-extern unsigned long __init lmb_abs_to_phys(unsigned long);
-extern void __init lmb_enforce_memory_limit(unsigned long);
+extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
 extern void lmb_dump_all(void);
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c9add8f1ad9..4465b95ebef 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -54,7 +54,7 @@ struct paca_struct {
 #endif /* CONFIG_PPC_ISERIES */
 
 	/*
-	 * MAGIC: the spinlock functions in arch/ppc64/lib/locks.c
+	 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
 	 * load lock_token and paca_index with a single lwz
 	 * instruction.  They must travel together and be properly
 	 * aligned.
@@ -96,6 +96,11 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
+
+	/* Stuff for accurate time accounting */
+	u64 user_time;			/* accumulated usermode TB ticks */
+	u64 system_time;		/* accumulated system TB ticks */
+	u64 startpurr;			/* PURR/TB value snapshot */
 };
 
 extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index 80a7832d272..b2e18629932 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -62,9 +62,14 @@
 /* shift to put page number into pte */
 #define PTE_RPN_SHIFT	(17)
 
-#define __real_pte(e,p)		((real_pte_t)(e))
-#define __rpte_to_pte(r)	(r)
-#define __rpte_to_hidx(r,index)	(pte_val((r)) >> 12)
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __rpte_to_pte(r)	((r).pte)
+#else
+#define __real_pte(e,p)		(e)
+#define __rpte_to_pte(r)	(__pte(r))
+#endif
+#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)
 
 #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
 	do {								\
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index ab8688d3902..dd1c0a913d5 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -15,6 +15,48 @@
 #define SZL			(BITS_PER_LONG/8)
 
 /*
+ * Stuff for accurate CPU time accounting.
+ * These macros handle transitions between user and system state
+ * in exception entry and exit and accumulate time to the
+ * user_time and system_time fields in the paca.
+ */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#else
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)					\
+	beq	2f;			/* if from kernel mode */	\
+BEGIN_FTR_SECTION;							\
+	mfspr	ra,SPRN_PURR;		/* get processor util. reg */	\
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);					\
+BEGIN_FTR_SECTION;							\
+	mftb	ra;			/* or get TB if no PURR */	\
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR);					\
+	ld	rb,PACA_STARTPURR(r13);					\
+	std	ra,PACA_STARTPURR(r13);					\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	ld	ra,PACA_USER_TIME(r13);					\
+	add	ra,ra,rb;		/* add on to user time */	\
+	std	ra,PACA_USER_TIME(r13);					\
+2:
+
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)					\
+BEGIN_FTR_SECTION;							\
+	mfspr	ra,SPRN_PURR;		/* get processor util. reg */	\
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);					\
+BEGIN_FTR_SECTION;							\
+	mftb	ra;			/* or get TB if no PURR */	\
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR);					\
+	ld	rb,PACA_STARTPURR(r13);					\
+	std	ra,PACA_STARTPURR(r13);					\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	ld	ra,PACA_SYSTEM_TIME(r13);				\
+	add	ra,ra,rb;		/* add on to system time */	\
+	std	ra,PACA_SYSTEM_TIME(r13);
+#endif
+
+/*
  * Macros for storing registers into and loading registers from
  * exception frames.
 */
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
index 79bae4933b7..2c2fe964759 100644
--- a/include/asm-powerpc/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -4,7 +4,7 @@
 #ifdef __KERNEL__
 
 /*
- * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
+ * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
  * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
  * by Paul Mackerras <paulus@samba.org>.
  */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index c90d9d9aae7..2cda3c38a9f 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -15,7 +15,7 @@
 #endif
 
 #ifdef CONFIG_SMP
-#define ISYNC_ON_SMP	"\n\tisync"
+#define ISYNC_ON_SMP	"\n\tisync\n"
 #define LWSYNC_ON_SMP	__stringify(LWSYNC) "\n"
 #else
 #define ISYNC_ON_SMP
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d9bf53653b1..41b7a5b3d70 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -424,5 +424,9 @@ static inline void create_function_call(unsigned long addr, void * func)
 	create_branch(addr, func_addr, BRANCH_SET_LINK);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index baddc9ab57a..912118db13a 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -41,6 +41,7 @@ extern time_t last_rtc_update;
 
 extern void generic_calibrate_decr(void);
 extern void wakeup_decrementer(void);
+extern void snapshot_timebase(void);
 
 /* Some sane defaults: 125 MHz timebase, 1GHz processor */
 extern unsigned long ppc_proc_freq;
@@ -221,5 +222,19 @@ struct cpu_usage {
 
 DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_process_vtime(struct task_struct *tsk);
+#else
+#define account_process_vtime(tsk)		do { } while (0)
+#endif
+
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+extern void calculate_steal_time(void);
+extern void snapshot_timebases(void);
+#else
+#define calculate_steal_time()			do { } while (0)
+#define snapshot_timebases()			do { } while (0)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __PPC64_TIME_H */
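
A note on the conversion scheme in the new cputime.h above: jiffies_to_cputime() and its siblings split the input into whole seconds and a sub-second remainder so that the intermediate multiplication by tb_ticks_per_sec stays small before the division, which is why the patch comments say "have to be a little careful about overflow". The standalone C sketch below re-implements that pattern in userspace purely for illustration; it is not part of the patch, and the HZ and tb_ticks_per_sec values are assumed example figures, not taken from any particular machine.

/* Illustrative userspace re-implementation of the overflow-careful
 * jiffies -> timebase-ticks conversion pattern used by the new
 * jiffies_to_cputime().  HZ and tb_ticks_per_sec are assumed values. */
#include <stdint.h>
#include <stdio.h>

#define HZ 250                                           /* assumed tick rate */
static const uint64_t tb_ticks_per_sec = 512000000ULL;   /* assumed timebase */

static uint64_t jiffies_to_tb_ticks(unsigned long jif)
{
	uint64_t ct = jif % HZ;        /* sub-second part, always < HZ */
	unsigned long sec = jif / HZ;  /* whole seconds */

	/* scale the small fractional part first; the kernel version uses
	 * do_div() here because it must also run on 32-bit hosts */
	if (ct) {
		ct *= tb_ticks_per_sec;
		ct /= HZ;
	}
	if (sec)
		ct += (uint64_t) sec * tb_ticks_per_sec;
	return ct;
}

int main(void)
{
	printf("1 jiffy -> %llu ticks\n",
	       (unsigned long long) jiffies_to_tb_ticks(1));
	printf("10 s    -> %llu ticks\n",
	       (unsigned long long) jiffies_to_tb_ticks(10UL * HZ));
	return 0;
}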