| author | Anton Altaparmakov <aia21@cantab.net> | 2006-01-19 16:39:33 +0000 |
|---|---|---|
| committer | Anton Altaparmakov <aia21@cantab.net> | 2006-01-19 16:39:33 +0000 |
| commit | 944d79559d154c12becde0dab327016cf438f46c (patch) | |
| tree | 50c101806f4d3b6585222dda060559eb4f3e005a /include/asm-powerpc/system.h | |
| parent | d087e4bdd24ebe3ae3d0b265b6573ec901af4b4b (diff) | |
| parent | 0f36b018b2e314d45af86449f1a97facb1fbe300 (diff) | |
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'include/asm-powerpc/system.h')
-rw-r--r-- | include/asm-powerpc/system.h | 29 |
1 file changed, 23 insertions, 6 deletions
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 5341b75c75c..d9bf53653b1 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -4,7 +4,6 @@
 #ifndef _ASM_POWERPC_SYSTEM_H
 #define _ASM_POWERPC_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 
 #include <asm/hw_irq.h>
@@ -42,6 +41,7 @@
 #define set_mb(var, value)   do { var = value; mb(); } while (0)
 #define set_wmb(var, value)  do { var = value; wmb(); } while (0)
 
+#ifdef __KERNEL__
 #ifdef CONFIG_SMP
 #define smp_mb()   mb()
 #define smp_rmb()  rmb()
@@ -54,7 +54,6 @@
 #define smp_read_barrier_depends()  do { } while(0)
 #endif /* CONFIG_SMP */
 
-#ifdef __KERNEL__
 struct task_struct;
 struct pt_regs;
 
@@ -134,6 +133,14 @@ extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
@@ -176,6 +183,16 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                    struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
@@ -195,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
@@ -215,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
@@ -270,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
@@ -294,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
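
The substantive change pulled in by this merge is the switch from EIEIO_ON_SMP to LWSYNC_ON_SMP as the leading barrier in the __xchg_*() and __cmpxchg_*() primitives: roughly speaking, on cacheable memory lwsync orders prior loads as well as prior stores ahead of the lwarx/stwcx. sequence, whereas eieio only orders stores. As a rough illustration of how a cmpxchg-style primitive is typically consumed, the sketch below builds an "add and return the previous value" helper on GCC's __atomic_compare_exchange_n builtin; it is user-space stand-in code under those assumptions, not the kernel's __cmpxchg_u32(), and atomic_add_return_prev() is a name invented for the example.

```c
/*
 * User-space sketch only: GCC's __atomic_compare_exchange_n stands in for
 * the kernel's __cmpxchg_u32(); atomic_add_return_prev() is a name invented
 * for this example.
 */
#include <stdio.h>

static unsigned int counter;

/* Atomically add 'delta' to *p and return the value *p held beforehand. */
static unsigned int atomic_add_return_prev(unsigned int *p, unsigned int delta)
{
	unsigned int old = __atomic_load_n(p, __ATOMIC_RELAXED);

	/*
	 * Classic cmpxchg retry loop: keep trying until no other thread
	 * modified *p between reading 'old' and installing 'old + delta'.
	 * On failure the builtin reloads 'old' with the current value.
	 */
	while (!__atomic_compare_exchange_n(p, &old, old + delta,
					    0 /* strong */,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST))
		;

	return old;
}

int main(void)
{
	unsigned int prev = atomic_add_return_prev(&counter, 5);

	printf("previous=%u now=%u\n", prev, counter);
	return 0;
}
```

Compiled for powerpc, a sequentially consistent compare-exchange of this kind lowers to a lwarx/stwcx. retry loop bracketed by barriers, which is the shape the header above open-codes by hand.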