From dbc895f95500a73ebf1ff12fe85f2e2b3790f52f Mon Sep 17 00:00:00 2001
From: Graf Yang
Date: Wed, 7 Jan 2009 23:14:39 +0800
Subject: Blackfin arch: smp patch cleanup from LKML review

1. Use inline get_l1_...() functions instead of macros
2. Fix a compile issue with the smp_* barrier functions

Signed-off-by: Graf Yang
Signed-off-by: Bryan Wu
---
 arch/blackfin/include/asm/mem_map.h | 75 +++++++++++++++++++++++++++++++++++++
 arch/blackfin/include/asm/smp.h     |  2 +
 arch/blackfin/include/asm/system.h  |  6 ++-
 3 files changed, 81 insertions(+), 2 deletions(-)

(limited to 'arch/blackfin/include')

diff --git a/arch/blackfin/include/asm/mem_map.h b/arch/blackfin/include/asm/mem_map.h
index 88d04a70770..e92b31051bb 100644
--- a/arch/blackfin/include/asm/mem_map.h
+++ b/arch/blackfin/include/asm/mem_map.h
@@ -9,4 +9,79 @@
 
 #include <mach/mem_map.h>
 
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+static inline ulong get_l1_scratch_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
+}
+static inline ulong get_l1_code_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START;
+}
+static inline ulong get_l1_data_a_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
+}
+static inline ulong get_l1_data_b_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
+}
+
+static inline ulong get_l1_scratch_start(void)
+{
+	return get_l1_scratch_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_code_start(void)
+{
+	return get_l1_code_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_data_a_start(void)
+{
+	return get_l1_data_a_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_data_b_start(void)
+{
+	return get_l1_data_b_start_cpu(blackfin_core_id());
+}
+
+#else /* !CONFIG_SMP */
+
+static inline ulong get_l1_scratch_start_cpu(int cpu)
+{
+	return L1_SCRATCH_START;
+}
+static inline ulong get_l1_code_start_cpu(int cpu)
+{
+	return L1_CODE_START;
+}
+static inline ulong get_l1_data_a_start_cpu(int cpu)
+{
+	return L1_DATA_A_START;
+}
+static inline ulong get_l1_data_b_start_cpu(int cpu)
+{
+	return L1_DATA_B_START;
+}
+static inline ulong get_l1_scratch_start(void)
+{
+	return get_l1_scratch_start_cpu(0);
+}
+static inline ulong get_l1_code_start(void)
+{
+	return get_l1_code_start_cpu(0);
+}
+static inline ulong get_l1_data_a_start(void)
+{
+	return get_l1_data_a_start_cpu(0);
+}
+static inline ulong get_l1_data_b_start(void)
+{
+	return get_l1_data_b_start_cpu(0);
+}
+
+#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
+
 #endif /* _MEM_MAP_H_ */
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index 233cb8c3cfb..118deeeae7c 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -32,6 +32,8 @@
 
 #define raw_smp_processor_id() blackfin_core_id()
 
+extern char coreb_trampoline_start, coreb_trampoline_end;
+
 struct corelock_slot {
 	int lock;
 };
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index e8bcfa4ee5c..dea92037dff 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -66,10 +66,13 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
 # define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
 # define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
 # define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
+#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
+
 #else
 # define smp_mb() barrier()
 # define smp_rmb() barrier()
 # define smp_wmb() barrier()
+#define smp_read_barrier_depends() barrier()
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -120,8 +123,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
 		(unsigned long)(n), sizeof(*(ptr))))
 
-#define smp_read_barrier_depends() smp_check_barrier()
-
 #else /* !CONFIG_SMP */
 
 #define smp_mb() barrier()
@@ -192,6 +193,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  */
 
 #include <asm/l1layout.h>
+#include <asm/mem_map.h>
 
 asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);
 
-- 
cgit v1.2.3-70-g09d2
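
For readers following the change, the sketch below shows how the new inline helpers from asm/mem_map.h are meant to be called instead of a fixed L1_CODE_START/L1_DATA_A_START style macro. It is illustrative only and not part of the patch: the function name dump_l1_bases() and the printk reporting are assumptions, while get_l1_code_start_cpu(), get_l1_data_a_start_cpu() and get_l1_scratch_start() are the helpers added above.

#include <linux/kernel.h>
#include <asm/mem_map.h>

/* Hypothetical caller, not from the patch: report the L1 bases that a
 * given core sees.  Under CONFIG_SMP the _cpu() helpers resolve to the
 * COREA_/COREB_ addresses; on UP they collapse to the flat L1_* bases. */
static void dump_l1_bases(int cpu)
{
	printk(KERN_INFO "cpu%d L1 code   @ 0x%08lx\n",
	       cpu, get_l1_code_start_cpu(cpu));
	printk(KERN_INFO "cpu%d L1 data A @ 0x%08lx\n",
	       cpu, get_l1_data_a_start_cpu(cpu));

	/* The no-argument form always answers for the running core. */
	printk(KERN_INFO "local L1 scratch @ 0x%08lx\n",
	       get_l1_scratch_start());
}

The system.h hunks give Blackfin its own smp_read_barrier_depends() in both the barrier-debug and plain SMP cases, instead of the stray definition that sat inside the cmpxchg block. The fragment below is only a generic illustration of the pattern that macro exists for (a load that depends on a previously loaded pointer); the structure and variable names are made up, and the writer side is assumed to publish the pointer with smp_wmb().

#include <linux/compiler.h>
#include <asm/system.h>

struct cfg { int value; };
static struct cfg *live_cfg;	/* set once by a publisher that uses smp_wmb() */

static int read_cfg_value(void)
{
	struct cfg *p = live_cfg;	/* load the pointer */

	smp_read_barrier_depends();	/* order the dependent load below */
	return p ? p->value : 0;	/* load through that pointer */
}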