Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/arch_timer.h | 14
-rw-r--r-- | arch/arm/include/asm/smp_plat.h   |  3
-rw-r--r-- | arch/arm/include/asm/spinlock.h   | 51
-rw-r--r-- | arch/arm/include/asm/tlb.h        |  7
4 files changed, 45 insertions, 30 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index e406d575c94..5665134bfa3 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -17,7 +17,8 @@ int arch_timer_arch_init(void);
  * nicely work out which register we want, and chuck away the rest of
  * the code. At least it does so with a recent GCC (4.6.3).
  */
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
 {
 	if (access == ARCH_TIMER_PHYS_ACCESS) {
 		switch (reg) {
@@ -28,9 +29,7 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
 			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
 			break;
 		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
+	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
 			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
@@ -44,7 +43,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
 	isb();
 }
 
-static inline u32 arch_timer_reg_read(const int access, const int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
 {
 	u32 val = 0;
 
@@ -57,9 +57,7 @@ static inline u32 arch_timer_reg_read(const int access, const int reg)
 			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
 			break;
 		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
+	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
 			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd..a252c0bfacf 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e..b07c09e5a0a 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 		"	subs	%1, %0, %0, ror #16\n"
 		"	addeq	%0, %0, %4\n"
 		"	strexeq	%2, %0, [%3]"
-		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "=&r" (slock), "=&r" (contended), "=&r" (res)
 		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 		: "cc");
 	} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e72..0baf7f0d939 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
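
Two notes on the changes above, for readers outside the kernel tree.

The rwlock trylock rework exists because strex can fail spuriously (for example when the exclusive monitor is cleared by an interrupt between ldrex and strex), so the old single-shot sequences could report "contended" for a lock that was actually free. The new do { ... } while (res) loops retry until the store-exclusive result reflects real contention. Below is a minimal user-space sketch of the same idea, not kernel code: it uses GCC's weak __atomic_compare_exchange_n builtin, which, like strex, is permitted to fail spuriously. The names rwlock_word and write_trylock_demo are illustrative only.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t rwlock_word;	/* 0 = free, 0x80000000 = write-held */

	static bool write_trylock_demo(uint32_t *lock)
	{
		uint32_t contended;

		do {
			contended = __atomic_load_n(lock, __ATOMIC_RELAXED);
			if (contended != 0)
				return false;	/* genuinely held: give up */
			/*
			 * Weak CAS: like strexeq above, it may fail even
			 * though *lock was 0, so retry rather than report
			 * a free lock as contended.
			 */
		} while (!__atomic_compare_exchange_n(lock, &contended,
						      0x80000000u, true,
						      __ATOMIC_ACQUIRE,
						      __ATOMIC_RELAXED));
		return true;
	}

	int main(void)
	{
		printf("first try:  %d\n", write_trylock_demo(&rwlock_word)); /* 1 */
		printf("second try: %d\n", write_trylock_demo(&rwlock_word)); /* 0 */
		return 0;
	}

On the tlb.h change, the new fullmm test relies on the convention that a full address-space teardown is requested as start = 0, end = -1, so !(start | (end + 1)) is 1 exactly for that sentinel pair: end + 1 wraps to 0 only when end is ~0UL, and the OR is 0 only when start is also 0. A standalone sketch of the predicate:

	#include <assert.h>

	int main(void)
	{
		unsigned long start = 0, end = ~0UL;	/* whole-mm sentinel */

		assert(!(start | (end + 1)) == 1);	/* full teardown: fullmm = 1 */

		start = 0x8000; end = 0x9000;		/* ordinary bounded range */
		assert(!(start | (end + 1)) == 0);	/* ranged flush: fullmm = 0 */
		return 0;
	}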