Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/atomic.h       48
-rw-r--r--  arch/powerpc/include/asm/bitops.h       12
-rw-r--r--  arch/powerpc/include/asm/floppy.h        4
-rw-r--r--  arch/powerpc/include/asm/futex.h         7
-rw-r--r--  arch/powerpc/include/asm/kvm.h           8
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h    2
-rw-r--r--  arch/powerpc/include/asm/lv1call.h       2
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h     2
-rw-r--r--  arch/powerpc/include/asm/sections.h      2
-rw-r--r--  arch/powerpc/include/asm/synch.h         9
-rw-r--r--  arch/powerpc/include/asm/xics.h          4
11 files changed, 48 insertions, 52 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e2a4c26ad37..02e41b53488 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	int t;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq-	2f \n\
@@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)
@@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	cmpwi	%0,1\n\
 	addi	%0,%0,-1\n\
@@ -234,7 +234,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"	: "=&b" (t)
 	: "r" (&v->counter)
@@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_add_return\n\
 	add	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
 	subf	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
 	addic	%0,%0,1\n\
 	stdcx.	%0,0,%1 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
 	addic	%0,%0,-1\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
@@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 	long t;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
 "	stdcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afcc10f..efdc92618b3 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn(		\
 	return (old & mask);	\
 }
 
-DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 DEFINE_TESTOP(test_and_set_bits_lock, or, "",
 	      PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
-DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
 static __inline__ int test_and_set_bit(unsigned long nr,
 				       volatile unsigned long *addr)
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 24bd34c57e9..936a904ae78 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -108,10 +108,10 @@ static int fd_request_irq(void)
 {
 	if (can_use_virtual_dma)
 		return request_irq(FLOPPY_IRQ, floppy_hardint,
-				   IRQF_DISABLED, "floppy", NULL);
+				   0, "floppy", NULL);
 	else
 		return request_irq(FLOPPY_IRQ, floppy_interrupt,
-				   IRQF_DISABLED, "floppy", NULL);
+				   0, "floppy", NULL);
 }
 
 static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3fe2e..2a9cf845473 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,12 +11,13 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile ( \
-	PPC_RELEASE_BARRIER \
+	PPC_ATOMIC_ENTRY_BARRIER \
 "1:	lwarx	%0,0,%2\n" \
 	insn \
 	PPC405_ERR77(0, %2) \
 "2:	stwcx.	%1,0,%2\n" \
 	"bne-	1b\n" \
+	PPC_ATOMIC_EXIT_BARRIER \
 	"li	%1,0\n" \
 "3:	.section .fixup,\"ax\"\n" \
 "4:	li	%1,%3\n" \
@@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%1,0,%3		# futex_atomic_cmpxchg_inatomic\n\
 	cmpw	0,%1,%4\n\
 	bne-	3f\n"
 	PPC405_ERR77(0,%3)
 "2:	stwcx.	%5,0,%3\n\
 	bne-	1b\n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "3:	.section .fixup,\"ax\"\n\
 4:	li	%0,%6\n\
 	b	3b\n\
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 08fe69edcd1..0ad432bc81d 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -149,12 +149,6 @@ struct kvm_regs {
 #define KVM_SREGS_E_UPDATE_DBSR		(1 << 3)
 
 /*
- * Book3S special bits to indicate contents in the struct by maintaining
- * backwards compatibility with older structs. If adding a new field,
- * please make sure to add a flag for that new field */
-#define KVM_SREGS_S_HIOR		(1 << 0)
-
-/*
  * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
  * previous KVM_GET_REGS.
  *
@@ -179,8 +173,6 @@ struct kvm_sregs {
 			__u64 ibat[8];
 			__u64 dbat[8];
 		} ppc32;
-		__u64 flags; /* KVM_SREGS_S_ */
-		__u64 hior;
 	} s;
 	struct {
 		union {
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a384ffdf33d..d4df013ad77 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
 #endif
 	int context_id[SID_CONTEXTS];
 
-	bool hior_sregs;		/* HIOR is set by SREGS, not PVR */
-
 	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index 9cd5fc828a3..f77c708c67a 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -316,7 +316,7 @@ LV1_CALL(gpu_context_free, 1, 0, 218 )
 LV1_CALL(gpu_context_iomap, 5, 0, 221 )
 LV1_CALL(gpu_context_attribute, 6, 0, 225 )
 LV1_CALL(gpu_context_intr, 1, 1, 227 )
-LV1_CALL(gpu_attribute, 5, 0, 228 )
+LV1_CALL(gpu_attribute, 3, 0, 228 )
 LV1_CALL(get_rtc, 0, 2, 232 )
 LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 )
 LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 )
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9f399..03c48e819c8 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -31,7 +31,7 @@
 
 #define MSR_		MSR_ME | MSR_CE
 #define MSR_KERNEL	MSR_ | MSR_64BIT
-#define MSR_USER32	MSR_ | MSR_PR | MSR_EE | MSR_DE
+#define MSR_USER32	MSR_ | MSR_PR | MSR_EE
 #define MSR_USER64	MSR_USER32 | MSR_64BIT
 #elif defined (CONFIG_40x)
 #define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c71..a0f358d4a00 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
 
 #ifdef __powerpc64__
 
-extern char _end[];
+extern char __end_interrupts[];
 
 static inline int in_kernel_text(unsigned long addr)
 {
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c..e682a7143ed 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
 			     void *fixup_end);
+extern void do_final_fixups(void);
 
 static inline void eieio(void)
 {
@@ -41,11 +42,15 @@
 	START_LWSYNC_SECTION(97);			\
 	isync;						\
 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
-#define PPC_ACQUIRE_BARRIER	"\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
-#define PPC_RELEASE_BARRIER	stringify_in_c(LWSYNC) "\n"
+#define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+#define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
 #define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index bd6c401c0ee..c48de98ba94 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -15,8 +15,8 @@
 #define DEFAULT_PRIORITY	5
 
 /*
- * Mark IPIs as higher priority so we can take them inside interrupts that
- * arent marked IRQF_DISABLED
+ * Mark IPIs as higher priority so we can take them inside interrupts
+ * FIXME: still true now?
  */
 #define IPI_PRIORITY		4
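
Note on the barrier change that accounts for most of this diff: the value-returning atomics, the futex operations, and the test_and_* bitops now open with PPC_ATOMIC_ENTRY_BARRIER (an lwsync) and close with PPC_ATOMIC_EXIT_BARRIER (a full sync), per the asm/synch.h hunk above. The sketch below shows that barrier placement in a stand-alone routine; it is illustrative only, assumes a 64-bit PowerPC target with GCC inline asm, and the DEMO_* macros and demo_add_return name are not kernel identifiers.

/* Illustrative only -- not kernel code.  Mirrors the barrier placement
 * introduced by this diff: lwsync before the ll/sc loop, sync after it.
 * Assumes a PowerPC64 target and GCC-style inline asm. */
#define DEMO_ATOMIC_ENTRY_BARRIER  "\n\tlwsync\n"  /* lightweight sync before the loop */
#define DEMO_ATOMIC_EXIT_BARRIER   "\n\tsync\n"    /* full sync after the loop */

static inline int demo_add_return(int a, int *counter)
{
	int t;

	__asm__ __volatile__(
	DEMO_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2\n"	/* load word and reserve */
"	add	%0,%1,%0\n"	/* new value = old + a */
"	stwcx.	%0,0,%2\n"	/* store conditionally */
"	bne-	1b"		/* reservation lost: retry */
	DEMO_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (counter)
	: "cc", "memory");

	return t;
}

Closing with sync rather than the isync-based acquire sequence makes the operation a full memory barrier (it also orders earlier stores against later loads), which is the semantic the generic atomic_*_return contract expects; that reading of the intent is inferred from the macro definitions in the asm/synch.h hunk above.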