Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/io-64-nonatomic-hi-lo.h	 14
-rw-r--r--	include/asm-generic/io-64-nonatomic-lo-hi.h	 14
-rw-r--r--	include/asm-generic/percpu.h			410
-rw-r--r--	include/asm-generic/vmlinux.lds.h		  5
4 files changed, 373 insertions(+), 70 deletions(-)
diff --git a/include/asm-generic/io-64-nonatomic-hi-lo.h b/include/asm-generic/io-64-nonatomic-hi-lo.h
index a6806a94250..2e29d13fc15 100644
--- a/include/asm-generic/io-64-nonatomic-hi-lo.h
+++ b/include/asm-generic/io-64-nonatomic-hi-lo.h
@@ -4,8 +4,7 @@
 #include <linux/io.h>
 #include <asm-generic/int-ll64.h>
 
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
+static inline __u64 hi_lo_readq(const volatile void __iomem *addr)
 {
 	const volatile u32 __iomem *p = addr;
 	u32 low, high;
@@ -15,14 +14,19 @@ static inline __u64 readq(const volatile void __iomem *addr)
 
 	return low + ((u64)high << 32);
 }
-#endif
 
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
+static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
 {
 	writel(val >> 32, addr + 4);
 	writel(val, addr);
 }
+
+#ifndef readq
+#define readq hi_lo_readq
+#endif
+
+#ifndef writeq
+#define writeq hi_lo_writeq
 #endif
 
 #endif /* _ASM_IO_64_NONATOMIC_HI_LO_H_ */
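[Editor's note] With this change the generic readq/writeq bodies become hi_lo_readq()/hi_lo_writeq(), and the old names survive as fallback defines that only apply when the architecture does not provide a native readq/writeq. Existing callers are unaffected. A minimal usage sketch follows; the FOO_CYCLES register and the foo_* names are hypothetical, not part of this patch:

#include <asm-generic/io-64-nonatomic-hi-lo.h>

#define FOO_CYCLES	0x40	/* hypothetical 64-bit counter register */

static u64 foo_read_cycles(void __iomem *base)
{
	/* resolves to hi_lo_readq() when the arch lacks a native readq */
	return readq(base + FOO_CYCLES);
}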
diff --git a/include/asm-generic/io-64-nonatomic-lo-hi.h b/include/asm-generic/io-64-nonatomic-lo-hi.h
index ca546b1ff8b..0efacff0a1c 100644
--- a/include/asm-generic/io-64-nonatomic-lo-hi.h
+++ b/include/asm-generic/io-64-nonatomic-lo-hi.h
@@ -4,8 +4,7 @@
 #include <linux/io.h>
 #include <asm-generic/int-ll64.h>
 
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
+static inline __u64 lo_hi_readq(const volatile void __iomem *addr)
 {
 	const volatile u32 __iomem *p = addr;
 	u32 low, high;
@@ -15,14 +14,19 @@ static inline __u64 readq(const volatile void __iomem *addr)
 
 	return low + ((u64)high << 32);
 }
-#endif
 
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
+static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
 {
 	writel(val, addr);
 	writel(val >> 32, addr + 4);
 }
+
+#ifndef readq
+#define readq lo_hi_readq
+#endif
+
+#ifndef writeq
+#define writeq lo_hi_writeq
 #endif
 
 #endif /* _ASM_IO_64_NONATOMIC_LO_HI_H_ */
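[Editor's note] The lo-hi header is identical except that the low word is accessed first. Because the prefixed helpers are now defined unconditionally, a driver can also ask for a specific half-order explicitly instead of relying on whichever header it includes. An illustrative sketch under an assumed device behavior (not from this patch): a 64-bit status register that latches its high half when the low half is read wants lo_hi_readq():

#include <asm-generic/io-64-nonatomic-lo-hi.h>

static u64 bar_read_status(void __iomem *reg)
{
	/* low 32 bits first, which latches the high half on this device */
	return lo_hi_readq(reg);
}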
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 0703aa75b5e..4d9f233c4ba 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -36,93 +36,385 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #endif
 
 /*
- * Add a offset to a pointer but keep the pointer as is.
- *
- * Only S390 provides its own means of moving the pointer.
+ * Arch may define arch_raw_cpu_ptr() to provide more efficient address
+ * translations for raw_cpu_ptr().
  */
-#ifndef SHIFT_PERCPU_PTR
-/* Weird cast keeps both GCC and sparse happy. */
-#define SHIFT_PERCPU_PTR(__p, __offset) ({				\
-	__verify_pcpu_ptr((__p));					\
-	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
-})
+#ifndef arch_raw_cpu_ptr
+#define arch_raw_cpu_ptr(ptr)	SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 #endif
 
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
- */
-#define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-
-#ifndef raw_cpu_ptr
-#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
 #endif
-#ifdef CONFIG_DEBUG_PREEMPT
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+
+#endif	/* SMP */
+
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
-#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#define PER_CPU_BASE_SECTION ".data"
+#endif
 #endif
 
-#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
-extern void setup_per_cpu_areas(void);
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
 #endif
 
-#else /* ! SMP */
+#define raw_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	*raw_cpu_ptr(&(pcp)) op val;					\
+} while (0)
 
-#define VERIFY_PERCPU_PTR(__p) ({			\
-	__verify_pcpu_ptr((__p));			\
-	(typeof(*(__p)) __kernel __force *)(__p);	\
+#define raw_cpu_generic_add_return(pcp, val)				\
+({									\
+	raw_cpu_add(pcp, val);						\
+	raw_cpu_read(pcp);						\
 })
 
-#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
-#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
-#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
-#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
-#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr)
+#define raw_cpu_generic_xchg(pcp, nval)					\
+({									\
+	typeof(pcp) __ret;						\
+	__ret = raw_cpu_read(pcp);					\
+	raw_cpu_write(pcp, nval);					\
+	__ret;								\
+})
 
-#endif	/* SMP */
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) __ret;						\
+	__ret = raw_cpu_read(pcp);					\
+	if (__ret == (oval))						\
+		raw_cpu_write(pcp, nval);				\
+	__ret;								\
+})
 
-#ifndef PER_CPU_BASE_SECTION
-#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data..percpu"
-#else
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({									\
+	int __ret = 0;							\
+	if (raw_cpu_read(pcp1) == (oval1) &&				\
+	    raw_cpu_read(pcp2) == (oval2)) {				\
+		raw_cpu_write(pcp1, nval1);				\
+		raw_cpu_write(pcp2, nval2);				\
+		__ret = 1;						\
+	}								\
+	(__ret);							\
+})
+
+#define this_cpu_generic_read(pcp)					\
+({									\
+	typeof(pcp) __ret;						\
+	preempt_disable();						\
+	__ret = *this_cpu_ptr(&(pcp));					\
+	preempt_enable();						\
+	__ret;								\
+})
+
+#define this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	*raw_cpu_ptr(&(pcp)) op val;					\
+	raw_local_irq_restore(__flags);					\
+} while (0)
+
+#define this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	raw_cpu_add(pcp, val);						\
+	__ret = raw_cpu_read(pcp);					\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_xchg(pcp, nval)				\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_read(pcp);					\
+	raw_cpu_write(pcp, nval);					\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_read(pcp);					\
+	if (__ret == (oval))						\
+		raw_cpu_write(pcp, nval);				\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({									\
+	int __ret;							\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
+			oval1, oval2, nval1, nval2);			\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#ifndef raw_cpu_read_1
+#define raw_cpu_read_1(pcp)	(*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_2
+#define raw_cpu_read_2(pcp)	(*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_4
+#define raw_cpu_read_4(pcp)	(*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_8
+#define raw_cpu_read_8(pcp)	(*raw_cpu_ptr(&(pcp)))
+#endif
 
-#ifdef CONFIG_SMP
+#ifndef raw_cpu_write_1
+#define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_2
+#define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_4
+#define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_8
+#define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
 
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#endif
+#ifndef raw_cpu_add_1
+#define raw_cpu_add_1(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_2
+#define raw_cpu_add_2(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_4
+#define raw_cpu_add_4(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_8
+#define raw_cpu_add_8(pcp, val)		raw_cpu_generic_to_op(pcp, val, +=)
+#endif
 
-#define PER_CPU_FIRST_SECTION "..first"
-#else
+#ifndef raw_cpu_and_1
+#define raw_cpu_and_1(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_2
+#define raw_cpu_and_2(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_4
+#define raw_cpu_and_4(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_8
+#define raw_cpu_and_8(pcp, val)		raw_cpu_generic_to_op(pcp, val, &=)
+#endif
 
+#ifndef raw_cpu_or_1
+#define raw_cpu_or_1(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_2
+#define raw_cpu_or_2(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_4
+#define raw_cpu_or_4(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_8
+#define raw_cpu_or_8(pcp, val)		raw_cpu_generic_to_op(pcp, val, |=)
+#endif
 
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_FIRST_SECTION ""
+#ifndef raw_cpu_add_return_1
+#define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_2
+#define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_4
+#define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_8
+#define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
 
-#endif
+#ifndef raw_cpu_xchg_1
+#define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_2
+#define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_4
+#define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_8
+#define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
 
-#ifndef PER_CPU_ATTRIBUTES
-#define PER_CPU_ATTRIBUTES
+#ifndef raw_cpu_cmpxchg_1
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
+#ifndef raw_cpu_cmpxchg_2
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_4
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_8
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
 
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
-#endif
+#ifndef raw_cpu_cmpxchg_double_1
+#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_2
+#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_4
+#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_8
+#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#ifndef this_cpu_read_1
+#define this_cpu_read_1(pcp)	this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_2
+#define this_cpu_read_2(pcp)	this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_4
+#define this_cpu_read_4(pcp)	this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_8
+#define this_cpu_read_8(pcp)	this_cpu_generic_read(pcp)
+#endif
 
-/* Keep until we have removed all uses of __this_cpu_ptr */
-#define __this_cpu_ptr raw_cpu_ptr
+#ifndef this_cpu_write_1
+#define this_cpu_write_1(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_2
+#define this_cpu_write_2(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_4
+#define this_cpu_write_4(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_8
+#define this_cpu_write_8(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef this_cpu_add_1
+#define this_cpu_add_1(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_2
+#define this_cpu_add_2(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_4
+#define this_cpu_add_4(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_8
+#define this_cpu_add_8(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef this_cpu_and_1
+#define this_cpu_and_1(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_2
+#define this_cpu_and_2(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_4
+#define this_cpu_and_4(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_8
+#define this_cpu_and_8(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef this_cpu_or_1
+#define this_cpu_or_1(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_2
+#define this_cpu_or_2(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_4
+#define this_cpu_or_4(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_8
+#define this_cpu_or_8(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef this_cpu_add_return_1
+#define this_cpu_add_return_1(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_2
+#define this_cpu_add_return_2(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_4
+#define this_cpu_add_return_4(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_8
+#define this_cpu_add_return_8(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef this_cpu_xchg_1
+#define this_cpu_xchg_1(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_2
+#define this_cpu_xchg_2(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_4
+#define this_cpu_xchg_4(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_8
+#define this_cpu_xchg_8(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_1
+#define this_cpu_cmpxchg_1(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_2
+#define this_cpu_cmpxchg_2(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_4
+#define this_cpu_cmpxchg_4(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_8
+#define this_cpu_cmpxchg_8(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_double_1
+#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_2
+#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_4
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_8
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
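[Editor's note] The generic helpers above are the fallbacks behind the public this_cpu_*() and raw_cpu_*() macros: the this_cpu variants protect the read-modify-write by disabling interrupts (or preemption, for plain reads), while the raw variants compile to a bare access through raw_cpu_ptr(). A minimal usage sketch; the counter name is made up, while DEFINE_PER_CPU and this_cpu_add are the standard kernel API:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, foo_events);

static void foo_count_event(void)
{
	/*
	 * On an arch without a specialized this_cpu_add_N, this falls
	 * back to this_cpu_generic_to_op(), i.e. an irq-protected
	 * "*raw_cpu_ptr(&foo_events) += 1".
	 */
	this_cpu_add(foo_events, 1);
}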
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 471ba48c7ae..5ba0360663a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -268,6 +268,9 @@
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
 		*(.pci_fixup_suspend)					\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
+		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
+		*(.pci_fixup_suspend_late)				\
+		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
 	}								\
 									\
 	/* Built-in firmware blobs */					\
@@ -693,7 +696,7 @@
 	. = ALIGN(PAGE_SIZE);						\
 	*(.data..percpu..page_aligned)					\
 	. = ALIGN(cacheline);						\
-	*(.data..percpu..readmostly)					\
+	*(.data..percpu..read_mostly)					\
 	. = ALIGN(cacheline);						\
 	*(.data..percpu)						\
 	*(.data..percpu..shared_aligned)				\
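[Editor's note] The vmlinux.lds.h hunks do two things: carve out a .pci_fixup_suspend_late section, walked by the PCI quirk machinery late in the suspend path, and rename the per-cpu readmostly input section to ..read_mostly. A quirk lands in the new section via the matching DECLARE_PCI_FIXUP_SUSPEND_LATE() macro from <linux/pci.h>; the device ID, config offset, and handler below are hypothetical:

#include <linux/pci.h>

static void quirk_foo_suspend_late(struct pci_dev *dev)
{
	/* hypothetical last-minute tweak before the system suspends */
	pci_write_config_byte(dev, 0x40, 0x01);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1234,
			       quirk_foo_suspend_late);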