path: root/include/asm-x86/percpu_64.h
author    Thomas Gleixner <tglx@linutronix.de>    2007-10-11 11:20:03 +0200
committer Thomas Gleixner <tglx@linutronix.de>    2007-10-11 11:20:03 +0200
commit    96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch)
tree      d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86/percpu_64.h
parent    27bd0c955648646abf2a353a8371d28c37bcd982 (diff)
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fixup the header install make rules.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/percpu_64.h')
-rw-r--r--    include/asm-x86/percpu_64.h    68
1 file changed, 68 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86/percpu_64.h b/include/asm-x86/percpu_64.h
new file mode 100644
index 00000000000..5abd4827010
--- /dev/null
+++ b/include/asm-x86/percpu_64.h
@@ -0,0 +1,68 @@
+#ifndef _ASM_X8664_PERCPU_H_
+#define _ASM_X8664_PERCPU_H_
+#include <linux/compiler.h>
+
+/* Same as asm-generic/percpu.h, except that we store the per cpu offset
+ in the PDA. Longer term the PDA and every per cpu variable
+ should be just put into a single section and referenced directly
+ from %gs */
+
+#ifdef CONFIG_SMP
+
+#include <asm/pda.h>
+
+#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
+#define __my_cpu_offset() read_pda(data_offset)
+
+#define per_cpu_offset(x) (__per_cpu_offset(x))
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name) \
+ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_internodealigned_in_smp
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) (*({ \
+ extern int simple_identifier_##var(void); \
+ RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)); }))
+#define __get_cpu_var(var) (*({ \
+ extern int simple_identifier_##var(void); \
+ RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
+#define __raw_get_cpu_var(var) (*({ \
+ extern int simple_identifier_##var(void); \
+ RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
+
+/* A macro to avoid #include hell... */
+#define percpu_modcopy(pcpudst, src, size) \
+do { \
+ unsigned int __i; \
+ for_each_possible_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset(__i), \
+ (src), (size)); \
+} while (0)
+
+extern void setup_per_cpu_areas(void);
+
+#else /* ! SMP */
+
+#define DEFINE_PER_CPU(type, name) \
+ __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
+
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
+#define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var
+
+#endif /* SMP */
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+#endif /* _ASM_X8664_PERCPU_H_ */
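
For context, a minimal sketch of how the interface defined above is used. This is not part of the patch; the variable name 'hits' and both functions are hypothetical. On SMP the accessors offset through cpu_pda(cpu)->data_offset; on UP they collapse to a direct reference, so the same source works under both configurations.

    #include <linux/percpu.h>       /* pulls in asm/percpu.h */
    #include <linux/smp.h>
    #include <linux/cpumask.h>
    #include <linux/preempt.h>

    /* One copy of 'hits' per CPU, placed in the .data.percpu section. */
    static DEFINE_PER_CPU(unsigned long, hits);

    static void count_hit(void)
    {
            /* __get_cpu_var() reads this CPU's data_offset from the
             * PDA, so the current CPU must not change mid-expression:
             * disable preemption around the access. */
            preempt_disable();
            __get_cpu_var(hits)++;
            preempt_enable();
    }

    static unsigned long total_hits(void)
    {
            unsigned long sum = 0;
            int cpu;

            /* per_cpu(var, cpu) addresses any CPU's copy explicitly. */
            for_each_possible_cpu(cpu)
                    sum += per_cpu(hits, cpu);
            return sum;
    }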
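The accessors go through RELOC_HIDE() rather than plain pointer arithmetic because, as the "discarded region" comment notes, the per_cpu__##name symbols live in a section that only serves as a template: GCC would otherwise be entitled to assume that pointer-plus-offset still points into the original object. For reference, the definition from linux/compiler-gcc.h of the same era (quoted from the tree of that period, reflowed here); the empty asm forces the pointer through a register, hiding its provenance from the compiler:

    #define RELOC_HIDE(ptr, off)                                    \
      ({ unsigned long __ptr;                                       \
         __asm__ ("" : "=r"(__ptr) : "0"(ptr));                     \
         (typeof(ptr)) (__ptr + (off)); })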
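percpu_modcopy() is the "avoid #include hell" hook for the module loader: it replicates a module's .data.percpu template into every CPU's copy of the per-cpu area without this header needing module internals. A hypothetical caller, loosely modeled on what kernel/module.c does at module load; all names here are illustrative, not the loader's real variables:

    /* Illustrative only: copy a module's .data.percpu template into
     * each CPU's area.  'percpu_base' points at the module's reserved
     * per-cpu region, 'section' at the template read from the ELF
     * file. */
    static void copy_module_percpu(void *percpu_base,
                                   const void *section,
                                   unsigned long size)
    {
            percpu_modcopy(percpu_base, section, size);
    }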