Diffstat (limited to 'include')

-rw-r--r--  include/asm-generic/vmlinux.lds.h  24
-rw-r--r--  include/linux/percpu-defs.h         66
-rw-r--r--  include/linux/percpu.h              88
-rw-r--r--  include/linux/vmalloc.h              6

4 files changed, 157 insertions, 27 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6ad76bf5fb4..a43223af98b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -33,13 +33,10 @@
* BSS_SECTION(0, 0, 0)
* _end = .;
*
- * /DISCARD/ : {
- * EXIT_TEXT
- * EXIT_DATA
- * EXIT_CALL
- * }
* STABS_DEBUG
* DWARF_DEBUG
+ *
+ * DISCARDS // must be the last
* }
*
* [__init_begin, __init_end] is the init section that may be freed after init
@@ -626,6 +623,23 @@
#define INIT_RAM_FS
#endif

+/*
+ * Default discarded sections.
+ *
+ * Some archs want to discard exit text/data at runtime rather than
+ * link time due to cross-section references such as alt instructions,
+ * bug table, eh_frame, etc. DISCARDS must be the last of the output
+ * section definitions so that such archs can put those sections in
+ * earlier output section definitions.
+ */
+#define DISCARDS \
+ /DISCARD/ : { \
+ EXIT_TEXT \
+ EXIT_DATA \
+ EXIT_CALL \
+ *(.discard) \
+ }
+
/**
* PERCPU_VADDR - define output section for percpu area
* @vaddr: explicit base address (optional)
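
The placement rule in the new DISCARDS comment follows from how the linker works: each input section is assigned to the first output section whose pattern matches it. An arch that frees exit code at runtime therefore claims EXIT_TEXT in an earlier output section of its own, and DISCARDS, kept last, only swallows what no earlier definition matched. A hedged vmlinux.lds.S sketch, not part of this patch (macro selection abbreviated):

	SECTIONS
	{
		. = LOAD_OFFSET;

		.text : { HEAD_TEXT TEXT_TEXT }

		/* this arch frees exit code at runtime, so it claims
		 * EXIT_TEXT here, before /DISCARD/ can match it */
		.exit.text : { EXIT_TEXT }

		STABS_DEBUG
		DWARF_DEBUG

		DISCARDS	/* must stay the last output section */
	}
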
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 0761491b3ee..9bd03193ecd 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -10,22 +10,70 @@
/*
* Base implementations of per-CPU variable declarations and definitions, where
* the section in which the variable is to be placed is provided by the
- * 'section' argument. This may be used to affect the parameters governing the
+ * 'sec' argument. This may be used to affect the parameters governing the
* variable's storage.
*
* NOTE! The sections for the DECLARE and for the DEFINE must match, lest
* linkage errors occur due to the compiler generating the wrong code to
* access that section.
*/
-#define DECLARE_PER_CPU_SECTION(type, name, section) \
- extern \
- __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SECTION(type, name, section) \
- __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
- PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES \
+#define __PCPU_ATTRS(sec) \
+ __attribute__((section(PER_CPU_BASE_SECTION sec))) \
+ PER_CPU_ATTRIBUTES
+
+#define __PCPU_DUMMY_ATTRS \
+ __attribute__((section(".discard"), unused))
+
+/*
+ * s390 and alpha modules require percpu variables to be defined as
+ * weak to force the compiler to generate GOT based external
+ * references for them. This is necessary because percpu sections
+ * will be located outside of the normally addressable area.
+ *
+ * This definition imposes the following two extra restrictions on
+ * percpu variable definitions.
+ *
+ * 1. The symbol must be globally unique, even static ones.
+ * 2. Static percpu variables cannot be defined inside a function.
+ *
+ * Archs which need weak percpu definitions should define
+ * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h.
+ *
+ * To ensure that the generic code observes the above two
+ * restrictions, the weak definition is used for all cases when
+ * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
+ */
+#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+/*
+ * The __pcpu_scope_* dummy variable is used to enforce scope. It
+ * absorbs the static modifier when one is used in front of
+ * DEFINE_PER_CPU() and triggers a build failure if
+ * DECLARE_PER_CPU() is later used for the same variable.
+ *
+ * The __pcpu_unique_* dummy variable is used to enforce symbol
+ * uniqueness, so that a hidden weak symbol collision, which would
+ * cause unrelated variables to share the same address, is detected
+ * at build time.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec) \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec) \
+ __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
+ __typeof__(type) per_cpu__##name
+#else
+/*
+ * Normal declaration and definition macros.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec) \
+ extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec) \
+ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
__typeof__(type) per_cpu__##name
+#endif

/*
* Variant on the per-CPU variable declaration/definition theme used for
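
To make the weak-percpu scheme above concrete, here is a rough sketch of what static DEFINE_PER_CPU(int, counter) expands to when ARCH_NEEDS_WEAK_PER_CPU is defined. The ".data.percpu" section name is an assumption (PER_CPU_BASE_SECTION is arch-configurable), and the layout is editorialized:

	/* the leading static binds to the scope dummy, not to the
	 * variable, so a later extern declaration via DECLARE_PER_CPU()
	 * conflicts with it and breaks the build */
	static __attribute__((section(".discard"), unused)) char __pcpu_scope_counter;

	/* always global: a second definition of "counter" in another
	 * object file becomes a duplicate-symbol link error instead of a
	 * silent weak-symbol merge */
	__attribute__((section(".discard"), unused)) char __pcpu_unique_counter;

	/* the real variable stays global and weak, giving s390/alpha
	 * modules GOT-based references to it */
	__attribute__((section(".data.percpu"), weak)) __typeof__(int) per_cpu__counter;

Both dummies land in .discard, which the DISCARDS macro added to vmlinux.lds.h in this same patch throws away at link time, so they cost nothing in the final image.
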
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 26fd9d12f05..878836ca999 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,7 +34,7 @@
#ifdef CONFIG_SMP

-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)

@@ -57,19 +57,70 @@
#endif

extern void *pcpu_base_addr;
+extern const unsigned long *pcpu_unit_offsets;

-typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
-typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+struct pcpu_group_info {
+ int nr_units; /* aligned # of units */
+ unsigned long base_offset; /* base address offset */
+ unsigned int *cpu_map; /* unit->cpu map, empty
+ * entries contain NR_CPUS */
+};
+
+struct pcpu_alloc_info {
+ size_t static_size;
+ size_t reserved_size;
+ size_t dyn_size;
+ size_t unit_size;
+ size_t atom_size;
+ size_t alloc_size;
+ size_t __ai_size; /* internal, don't use */
+ int nr_groups; /* 0 if grouping unnecessary */
+ struct pcpu_group_info groups[];
+};

-extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
- size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size,
- void *base_addr,
- pcpu_populate_pte_fn_t populate_pte_fn);
+enum pcpu_fc {
+ PCPU_FC_AUTO,
+ PCPU_FC_EMBED,
+ PCPU_FC_PAGE,

-extern ssize_t __init pcpu_embed_first_chunk(
- size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size);
+ PCPU_FC_NR,
+};
+extern const char *pcpu_fc_names[PCPU_FC_NR];
+
+extern enum pcpu_fc pcpu_chosen_fc;
+
+typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
+ size_t align);
+typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
+typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
+
+extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
+ int nr_units);
+extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
+
+extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
+ size_t reserved_size, ssize_t dyn_size,
+ size_t atom_size,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn);
+
+extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ void *base_addr);
+
+#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
+extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
+ size_t atom_size,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn);
+#endif
+
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+extern int __init pcpu_page_first_chunk(size_t reserved_size,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_populate_pte_fn_t populate_pte_fn);
+#endif

/*
* Use this to get to a cpu's version of the per-cpu object
@@ -80,7 +131,7 @@ extern ssize_t __init pcpu_embed_first_chunk(

extern void *__alloc_reserved_percpu(size_t size, size_t align);

-#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */

struct percpu_data {
void *ptrs[1];
@@ -99,11 +150,15 @@ struct percpu_data {
(__typeof__(ptr))__p->ptrs[(cpu)]; \
})

-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void __init setup_per_cpu_areas(void);
+#endif
+
#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
@@ -124,6 +179,13 @@ static inline void free_percpu(void *p)
kfree(p);
}

+static inline void __init setup_per_cpu_areas(void) { }
+
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+ return NULL;
+}
+
#endif /* CONFIG_SMP */

#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
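
As a usage sketch for the reworked first-chunk API, an arch-side setup_per_cpu_areas() might wire up the hooks roughly as follows. The bootmem helpers, reserve sizes, and the __per_cpu_offset fix-up mirror common arch patterns rather than anything this header mandates; a NUMA-aware caller would pass a real cpu_distance_fn instead of NULL, and headers are abbreviated:

	#include <linux/bootmem.h>
	#include <linux/percpu.h>
	#include <asm/sections.h>	/* __per_cpu_start */

	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
					   size_t align)
	{
		/* real archs allocate memory local to cpu's node here */
		return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
	}

	static void __init pcpu_fc_free(void *ptr, size_t size)
	{
		free_bootmem(__pa(ptr), size);
	}

	void __init setup_per_cpu_areas(void)
	{
		unsigned long delta;
		unsigned int cpu;
		int rc;

		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE,
					    PAGE_SIZE, NULL,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			panic("cannot initialize percpu area (err=%d)", rc);

		/* pcpu_setup_first_chunk() has published pcpu_base_addr and
		 * pcpu_unit_offsets; fold them into the arch's per-CPU
		 * offset table used by the per_cpu accessors */
		delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
		for_each_possible_cpu(cpu)
			__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
	}
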
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index a43ebec3a7b..227c2a585e4 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -115,4 +115,10 @@ extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

+struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+ const size_t *sizes, int nr_vms,
+ size_t align, gfp_t gfp_mask);
+
+void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
+
#endif /* _LINUX_VMALLOC_H */
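
These two helpers exist for the percpu allocator's first-chunk code, which needs several vmalloc areas at fixed distances from a common base. A hedged sketch of the calling pattern, with a hypothetical function name and illustrative group layout:

	static int __init example_reserve_group_areas(size_t unit_size)
	{
		/* two hypothetical groups; the offsets keep both areas
		 * congruent, i.e. at fixed distances from a common base */
		const unsigned long offsets[] = { 0, 4 * unit_size };
		const size_t sizes[] = { 2 * unit_size, 2 * unit_size };
		struct vm_struct **vms;

		vms = pcpu_get_vm_areas(offsets, sizes, 2, unit_size,
					GFP_KERNEL);
		if (!vms)
			return -ENOMEM;

		/* map each group's percpu pages at vms[0]->addr and
		 * vms[1]->addr, then use them */

		pcpu_free_vm_areas(vms, 2);	/* paired teardown */
		return 0;
	}
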