path: root/mm/allocpercpu.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2009-03-28 13:40:20 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-03-28 13:48:38 -0700
commit    7c730ccdc1188b97f5c8cb690906242c7ed75c22 (patch)
tree      17ccd927e70dadaf59104c53cce892474eb539b2 /mm/allocpercpu.c
parent    8d735b4148d46446e64d72b22ef0344ee8dc02fa (diff)
parent    82268da1b130f763d22d04f7d016bbf6fc8815c2 (diff)
Merge branch 'percpu-cpumask-x86-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'percpu-cpumask-x86-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (682 commits)
  percpu: fix spurious alignment WARN in legacy SMP percpu allocator
  percpu: generalize embedding first chunk setup helper
  percpu: more flexibility for @dyn_size of pcpu_setup_first_chunk()
  percpu: make x86 addr <-> pcpu ptr conversion macros generic
  linker script: define __per_cpu_load on all SMP capable archs
  x86: UV: remove uv_flush_tlb_others() WARN_ON
  percpu: finer grained locking to break deadlock and allow atomic free
  percpu: move fully free chunk reclamation into a work
  percpu: move chunk area map extension out of area allocation
  percpu: replace pcpu_realloc() with pcpu_mem_alloc() and pcpu_mem_free()
  x86, percpu: setup reserved percpu area for x86_64
  percpu, module: implement reserved allocation and use it for module percpu variables
  percpu: add an indirection ptr for chunk page map access
  x86: make embedding percpu allocator return excessive free space
  percpu: use negative for auto for pcpu_setup_first_chunk() arguments
  percpu: improve first chunk initial area map handling
  percpu: cosmetic renames in pcpu_setup_first_chunk()
  percpu: clean up percpu constants
  x86: un-__init fill_pud/pmd/pte
  x86: remove vestigial fix_ioremap prototypes
  ...

Manually merge conflicts in arch/ia64/kernel/irq_ia64.c
Diffstat (limited to 'mm/allocpercpu.c')
-rw-r--r--  mm/allocpercpu.c | 32
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 4297bc41bfd..1882923bc70 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
/**
- * percpu_alloc_mask - initial setup of per-cpu data
+ * alloc_percpu - initial setup of per-cpu data
* @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
+ * @align: alignment
*
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
+ * Allocate dynamic percpu area. Percpu objects are populated with
+ * zeroed buffers.
*/
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__alloc_percpu(size_t size, size_t align)
{
/*
* We allocate whole cache lines to avoid false sharing
*/
size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
- void *pdata = kzalloc(sz, gfp);
+ void *pdata = kzalloc(sz, GFP_KERNEL);
void *__pdata = __percpu_disguise(pdata);
+ /*
+ * Can't easily make larger alignment work with kmalloc. WARN
+ * on it. Larger alignment should only be used for module
+ * percpu sections on SMP for which this path isn't used.
+ */
+ WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+
if (unlikely(!pdata))
return NULL;
- if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+ if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
+ &cpu_possible_map)))
return __pdata;
kfree(pdata);
return NULL;
}
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(__alloc_percpu);
/**
- * percpu_free - final cleanup of per-cpu data
+ * free_percpu - final cleanup of per-cpu data
* @__pdata: object to clean up
*
* We simply clean up any per-cpu object left. No need for the client to
* track and specify through a bitmask which per-cpu objects are to be freed.
*/
-void percpu_free(void *__pdata)
+void free_percpu(void *__pdata)
{
if (unlikely(!__pdata))
return;
__percpu_depopulate_mask(__pdata, &cpu_possible_map);
kfree(__percpu_disguise(__pdata));
}
-EXPORT_SYMBOL_GPL(percpu_free);
+EXPORT_SYMBOL_GPL(free_percpu);
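
For reference, here is a minimal sketch of how a caller exercises the renamed interface after this merge; the function and the counters variable are illustrative, not part of the patch. alloc_percpu(type) expands to __alloc_percpu(sizeof(type), __alignof__(type)), so ordinary scalar types stay well under the SMP_CACHE_BYTES alignment cap that the new WARN_ON_ONCE() enforces.

	#include <linux/percpu.h>

	/* Hypothetical caller: sum a dynamically allocated per-cpu counter.
	 * alloc_percpu() goes through the __alloc_percpu() path patched
	 * above, which hands back zeroed per-cpu buffers; free_percpu()
	 * depopulates all possible CPUs before kfree()ing the pointer array.
	 */
	static unsigned long sum_percpu_counters(void)
	{
		unsigned long *counters;
		unsigned long total = 0;
		int cpu;

		counters = alloc_percpu(unsigned long);
		if (!counters)
			return 0;

		for_each_possible_cpu(cpu)
			total += *per_cpu_ptr(counters, cpu);

		free_percpu(counters);
		return total;
	}

Note that the per-cpu pointer array itself is still rounded up to whole cache lines (for example, 8 possible CPUs times 8-byte pointers is 64 bytes, exactly one line on a 64-byte-line machine), so accesses to one CPU's slot do not false-share with another's.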