Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h  33
1 file changed, 28 insertions, 5 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5ff9676c1e2..000da12b5cf 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -23,6 +23,34 @@
 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+/*
+ * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ *
+ * This delays freeing the SLAB page by a grace period, it does _NOT_
+ * delay object freeing. This means that if you do kmem_cache_free()
+ * that memory location is free to be reused at any time. Thus it may
+ * be possible to see another object there in the same RCU grace period.
+ *
+ * This feature only ensures the memory location backing the object
+ * stays valid, the trick to using this is relying on an independent
+ * object validation pass. Something like:
+ *
+ * rcu_read_lock()
+ * again:
+ *  obj = lockless_lookup(key);
+ *  if (obj) {
+ *    if (!try_get_ref(obj)) // might fail for free objects
+ *      goto again;
+ *
+ *    if (obj->key != key) { // not the object we expected
+ *      put_ref(obj);
+ *      goto again;
+ *    }
+ *  }
+ * rcu_read_unlock();
+ *
+ * See also the comment on struct slab_rcu in mm/slab.c.
+ */
 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
@@ -288,9 +316,4 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
     return kmalloc_node(size, flags | __GFP_ZERO, node);
 }
 
-#ifdef CONFIG_SLABINFO
-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
-#endif
-
 #endif /* _LINUX_SLAB_H */
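The pseudocode in the new comment leaves lockless_lookup(), try_get_ref() and put_ref() abstract. Below is a minimal sketch, not part of the patch, of how a user of a SLAB_DESTROY_BY_RCU cache might implement that validation pass, with atomic_inc_not_zero()/atomic_dec_and_test() standing in for the reference helpers. All names (my_obj, my_cache, my_lockless_lookup, my_get, my_put) are hypothetical, and the RCU-safe search structure behind the lookup is elided.

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include <linux/errno.h>

struct my_obj {
	unsigned long	key;
	atomic_t	refcnt;		/* zero while the object is free */
	/* ... payload ... */
};

static struct kmem_cache *my_cache;

static int my_cache_init(void)
{
	/* Backing pages survive a grace period; objects may be reused sooner. */
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj), 0,
				     SLAB_DESTROY_BY_RCU, NULL);
	return my_cache ? 0 : -ENOMEM;
}

/* Stands in for lockless_lookup(): any RCU-safe hash/list walk. Definition elided. */
struct my_obj *my_lockless_lookup(unsigned long key);

static void my_put(struct my_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		kmem_cache_free(my_cache, obj);	/* page freeing still deferred by RCU */
}

static struct my_obj *my_get(unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
again:
	obj = my_lockless_lookup(key);
	if (obj) {
		/* try_get_ref(): fails if the object was freed under us. */
		if (!atomic_inc_not_zero(&obj->refcnt))
			goto again;
		/* The memory may already hold a different, recycled object. */
		if (obj->key != key) {
			my_put(obj);
			goto again;
		}
	}
	rcu_read_unlock();
	return obj;
}

Both checks are needed: a non-zero refcount only proves the slot currently holds some live object, while the key recheck proves it is still the object the lookup meant. For the same reason, the allocation side of such a cache has to initialize the key before making the refcount non-zero, with appropriate memory barriers.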