From 8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 16 Oct 2007 01:26:06 -0700
Subject: SLUB: Do not use page->mapping

After moving the lockless_freelist to kmem_cache_cpu we no longer need
page->lockless_freelist. Restructure the use of the struct page fields in
such a way that we never touch the mapping field. This in turn allows us
to remove the special casing of SLUB when determining the mapping of a
page (needed for the corner case of machines with virtual caches, which
need to flush the caches of processors mapping a page).

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h       | 4 ----
 include/linux/mm_types.h | 9 ++-------
 2 files changed, 2 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6a68d41444f..292c6862375 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -568,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
 	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 145b3d05304..0cdc8fbf643 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -62,13 +62,8 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 	    spinlock_t ptl;
 #endif
-	    struct {			/* SLUB uses */
-		void **lockless_freelist;
-		struct kmem_cache *slab;	/* Pointer to slab */
-	    };
-	    struct {
-		struct page *first_page;	/* Compound pages */
-	    };
+	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+	    struct page *first_page;	/* Compound tail pages */
 	};
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
-- 
cgit v1.2.3-70-g09d2
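
As an illustration only (not kernel code): the stripped-down,
userspace-compilable sketch below models the union inside struct page
before and after this patch. The field names mirror
include/linux/mm_types.h, but the struct names page_before/page_after,
the reduced field set and the printf demo are invented for the example.
It shows why the second hunk matters: in the old layout SLUB's two-word
struct put slab in the second word of the union, aliasing mapping, while
the new single-member slab aliases private instead, so page_mapping() no
longer needs a CONFIG_SLUB special case.

/* Illustration only: simplified model of the union in struct page. */
#include <stdio.h>
#include <stddef.h>

struct kmem_cache;			/* opaque, as in the kernel */

struct page_before {			/* layout before this patch */
	union {
		struct {
			unsigned long private;
			void *mapping;
		};
		struct {			/* SLUB used two words... */
			void **lockless_freelist;
			struct kmem_cache *slab;	/* ...so slab aliased mapping */
		};
		struct {
			struct page_before *first_page;
		};
	};
};

struct page_after {			/* layout after this patch */
	union {
		struct {
			unsigned long private;
			void *mapping;
		};
		struct kmem_cache *slab;	/* aliases private, not mapping */
		struct page_after *first_page;
	};
};

int main(void)
{
	printf("before: slab at %zu, mapping at %zu\n",
	       offsetof(struct page_before, slab),
	       offsetof(struct page_before, mapping));
	printf("after:  slab at %zu, mapping at %zu\n",
	       offsetof(struct page_after, slab),
	       offsetof(struct page_after, mapping));
	return 0;
}

On a typical LP64 build this prints offset 8 for both slab and mapping in
the "before" layout, and offsets 0 and 8 in the "after" layout, i.e. the
SLUB slab pointer no longer overlaps page->mapping.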