author		Len Brown <len.brown@intel.com>	2008-10-22 23:57:26 -0400
committer	Len Brown <len.brown@intel.com>	2008-10-23 00:11:07 -0400
commit		057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree		4333e608da237c73ff69b10878025cca96dcb4c8 /include/linux/pagemap.h
parent		3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent		2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'linus' into test
Conflicts:
	MAINTAINERS
	arch/x86/kernel/acpi/boot.c
	arch/x86/kernel/acpi/sleep.c
	drivers/acpi/Kconfig
	drivers/pnp/Makefile
	drivers/pnp/quirks.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--	include/linux/pagemap.h	44
1 file changed, 36 insertions(+), 8 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5da31c12101..709742be02f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,6 +32,34 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
}
}
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
+
+static inline void mapping_set_unevictable(struct address_space *mapping)
+{
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ if (likely(mapping))
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
+}
+#else
+static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ return 0;
+}
+#endif
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
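
The helpers added above stash an address_space-wide flag in the high bits of mapping->flags, above the gfp mask that mapping_gfp_mask() extracts in the trailing context lines; that is why AS_UNEVICTABLE is defined relative to __GFP_BITS_SHIFT. What follows is a minimal userspace sketch of that layout, not kernel code: the kernel's set_bit()/clear_bit()/test_bit() are atomic bitops, modeled here with plain bitwise operations (single-threaded only), and the __GFP_BITS_SHIFT value is an assumption picked for illustration.

	/* Userspace model of the AS_UNEVICTABLE helpers above. The kernel's
	 * set_bit()/clear_bit()/test_bit() are atomic; plain bitwise ops
	 * stand in for them here, so this is only valid single-threaded.
	 * __GFP_BITS_SHIFT is hard-coded to a plausible value (assumption). */
	#include <stdio.h>

	#define __GFP_BITS_SHIFT 22	/* assumption; varies by kernel config */
	#define AS_UNEVICTABLE   (__GFP_BITS_SHIFT + 2)

	struct address_space { unsigned long flags; };

	static void mapping_set_unevictable(struct address_space *mapping)
	{
		mapping->flags |= 1UL << AS_UNEVICTABLE;
	}

	static void mapping_clear_unevictable(struct address_space *mapping)
	{
		mapping->flags &= ~(1UL << AS_UNEVICTABLE);
	}

	static int mapping_unevictable(struct address_space *mapping)
	{
		return mapping && !!(mapping->flags & (1UL << AS_UNEVICTABLE));
	}

	int main(void)
	{
		struct address_space as = { .flags = 0 };

		mapping_set_unevictable(&as);	/* e.g. after SHM_LOCK */
		printf("unevictable: %d\n", mapping_unevictable(&as));	/* 1 */
		mapping_clear_unevictable(&as);	/* e.g. after SHM_UNLOCK */
		printf("unevictable: %d\n", mapping_unevictable(&as));	/* 0 */
		return 0;
	}

Packing both the gfp mask and address-space state into one unsigned long keeps struct address_space small; the low bits stay reserved for the allocation flags.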
@@ -271,19 +299,19 @@ extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
{
- set_bit(PG_locked, &page->flags);
+ __set_bit(PG_locked, &page->flags);
}
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
{
- clear_bit(PG_locked, &page->flags);
+ __clear_bit(PG_locked, &page->flags);
}
static inline int trylock_page(struct page *page)
{
- return !test_and_set_bit(PG_locked, &page->flags);
+ return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
/*
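
Two distinct changes land in the hunk above: set_page_locked()/clear_page_locked() become double-underscore variants built on the non-atomic __set_bit()/__clear_bit(), which are safe only while the caller holds the sole reference to the page, and trylock_page() switches to test_and_set_bit_lock(), which gives the successful lock attempt acquire ordering. Below is a rough userspace model using C11 atomics in place of the kernel bitops; the PG_locked bit position is an assumption for illustration.

	/* Userspace sketch of the locking change above, using C11 atomics to
	 * stand in for the kernel bitops. test_and_set_bit_lock() is an
	 * atomic RMW with acquire ordering; __set_bit() is non-atomic and
	 * only safe while no one else can see the page, e.g. right after
	 * allocation. PG_locked's value here is an assumption. */
	#include <stdatomic.h>
	#include <stdio.h>

	#define PG_locked 0	/* assumption for illustration */

	struct page { atomic_ulong flags; };

	/* Non-atomic set: caller must hold the only reference to the page,
	 * so a relaxed read-modify-write cannot race with anyone. */
	static void __set_page_locked(struct page *page)
	{
		unsigned long f = atomic_load_explicit(&page->flags,
						       memory_order_relaxed);
		atomic_store_explicit(&page->flags, f | (1UL << PG_locked),
				      memory_order_relaxed);
	}

	/* Atomic trylock with acquire semantics, modeling
	 * test_and_set_bit_lock(): returns 1 on success. */
	static int trylock_page(struct page *page)
	{
		unsigned long old = atomic_fetch_or_explicit(&page->flags,
					1UL << PG_locked, memory_order_acquire);
		return !(old & (1UL << PG_locked));
	}

	int main(void)
	{
		struct page p = { .flags = 0 };

		__set_page_locked(&p);	/* page is brand new: no races yet */
		printf("trylock on locked page: %d\n", trylock_page(&p)); /* 0 */
		return 0;
	}

The acquire ordering on the lock bit means a winning trylock_page() also sees every write made before the previous unlock, which is what lets callers treat a locked page's contents as stable.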
@@ -410,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
- set_page_locked(page);
+ __set_page_locked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
- clear_page_locked(page);
+ __clear_page_locked(page);
return error;
}
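
The rewritten add_to_page_cache() shows why the non-atomic variants suffice here: the page is locked before it becomes visible to any other CPU, and unlocked again only on the failure path, where it is still private to the caller. The following hedged userspace sketch reproduces that lock-before-publish, unlock-on-failure shape; the stub insert routine and its forced -ENOMEM are inventions for illustration, not kernel API.

	/* Sketch of the caller-side pattern above: a freshly allocated page
	 * is locked with the cheap non-atomic helper, handed to the cache
	 * insert, and unlocked only if the insert fails. The stub routine
	 * and its forced -ENOMEM are stand-ins, not real kernel API. */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PG_locked 0

	struct page { unsigned long flags; };

	static void __set_page_locked(struct page *page)
	{
		page->flags |= 1UL << PG_locked;
	}

	static void __clear_page_locked(struct page *page)
	{
		page->flags &= ~(1UL << PG_locked);
	}

	/* Stub: pretend the page-cache insert failed with -ENOMEM. */
	static int add_to_page_cache_locked_stub(struct page *page)
	{
		(void)page;
		return -ENOMEM;
	}

	static int add_to_page_cache_stub(struct page *page)
	{
		int error;

		__set_page_locked(page);	/* page not yet visible */
		error = add_to_page_cache_locked_stub(page);
		if (error)
			__clear_page_locked(page);	/* undo while private */
		return error;
	}

	int main(void)
	{
		struct page *page = calloc(1, sizeof(*page));

		if (add_to_page_cache_stub(page))
			printf("insert failed, page unlocked: flags=%lu\n",
			       page->flags);
		free(page);
		return 0;
	}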