Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  70
1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7438071b44a..3dc3a8c2c48 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -356,36 +356,50 @@ static inline struct page *compound_head(struct page *page)
return page;
}
+/*
+ * The atomic page->_mapcount, starts from -1: so that transitions
+ * both from it and to it can be tracked, using atomic_inc_and_test
+ * and atomic_add_negative(-1).
+ */
+static inline void reset_page_mapcount(struct page *page)
+{
+ atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+ return atomic_read(&(page)->_mapcount) + 1;
+}
+
static inline int page_count(struct page *page)
{
return atomic_read(&compound_head(page)->_count);
}
+static inline void get_huge_page_tail(struct page *page)
+{
+ /*
+ * __split_huge_page_refcount() cannot run
+ * from under us.
+ */
+ VM_BUG_ON(page_mapcount(page) < 0);
+ VM_BUG_ON(atomic_read(&page->_count) != 0);
+ atomic_inc(&page->_mapcount);
+}
+
+extern bool __get_page_tail(struct page *page);
+
static inline void get_page(struct page *page)
{
+ if (unlikely(PageTail(page)))
+ if (likely(__get_page_tail(page)))
+ return;
/*
* Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_count. Only if
- * we're getting a tail page, the elevated page->_count is
- * required only in the head page, so for tail pages the
- * bugcheck only verifies that the page->_count isn't
- * negative.
+ * requires to already have an elevated page->_count.
*/
- VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
+ VM_BUG_ON(atomic_read(&page->_count) <= 0);
atomic_inc(&page->_count);
- /*
- * Getting a tail page will elevate both the head and tail
- * page->_count(s).
- */
- if (unlikely(PageTail(page))) {
- /*
- * This is safe only because
- * __split_huge_page_refcount can't run under
- * get_page().
- */
- VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
- atomic_inc(&page->first_page->_count);
- }
}
static inline struct page *virt_to_head_page(const void *x)
@@ -804,21 +818,6 @@ static inline pgoff_t page_index(struct page *page)
}
/*
- * The atomic page->_mapcount, like _count, starts from -1:
- * so that transitions both from it and to it can be tracked,
- * using atomic_inc_and_test and atomic_add_negative(-1).
- */
-static inline void reset_page_mapcount(struct page *page)
-{
- atomic_set(&(page)->_mapcount, -1);
-}
-
-static inline int page_mapcount(struct page *page)
-{
- return atomic_read(&(page)->_mapcount) + 1;
-}
-
-/*
* Return true if this page is mapped into pagetables.
*/
static inline int page_mapped(struct page *page)
@@ -1334,7 +1333,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
-extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+extern __printf(3, 4)
+void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
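
Note on the _mapcount convention moved above page_count() in this diff: the counter starts at -1 so that page_mapcount() is simply _mapcount + 1, the first mapping is observed as the increment reaching zero (atomic_inc_and_test in the kernel), and the last unmapping as the decrement going negative again (atomic_add_negative(-1)). The following is a minimal userspace sketch of that convention only, using C11 atomics; struct fake_page, map_page() and unmap_page() are made-up stand-ins, not kernel code.

/* Illustrative sketch of the "_mapcount starts at -1" convention. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
	atomic_int _mapcount;	/* starts at -1, like page->_mapcount */
};

static void reset_mapcount(struct fake_page *p)
{
	atomic_store(&p->_mapcount, -1);
}

static int mapcount(struct fake_page *p)
{
	return atomic_load(&p->_mapcount) + 1;
}

/* Returns 1 on the -1 -> 0 transition, i.e. the first mapping. */
static int map_page(struct fake_page *p)
{
	return atomic_fetch_add(&p->_mapcount, 1) + 1 == 0;
}

/* Returns 1 on the 0 -> -1 transition, i.e. the last unmapping. */
static int unmap_page(struct fake_page *p)
{
	return atomic_fetch_sub(&p->_mapcount, 1) - 1 < 0;
}

int main(void)
{
	struct fake_page page;

	reset_mapcount(&page);
	assert(mapcount(&page) == 0);
	assert(map_page(&page));	/* first map: transition seen */
	assert(!map_page(&page));	/* second map: no transition */
	assert(mapcount(&page) == 2);
	assert(!unmap_page(&page));	/* still mapped once */
	assert(unmap_page(&page));	/* last unmap: transition seen */
	printf("mapcount convention holds\n");
	return 0;
}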
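
Note on the warn_alloc_failed() hunk: __printf(3, 4) wraps GCC's format attribute, telling the compiler that argument 3 is a printf-style format string checked against the variadic arguments starting at position 4, so mismatched format specifiers trigger -Wformat warnings at the call site. Below is a small standalone sketch of that effect; warn_me() is a hypothetical stand-in for warn_alloc_failed(), and the __printf macro is spelled out locally rather than taken from the kernel headers.

#include <stdarg.h>
#include <stdio.h>

/* Local stand-in for the kernel's __printf() annotation. */
#define __printf(a, b) __attribute__((format(printf, a, b)))

static __printf(3, 4)
void warn_me(unsigned int flags, int order, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "warning (flags=%#x, order=%d): ", flags, order);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	warn_me(0x10, 2, "allocation of %d pages failed\n", 1 << 2);
	/* warn_me(0x10, 2, "%s\n", 42); would now be caught by -Wformat. */
	return 0;
}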