author		Tejun Heo <tj@kernel.org>	2011-11-28 09:46:22 -0800
committer	Tejun Heo <tj@kernel.org>	2011-11-28 09:46:22 -0800
commit		d4bbf7e7759afc172e2bfbc5c416324590049cdd (patch)
tree		7eab5ee5481cd3dcf1162329fec827177640018a /include/linux/mm.h
parent		a150439c4a97db379f0ed6faa46fbbb6e7bf3cb2 (diff)
parent		401d0069cb344f401bc9d264c31db55876ff78c0 (diff)
Merge branch 'master' into x86/memblock
Conflicts & resolutions:

* arch/x86/xen/setup.c
  dc91c728fd "xen: allow extra memory to be in multiple regions"
  24aa07882b "memblock, x86: Replace memblock_x86_reserve/free..."
  conflicted on xen_add_extra_mem() updates. The resolution is trivial
  as the latter just wants to replace memblock_x86_reserve_range()
  with memblock_reserve().

* drivers/pci/intel-iommu.c
  166e9278a3f "x86/ia64: intel-iommu: move to drivers/iommu/"
  5dfe8660a3d "bootmem: Replace work_with_active_regions() with..."
  conflicted as the former moved the file under drivers/iommu/.
  Resolved by applying the changes from the latter to the moved file.

* mm/Kconfig
  6661672053a "memblock: add NO_BOOTMEM config symbol"
  c378ddd53f9 "memblock, x86: Make ARCH_DISCARD_MEMBLOCK a config option"
  conflicted trivially. Both added config options; letting each add
  its own option resolves the conflict.

* mm/memblock.c
  d1f0ece6cdc "mm/memblock.c: small function definition fixes"
  ed7b56a799c "memblock: Remove memblock_memory_can_coalesce()"
  conflicted. The former updates a function removed by the latter.
  The resolution is trivial.

Signed-off-by: Tejun Heo <tj@kernel.org>
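For reference, the xen_add_extra_mem() resolution is essentially a one-line API swap of the following shape (a hedged sketch; the variable and name-string values are illustrative, not the actual setup.c hunk):

	/* before: x86-specific wrapper taking an end-exclusive range */
	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

	/* after: generic interface taking base and size */
	memblock_reserve(extra_start, size);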
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	139
1 file changed, 57 insertions(+), 82 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ceb1e4a1a73..6b365aee839 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
+#include <linux/shrinker.h>
struct mempolicy;
struct anon_vma;
@@ -355,36 +356,50 @@ static inline struct page *compound_head(struct page *page)
return page;
}
+/*
+ * The atomic page->_mapcount starts from -1 so that transitions
+ * both from it and to it can be tracked, using atomic_inc_and_test
+ * and atomic_add_negative(-1).
+ */
+static inline void reset_page_mapcount(struct page *page)
+{
+ atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+ return atomic_read(&(page)->_mapcount) + 1;
+}
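The -1 bias in _mapcount is what makes the first-map and last-unmap transitions observable with a single atomic operation each; a hedged sketch of the pattern the comment alludes to (the bookkeeping hooks are hypothetical):

	/* first mapping: atomic_inc_and_test() fires on the -1 -> 0 transition */
	if (atomic_inc_and_test(&page->_mapcount))
		account_first_mapping(page);		/* hypothetical */

	/* last unmapping: atomic_add_negative(-1, ...) fires on 0 -> -1 */
	if (atomic_add_negative(-1, &page->_mapcount))
		account_last_unmapping(page);		/* hypothetical */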
+
static inline int page_count(struct page *page)
{
return atomic_read(&compound_head(page)->_count);
}
+static inline void get_huge_page_tail(struct page *page)
+{
+ /*
+ * __split_huge_page_refcount() cannot run
+ * from under us.
+ */
+ VM_BUG_ON(page_mapcount(page) < 0);
+ VM_BUG_ON(atomic_read(&page->_count) != 0);
+ atomic_inc(&page->_mapcount);
+}
+
+extern bool __get_page_tail(struct page *page);
+
static inline void get_page(struct page *page)
{
+ if (unlikely(PageTail(page)))
+ if (likely(__get_page_tail(page)))
+ return;
/*
* Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_count. Only if
- * we're getting a tail page, the elevated page->_count is
- * required only in the head page, so for tail pages the
- * bugcheck only verifies that the page->_count isn't
- * negative.
+ * requires the page to already have an elevated page->_count.
*/
- VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
+ VM_BUG_ON(atomic_read(&page->_count) <= 0);
atomic_inc(&page->_count);
- /*
- * Getting a tail page will elevate both the head and tail
- * page->_count(s).
- */
- if (unlikely(PageTail(page))) {
- /*
- * This is safe only because
- * __split_huge_page_refcount can't run under
- * get_page().
- */
- VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
- atomic_inc(&page->first_page->_count);
- }
}
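With tail-page handling folded into get_page() itself, callers can take and drop references without checking for compound pages; a minimal usage sketch under that assumption:

	struct page *page = ...;	/* may be an order-0, head, or THP tail page */

	get_page(page);		/* tail pages are routed through __get_page_tail() */
	/* ... access the page ... */
	put_page(page);		/* releases the reference(s) taken above */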
static inline struct page *virt_to_head_page(const void *x)
@@ -636,7 +651,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
-static inline enum zone_type page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(const struct page *page)
{
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
@@ -664,15 +679,15 @@ static inline int zone_to_nid(struct zone *zone)
}
#ifdef NODE_NOT_IN_PAGE_FLAGS
-extern int page_to_nid(struct page *page);
+extern int page_to_nid(const struct page *page);
#else
-static inline int page_to_nid(struct page *page)
+static inline int page_to_nid(const struct page *page)
{
return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
-static inline struct zone *page_zone(struct page *page)
+static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
@@ -684,7 +699,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}
-static inline unsigned long page_to_section(struct page *page)
+static inline unsigned long page_to_section(const struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
@@ -717,7 +732,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*/
#include <linux/vmstat.h>
-static __always_inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(const struct page *page)
{
return __va(PFN_PHYS(page_to_pfn(page)));
}
@@ -736,7 +751,7 @@ static __always_inline void *lowmem_page_address(struct page *page)
#endif
#if defined(HASHED_PAGE_VIRTUAL)
-void *page_address(struct page *page);
+void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif
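The const-qualifications above let read-only helpers be called on a const struct page * without casts; a minimal sketch with a hypothetical inspect() helper:

	static void inspect(const struct page *page)
	{
		int nid = page_to_nid(page);		/* OK: takes const now */
		enum zone_type zt = page_zonenum(page);	/* OK: takes const now */

		pr_debug("page on node %d, zone index %d\n", nid, zt);
	}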
@@ -803,21 +818,6 @@ static inline pgoff_t page_index(struct page *page)
}
/*
- * The atomic page->_mapcount, like _count, starts from -1:
- * so that transitions both from it and to it can be tracked,
- * using atomic_inc_and_test and atomic_add_negative(-1).
- */
-static inline void reset_page_mapcount(struct page *page)
-{
- atomic_set(&(page)->_mapcount, -1);
-}
-
-static inline int page_mapcount(struct page *page)
-{
- return atomic_read(&(page)->_mapcount) + 1;
-}
-
-/*
* Return true if this page is mapped into pagetables.
*/
static inline int page_mapped(struct page *page)
@@ -910,6 +910,8 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
* @hugetlb_entry: if set, called for each hugetlb entry
+ * *Caution*: The caller must hold mmap_sem if @hugetlb_entry
+ * is used.
*
* (see walk_page_range for more details)
*/
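A hedged sketch of that locking constraint in use, with an illustrative stub callback (the signature follows the mm_walk member as of this kernel):

	static int my_hugetlb_entry(pte_t *pte, unsigned long hmask,
				    unsigned long addr, unsigned long next,
				    struct mm_walk *walk)
	{
		return 0;		/* examine the hugetlb entry here */
	}

	struct mm_walk walk = {
		.hugetlb_entry	= my_hugetlb_entry,
		.mm		= mm,
	};

	down_read(&mm->mmap_sem);	/* mandatory when .hugetlb_entry is set */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);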
@@ -959,6 +961,8 @@ int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
+extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
@@ -968,6 +972,14 @@ static inline int handle_mm_fault(struct mm_struct *mm,
BUG();
return VM_FAULT_SIGBUS;
}
+static inline int fixup_user_fault(struct task_struct *tsk,
+ struct mm_struct *mm, unsigned long address,
+ unsigned int fault_flags)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return -EFAULT;
+}
#endif
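The expected caller pattern is futex-style fault fixup: after a user access fails with page faults disabled, resolve the fault under mmap_sem and retry. A hedged sketch:

	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);
	if (ret < 0)
		return ret;	/* e.g. -EFAULT on an unresolvable fault */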
extern int make_pages_present(unsigned long addr, unsigned long end);
@@ -1121,44 +1133,6 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
}
#endif
-/*
- * This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
- */
-struct shrink_control {
- gfp_t gfp_mask;
-
- /* How many slab objects shrinker() should scan and try to reclaim */
- unsigned long nr_to_scan;
-};
-
-/*
- * A callback you can register to apply pressure to ageable caches.
- *
- * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
- * and a 'gfpmask'. It should look through the least-recently-used
- * 'nr_to_scan' entries and attempt to free them up. It should return
- * the number of objects which remain in the cache. If it returns -1, it means
- * it cannot do any scanning at this time (eg. there is a risk of deadlock).
- *
- * The 'gfpmask' refers to the allocation we are currently trying to
- * fulfil.
- *
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
- */
-struct shrinker {
- int (*shrink)(struct shrinker *, struct shrink_control *sc);
- int seeks; /* seeks to recreate an obj */
-
- /* These are for internal use */
- struct list_head list;
- long nr; /* objs pending delete */
-};
-#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-extern void register_shrinker(struct shrinker *);
-extern void unregister_shrinker(struct shrinker *);
-
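These definitions move verbatim to <linux/shrinker.h>, which the hunk at the top of this patch now includes. A hedged registration sketch against the unchanged API (the cache helpers are hypothetical):

	static int my_shrink(struct shrinker *s, struct shrink_control *sc)
	{
		if (sc->nr_to_scan == 0)
			return my_cache_count();	/* fast path: size query */

		my_cache_evict(sc->nr_to_scan, sc->gfp_mask);
		return my_cache_count();		/* objects left in the cache */
	}

	static struct shrinker my_shrinker = {
		.shrink	= my_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* pair these at init/teardown */
	register_shrinker(&my_shrinker);
	unregister_shrinker(&my_shrinker);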
int vma_wants_writenotify(struct vm_area_struct *vma);
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
@@ -1377,7 +1351,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
-extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+extern __printf(3, 4)
+void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
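The __printf(3, 4) annotation marks argument 3 as the format string with varargs starting at 4, so gcc's -Wformat can now check callers; for instance (hedged):

	warn_alloc_failed(gfp_mask, order, "order %d allocation failed\n", order);	/* checked: OK */
	warn_alloc_failed(gfp_mask, order, "order %s allocation failed\n", order);	/* now warns */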
extern void setup_per_cpu_pageset(void);
@@ -1464,8 +1439,7 @@ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
extern unsigned long do_brk(unsigned long, unsigned long);
-/* filemap.c */
-extern unsigned long page_unuse(struct page *);
+/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
@@ -1652,6 +1626,7 @@ enum mf_flags {
};
extern void memory_failure(unsigned long pfn, int trapno);
extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
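memory_failure_queue() defers hwpoison handling to process context, so it can be called where __memory_failure() cannot (e.g. a machine-check handler running atomically). A hedged sketch of such a caller, with flags left at 0:

	/* in atomic context: queue the poisoned pfn rather than handling it now */
	memory_failure_queue(pfn, trapno, 0);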