Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  |  70
 1 file changed, 23 insertions(+), 47 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9670f71d7be..7438071b44a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
+#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
@@ -636,7 +637,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)

-static inline enum zone_type page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(const struct page *page)
{
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
@@ -664,15 +665,15 @@ static inline int zone_to_nid(struct zone *zone)
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
-extern int page_to_nid(struct page *page);
+extern int page_to_nid(const struct page *page);
#else
-static inline int page_to_nid(struct page *page)
+static inline int page_to_nid(const struct page *page)
{
return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

-static inline struct zone *page_zone(struct page *page)
+static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
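
With the const-qualified accessors above, read-only helpers can now take a
const page pointer. A minimal sketch of a hypothetical caller (the helper is
illustrative, not part of this patch):

	/* Hypothetical read-only helper: compiles only once the
	 * accessors above accept a const struct page *. */
	static inline int page_on_node(const struct page *page, int nid)
	{
		return page_to_nid(page) == nid;
	}
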
@@ -684,7 +685,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

-static inline unsigned long page_to_section(struct page *page)
+static inline unsigned long page_to_section(const struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
@@ -717,7 +718,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*/
#include <linux/vmstat.h>

-static __always_inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(const struct page *page)
{
return __va(PFN_PHYS(page_to_pfn(page)));
}
@@ -736,7 +737,7 @@ static __always_inline void *lowmem_page_address(struct page *page)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
-void *page_address(struct page *page);
+void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif
@@ -910,6 +911,8 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
* @hugetlb_entry: if set, called for each hugetlb entry
+ * *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
+ * is used.
*
* (see walk_page_range for more details)
*/
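
The new *Caution* note documents a real locking rule: hugetlb areas may only
be walked under mmap_sem. A minimal sketch of a conforming walker, assuming
the walk_page_range()/mm_walk interface of this kernel era (the counting
callback itself is hypothetical):

	/* Count populated hugetlb entries in [start, end). */
	static int count_hugetlb(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long next,
				 struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (!pte_none(*pte))
			(*count)++;
		return 0;
	}

	static unsigned long count_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{
		unsigned long count = 0;
		struct mm_walk walk = {
			.hugetlb_entry	= count_hugetlb,
			.mm		= mm,
			.private	= &count,
		};

		down_read(&mm->mmap_sem);	/* required when hugetlb_entry is set */
		walk_page_range(start, end, &walk);
		up_read(&mm->mmap_sem);
		return count;
	}
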
@@ -959,6 +962,8 @@ int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
+extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
@@ -968,6 +973,14 @@ static inline int handle_mm_fault(struct mm_struct *mm,
BUG();
return VM_FAULT_SIGBUS;
}
+static inline int fixup_user_fault(struct task_struct *tsk,
+ struct mm_struct *mm, unsigned long address,
+ unsigned int fault_flags)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return -EFAULT;
+}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
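
The nommu stub above keeps callers link-clean while making misuse loud. For
the MMU case, a sketch of the intended fault-in pattern, modeled on the futex
fault-in usage this export appears intended for (mmap_sem is held by the
caller around the call):

	/* Fault in a user word for writing; returns 0 or -errno. */
	static int fault_in_writeable(u32 __user *uaddr)
	{
		struct mm_struct *mm = current->mm;
		int ret;

		down_read(&mm->mmap_sem);
		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
				       FAULT_FLAG_WRITE);
		up_read(&mm->mmap_sem);
		return ret < 0 ? ret : 0;
	}
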
@@ -1121,44 +1134,6 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
}
#endif

-/*
- * This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
- */
-struct shrink_control {
- gfp_t gfp_mask;
-
- /* How many slab objects shrinker() should scan and try to reclaim */
- unsigned long nr_to_scan;
-};
-
-/*
- * A callback you can register to apply pressure to ageable caches.
- *
- * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
- * and a 'gfpmask'. It should look through the least-recently-used
- * 'nr_to_scan' entries and attempt to free them up. It should return
- * the number of objects which remain in the cache. If it returns -1, it means
- * it cannot do any scanning at this time (eg. there is a risk of deadlock).
- *
- * The 'gfpmask' refers to the allocation we are currently trying to
- * fulfil.
- *
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
- */
-struct shrinker {
- int (*shrink)(struct shrinker *, struct shrink_control *sc);
- int seeks; /* seeks to recreate an obj */
-
- /* These are for internal use */
- struct list_head list;
- long nr; /* objs pending delete */
-};
-#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-extern void register_shrinker(struct shrinker *);
-extern void unregister_shrinker(struct shrinker *);
-
int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
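
The shrinker declarations move to <linux/shrinker.h>, included at the top of
this patch; the callback contract described in the removed comment is
unchanged. A minimal sketch of a user, with hypothetical my_cache_count()
and my_cache_evict() helpers:

	#include <linux/shrinker.h>

	static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
	{
		if (sc->nr_to_scan == 0)
			return my_cache_count();	/* size query fastpath */
		if (!(sc->gfp_mask & __GFP_FS))
			return -1;			/* can't scan safely now */
		my_cache_evict(sc->nr_to_scan);
		return my_cache_count();		/* objects remaining */
	}

	static struct shrinker my_cache_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&my_cache_shrinker) at init;
	 * unregister_shrinker(&my_cache_shrinker) at teardown. */
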
@@ -1313,6 +1288,7 @@ extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_all_active_ranges(void);
void sort_node_map(void);
+unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
@@ -1445,8 +1421,7 @@ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
extern unsigned long do_brk(unsigned long, unsigned long);

-/* filemap.c */
-extern unsigned long page_unuse(struct page *);
+/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
@@ -1633,6 +1608,7 @@ enum mf_flags {
};
extern void memory_failure(unsigned long pfn, int trapno);
extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
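
memory_failure_queue() complements the synchronous handlers above by
deferring the work, so poison reports can be filed from contexts that cannot
call memory_failure() directly. A sketch of a hypothetical error-reporting
hook (the flags value 0 is illustrative):

	/* Hypothetical hardware-poison hook: queue when atomic,
	 * handle inline otherwise. */
	static void report_poisoned_pfn(unsigned long pfn, int trapno)
	{
		if (in_atomic() || irqs_disabled())
			memory_failure_queue(pfn, trapno, 0);
		else
			memory_failure(pfn, trapno);
	}
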