author     Ingo Molnar <mingo@kernel.org>    2012-04-14 13:18:27 +0200
committer  Ingo Molnar <mingo@kernel.org>    2012-04-14 13:19:04 +0200
commit     6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch)
tree       021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /include/linux/mm.h
parent     682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff)
parent     a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff)
Merge branch 'perf/core' into perf/uprobes
Merge in latest upstream (and the latest perf development tree), to prepare for tooling changes, and also to pick up v3.4 MM changes that the uprobes code needs to take care of.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 45
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 17b27cd269c..d8738a464b9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -6,6 +6,7 @@
#ifdef __KERNEL__
#include <linux/gfp.h>
+#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
@@ -111,7 +112,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HUGEPAGE 0x01000000 /* MADV_HUGEPAGE marked this vma */
#endif
#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
-#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
+#define VM_NODUMP 0x04000000 /* Do not include in the core dump */
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
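
The VM_ALWAYSDUMP to VM_NODUMP rename inverts the flag's sense: rather than a handful of special mappings opting in to core dumps, any mapping can now opt out. Userspace reaches this through madvise(); a minimal sketch, assuming the MADV_DONTDUMP/MADV_DODUMP constants from the same v3.4 series are present in the installed headers:

/* Sketch only: exclude an anonymous buffer from core dumps. */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 1 << 20;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	/* Ask the kernel to skip this range when writing a core dump
	 * (sets VM_NODUMP on the vma). */
	if (madvise(buf, len, MADV_DONTDUMP))
		perror("madvise(MADV_DONTDUMP)");

	/* ... keep secrets or large caches here ... */

	/* Re-include the range in core dumps (clears VM_NODUMP). */
	madvise(buf, len, MADV_DODUMP);
	munmap(buf, len);
	return 0;
}

Unlike /proc/<pid>/coredump_filter, which acts on whole classes of mappings for a process, this mechanism works per mapping.
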
@@ -893,9 +894,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *);
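
zap_page_range() and unmap_vmas() previously returned an address that no remaining caller used, so both prototypes become void. A caller-side sketch (the helper name is hypothetical, loosely modelled on paths such as madvise(MADV_DONTNEED)):

/* Illustrative caller only; needs <linux/mm.h>. With the new prototype
 * there is no address to collect: the call just tears down the page
 * tables for [start, end). */
static void discard_user_range(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start, NULL);
}
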
@@ -953,7 +954,7 @@ extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
-
+void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
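
truncate_pagecache_range() is new here: it drops page-cache pages for a byte range of an inode without touching i_size, which is what hole-punching code paths need. A rough sketch of such a caller, with a hypothetical helper name and the end offset assumed to be inclusive:

/* Hypothetical punch-hole helper; needs <linux/mm.h> and <linux/fs.h>.
 * Drops cached pages for the hole while leaving i_size untouched. */
static void punch_hole_pagecache(struct inode *inode, loff_t offset, loff_t len)
{
	truncate_pagecache_range(inode, offset, offset + len - 1);
	/* ... the filesystem then releases the underlying blocks ... */
}
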
@@ -1040,6 +1041,9 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
+extern pid_t
+vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
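
vm_is_stack() reports whether a vma serves as the stack of the given task (or, with in_group set, of any thread in its thread group) and returns that thread's pid, or 0 if it is not a stack. Its consumer is the /proc/<pid>/maps annotation code; a hedged sketch of that kind of caller (helper name and exact logic are illustrative, not the actual fs/proc code):

/* Illustrative caller; needs <linux/mm.h>, <linux/sched.h>,
 * <linux/seq_file.h>. Annotates a mapping roughly the way
 * /proc/<pid>/maps does. */
static void show_stack_annotation(struct seq_file *m,
				  struct task_struct *task,
				  struct vm_area_struct *vma)
{
	pid_t tid = vm_is_stack(task, vma, /* in_group = */ 1);

	if (!tid)
		return;				/* not a stack vma */
	if (tid == task->tgid)
		seq_puts(m, "[stack]");		/* main thread's stack */
	else
		seq_printf(m, "[stack:%d]", tid);
}
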
@@ -1058,19 +1062,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
/*
* per-process(per-mm_struct) statistics.
*/
-static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
-{
- atomic_long_set(&mm->rss_stat.count[member], value);
-}
-
-#if defined(SPLIT_RSS_COUNTING)
-unsigned long get_mm_counter(struct mm_struct *mm, int member);
-#else
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
- return atomic_long_read(&mm->rss_stat.count[member]);
-}
+ long val = atomic_long_read(&mm->rss_stat.count[member]);
+
+#ifdef SPLIT_RSS_COUNTING
+ /*
+ * The counter is updated asynchronously and may transiently go
+ * negative, which callers never expect, so clamp it to zero.
+ */
+ if (val < 0)
+ val = 0;
#endif
+ return (unsigned long)val;
+}
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
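
Under SPLIT_RSS_COUNTING the per-mm counters are folded in lazily from per-task deltas, so a reader can briefly observe a negative value; clamping in get_mm_counter() hides that and removes the need for an out-of-line variant. Readers simply sum the clamped counters, as in this sketch (it mirrors the get_mm_rss() helper defined elsewhere in this header; the function name is illustrative):

/* Sketch of a reader built on top of get_mm_counter(); needs <linux/mm.h>. */
static inline unsigned long approx_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
	       get_mm_counter(mm, MM_ANONPAGES);
}
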
@@ -1127,9 +1132,9 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
}
#if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+void sync_mm_rss(struct mm_struct *mm);
#else
-static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif
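
sync_mm_rss() drops its task argument because it is only safe to flush the current task's pending rss deltas, so passing an arbitrary task was misleading. A caller now looks roughly like this (the surrounding function is illustrative, not the real exit/exec path):

/* Illustrative caller after the signature change; needs <linux/mm.h>. */
static void flush_rss_before_mm_release(struct mm_struct *mm)
{
	sync_mm_rss(mm);	/* was: sync_mm_rss(current, mm) */
}
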
@@ -1253,6 +1258,8 @@ static inline void pgtable_page_dtor(struct page *page)
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
+extern void free_initmem(void);
+
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
* With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
@@ -1291,8 +1298,6 @@ extern void get_pfn_range_for_nid(unsigned int nid,
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
-int add_from_early_node_map(struct range *range, int az,
- int nr_range, int nid);
extern void sparse_memory_present_with_active_regions(int nid);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
@@ -1598,9 +1603,9 @@ void vmemmap_populate_print_last(void);
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
+ MF_ACTION_REQUIRED = 1 << 1,
};
-extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
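
The __memory_failure()/memory_failure() pair collapses into a single memory_failure() that takes the flags argument and returns a result, and the new MF_ACTION_REQUIRED flag marks faults that must be dealt with before the affected task can continue (the action-required machine-check case). A hedged sketch of such a caller (the wrapper function is illustrative):

/* Illustrative only; needs <linux/mm.h> and <linux/printk.h>. Shows how an
 * action-required machine-check path might call the reworked interface. */
static void handle_action_required_fault(unsigned long pfn, int trapno)
{
	int ret = memory_failure(pfn, trapno, MF_ACTION_REQUIRED);

	if (ret)
		pr_err("memory_failure(%#lx) returned %d\n", pfn, ret);
}
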