Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 99
1 file changed, 11 insertions, 88 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1692dd6cb91..520238cbae5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,9 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
-#include <linux/mutex.h>
 #include <linux/debug_locks.h>
-#include <linux/backing-dev.h>
 #include <linux/mm_types.h>
 
 struct mempolicy;
@@ -50,69 +48,6 @@ extern int sysctl_legacy_va_layout;
  * mmap() functions).
  */
 
-/*
- * This struct defines a memory VMM memory area. There is one of these
- * per VM-area/task.  A VM area is any part of the process virtual memory
- * space that has a special rule for the page-fault handlers (ie a shared
- * library, the executable area etc).
- */
-struct vm_area_struct {
-	struct mm_struct * vm_mm;	/* The address space we belong to. */
-	unsigned long vm_start;		/* Our start address within vm_mm. */
-	unsigned long vm_end;		/* The first byte after our end address
-					   within vm_mm. */
-
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next;
-
-	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
-	unsigned long vm_flags;		/* Flags, listed below. */
-
-	struct rb_node vm_rb;
-
-	/*
-	 * For areas with an address space and backing store,
-	 * linkage into the address_space->i_mmap prio tree, or
-	 * linkage to the list of like vmas hanging off its node, or
-	 * linkage of vma in the address_space->i_mmap_nonlinear list.
-	 */
-	union {
-		struct {
-			struct list_head list;
-			void *parent;	/* aligns with prio_tree_node parent */
-			struct vm_area_struct *head;
-		} vm_set;
-
-		struct raw_prio_tree_node prio_tree_node;
-	} shared;
-
-	/*
-	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
-	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
-	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
-	 * or brk vma (with NULL file) can only be in an anon_vma list.
-	 */
-	struct list_head anon_vma_node;	/* Serialized by anon_vma->lock */
-	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
-
-	/* Function pointers to deal with this struct. */
-	struct vm_operations_struct * vm_ops;
-
-	/* Information about our backing store: */
-	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
-					   units, *not* PAGE_CACHE_SIZE */
-	struct file * vm_file;		/* File we map to (can be NULL). */
-	void * vm_private_data;		/* was vm_pte (shared mem) */
-	unsigned long vm_truncate_count;/* truncate_count or restart_addr */
-
-#ifndef CONFIG_MMU
-	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
-#endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
-#endif
-};
-
 extern struct kmem_cache *vm_area_cachep;
 
 /*
@@ -631,10 +566,6 @@ static inline struct address_space *page_mapping(struct page *page)
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
 	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
@@ -715,9 +646,6 @@ static inline int page_mapped(struct page *page)
 extern void show_free_areas(void);
 
 #ifdef CONFIG_SHMEM
-int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
-struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
-					unsigned long addr);
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 static inline int shmem_lock(struct file *file, int lock,
@@ -725,18 +653,6 @@ static inline int shmem_lock(struct file *file, int lock,
 {
 	return 0;
 }
-
-static inline int shmem_set_policy(struct vm_area_struct *vma,
-				   struct mempolicy *new)
-{
-	return 0;
-}
-
-static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
-						 unsigned long addr)
-{
-	return NULL;
-}
 #endif
 
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
@@ -779,8 +695,6 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
-int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
-			unsigned long size, pgprot_t prot);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
 
@@ -1106,8 +1020,6 @@ int write_one_page(struct page *page, int wait);
 /* readahead.c */
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
-#define VM_MAX_CACHE_HIT	256	/* max pages in a row in cache before
-					 * turning readahead off */
 
 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
@@ -1218,5 +1130,16 @@ extern int randomize_va_space;
 
 const char * arch_vma_name(struct vm_area_struct *vma);
 
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+void *vmemmap_alloc_block(unsigned long size, int node);
+void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(struct page *start_page,
+						unsigned long pages, int node);
+int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
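For context on the declarations this patch adds: the vmemmap_*_populate() helpers fill in one page-table level each for the virtually mapped memmap used by SPARSEMEM_VMEMMAP, and vmemmap_populate_basepages() is the generic path that maps the struct page array with base pages. The sketch below shows how these helpers plausibly chain together. It is an illustration inferred only from the signatures above (the function name carries a _sketch suffix to make that clear); the real implementation ships separately in mm/sparse-vmemmap.c and is not part of this header diff.

/*
 * Illustrative only: the loop body is an assumption based on the
 * declarations added above, not the code this commit references.
 */
static int vmemmap_populate_basepages_sketch(struct page *start_page,
					     unsigned long pages, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + pages);

	for (; addr < end; addr += PAGE_SIZE) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		/* Make sure each page-table level exists for this address... */
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		/* ...then map one base page of the memmap itself. */
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}
	return 0;
}

The presence of both vmemmap_populate() and vmemmap_populate_basepages() suggests the usual kernel split: an architecture that can back the memmap with large pages provides its own vmemmap_populate(), while others presumably fall back to a base-page walk along these lines, with vmemmap_alloc_block() supplying node-local backing memory.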