Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c  62
1 files changed, 42 insertions, 20 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index fcd19d323f9..8a82342a859 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -49,7 +49,6 @@
 #include <linux/ctype.h>
 #include <linux/migrate.h>
 #include <linux/highmem.h>
-#include <linux/backing-dev.h>
 
 #include <asm/uaccess.h>
 #include <asm/div64.h>
@@ -96,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
 	 *
-	 * __GFP_MOVABLE is masked out as swap vectors cannot move
+	 * Mobility flags are masked out as swap vectors cannot move
 	 */
-	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
 				PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
@@ -972,7 +971,7 @@ static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_
 		*nodelist++ = '\0';
 		if (nodelist_parse(nodelist, *policy_nodes))
 			goto out;
-		if (!nodes_subset(*policy_nodes, node_online_map))
+		if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
 			goto out;
 	}
 	if (!strcmp(value, "default")) {
@@ -997,9 +996,11 @@ static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_
 		err = 0;
 	} else if (!strcmp(value, "interleave")) {
 		*policy = MPOL_INTERLEAVE;
-		/* Default to nodes online if no nodelist */
+		/*
+		 * Default to online nodes with memory if no nodelist
+		 */
 		if (!nodelist)
-			*policy_nodes = node_online_map;
+			*policy_nodes = node_states[N_HIGH_MEMORY];
 		err = 0;
 	}
 out:
@@ -1025,8 +1026,8 @@ static struct page *shmem_swapin_async(struct shared_policy *p,
 	return page;
 }
 
-struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
-			unsigned long idx)
+static struct page *shmem_swapin(struct shmem_inode_info *info,
+			swp_entry_t entry, unsigned long idx)
 {
 	struct shared_policy *p = &info->policy;
 	int i, num;
@@ -1061,7 +1062,8 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
 	return page;
 }
 #else
-static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+static inline int shmem_parse_mpol(char *value, int *policy,
+				nodemask_t *policy_nodes)
 {
 	return 1;
 }
@@ -1109,7 +1111,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	 * Normally, filepage is NULL on entry, and either found
 	 * uptodate immediately, or allocated and zeroed, or read
 	 * in under swappage, which is then assigned to filepage.
-	 * But shmem_readpage and shmem_prepare_write pass in a locked
+	 * But shmem_readpage and shmem_write_begin pass in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
@@ -1327,14 +1329,14 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 #ifdef CONFIG_NUMA
-int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 {
 	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
 	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
 }
 
-struct mempolicy *
-shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
+static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+					unsigned long addr)
 {
 	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
 	unsigned long idx;
@@ -1446,7 +1448,7 @@ static const struct inode_operations shmem_symlink_inode_operations;
 static const struct inode_operations shmem_symlink_inline_operations;
 
 /*
- * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
+ * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
@@ -1459,10 +1461,30 @@ static int shmem_readpage(struct file *file, struct page *page)
 }
 
 static int
-shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+shmem_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata)
 {
-	struct inode *inode = page->mapping->host;
-	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
+	struct inode *inode = mapping->host;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	*pagep = NULL;
+	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
+}
+
+static int
+shmem_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata)
+{
+	struct inode *inode = mapping->host;
+
+	set_page_dirty(page);
+	page_cache_release(page);
+
+	if (pos+copied > inode->i_size)
+		i_size_write(inode, pos+copied);
+
+	return copied;
 }
 
 static ssize_t
@@ -2219,7 +2241,7 @@ static int shmem_fill_super(struct super_block *sb,
 	unsigned long blocks = 0;
 	unsigned long inodes = 0;
 	int policy = MPOL_DEFAULT;
-	nodemask_t policy_nodes = node_online_map;
+	nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
 
 #ifdef CONFIG_TMPFS
 	/*
@@ -2338,8 +2360,8 @@ static const struct address_space_operations shmem_aops = {
 	.set_page_dirty	= __set_page_dirty_no_writeback,
 #ifdef CONFIG_TMPFS
 	.readpage	= shmem_readpage,
-	.prepare_write	= shmem_prepare_write,
-	.commit_write	= simple_commit_write,
+	.write_begin	= shmem_write_begin,
+	.write_end	= shmem_write_end,
 #endif
 	.migratepage	= migrate_page,
 };
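For reference, the last two hunks switch tmpfs from prepare_write/commit_write to the write_begin/write_end address_space operations. The sketch below is not taken from the kernel; example_perform_write is a hypothetical, simplified caller showing roughly how a buffered-write loop of that era pairs the two hooks, using the signatures visible in the diff. The real generic path copies user data with atomic kmaps and pre-faults the user buffer instead of the plain copy_from_user() used here.

/*
 * Hypothetical example -- NOT mm/filemap.c.  Illustrates how a caller is
 * expected to pair ->write_begin and ->write_end as wired up for tmpfs in
 * the diff above.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>

static ssize_t example_perform_write(struct file *file,
				     const char __user *buf,
				     size_t count, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	ssize_t written = 0;

	while (count) {
		unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned long bytes = min_t(unsigned long,
					    PAGE_CACHE_SIZE - offset, count);
		struct page *page;
		void *fsdata = NULL;
		int status;

		/* Ask the filesystem to prepare a page covering pos..pos+bytes */
		status = a_ops->write_begin(file, mapping, pos, bytes, 0,
					    &page, &fsdata);
		if (status)
			return written ? written : status;

		/* Copy the user data into the page (simplified; may fault) */
		if (copy_from_user((char *)kmap(page) + offset, buf, bytes)) {
			kunmap(page);
			a_ops->write_end(file, mapping, pos, bytes, 0,
					 page, fsdata);
			return written ? written : -EFAULT;
		}
		kunmap(page);

		/* Report how much was copied; the filesystem dirties and
		 * releases the page and updates i_size (see shmem_write_end) */
		status = a_ops->write_end(file, mapping, pos, bytes, bytes,
					  page, fsdata);
		if (status < 0)
			return written ? written : status;

		pos += bytes;
		buf += bytes;
		count -= bytes;
		written += bytes;
	}
	return written;
}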