Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 68 ++++++++++++++++++++++++++++++++++++--------------------------------
 1 file changed, 36 insertions(+), 32 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 90effcdf948..673e4581a2e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -62,16 +62,16 @@
 /*
  * Lock ordering:
  *
- *  ->i_mmap_mutex              (truncate_pagecache)
+ *  ->i_mmap_rwsem              (truncate_pagecache)
  *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock             (exclusive_swap_page, others)
  *        ->mapping->tree_lock
 *
  *  ->i_mutex
- *    ->i_mmap_mutex            (truncate->unmap_mapping_range)
+ *    ->i_mmap_rwsem            (truncate->unmap_mapping_range)
 *
  *  ->mmap_sem
- *    ->i_mmap_mutex
+ *    ->i_mmap_rwsem
  *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
  *        ->mapping->tree_lock         (arch-dependent flush_dcache_mmap_lock)
 *
@@ -85,7 +85,7 @@
  *    sb_lock                   (fs/fs-writeback.c)
  *    ->mapping->tree_lock      (__sync_single_inode)
 *
- *  ->i_mmap_mutex
+ *  ->i_mmap_rwsem
  *    ->anon_vma.lock           (vma_adjust)
 *
  *  ->anon_vma.lock
@@ -105,7 +105,7 @@
  *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
  *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
 *
- * ->i_mmap_mutex
+ * ->i_mmap_rwsem
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
@@ -670,17 +670,13 @@ EXPORT_SYMBOL(__page_cache_alloc);
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-static wait_queue_head_t *page_waitqueue(struct page *page)
+wait_queue_head_t *page_waitqueue(struct page *page)
 {
        const struct zone *zone = page_zone(page);
 
        return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 }
-
-static inline void wake_up_page(struct page *page, int bit)
-{
-       __wake_up_bit(page_waitqueue(page), &page->flags, bit);
-}
+EXPORT_SYMBOL(page_waitqueue);
 
 void wait_on_page_bit(struct page *page, int bit_nr)
 {
@@ -703,6 +699,19 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
                             bit_wait_io, TASK_KILLABLE);
 }
 
+int wait_on_page_bit_killable_timeout(struct page *page,
+                                      int bit_nr, unsigned long timeout)
+{
+       DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+
+       wait.key.timeout = jiffies + timeout;
+       if (!test_bit(bit_nr, &page->flags))
+               return 0;
+       return __wait_on_bit(page_waitqueue(page), &wait,
+                            bit_wait_io_timeout, TASK_KILLABLE);
+}
+EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);
+
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
  * @page: Page defining the wait queue of interest
@@ -727,7 +736,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
 *
  * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
- * mechananism between PageLocked pages and PageWriteback pages is shared.
+ * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
  * The mb is necessary to enforce ordering between the clear_bit and the read
@@ -1037,8 +1046,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
  * Looks up the page cache slot at @mapping & @offset.
 *
@@ -1047,11 +1055,9 @@ EXPORT_SYMBOL(find_lock_entry);
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is return locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *             @cache_gfp_mask and added to the page cache and the VM's LRU
- *             list. If radix tree nodes are allocated during page cache
- *             insertion then @radix_gfp_mask is used. The page is returned
- *             locked and with an increased refcount. Otherwise, %NULL is
- *             returned.
+ *             @gfp_mask and added to the page cache and the VM's LRU
+ *             list. The page is returned locked and with an increased
+ *             refcount. Otherwise, %NULL is returned.
 *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
@@ -1059,7 +1065,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * If there is a page cache page, it is returned with an increased refcount.
  */
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
-       int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+       int fgp_flags, gfp_t gfp_mask)
 {
        struct page *page;
 
@@ -1096,13 +1102,11 @@ no_page:
        if (!page && (fgp_flags & FGP_CREAT)) {
                int err;
                if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
-                       cache_gfp_mask |= __GFP_WRITE;
-               if (fgp_flags & FGP_NOFS) {
-                       cache_gfp_mask &= ~__GFP_FS;
-                       radix_gfp_mask &= ~__GFP_FS;
-               }
+                       gfp_mask |= __GFP_WRITE;
+               if (fgp_flags & FGP_NOFS)
+                       gfp_mask &= ~__GFP_FS;
 
-               page = __page_cache_alloc(cache_gfp_mask);
+               page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;
 
@@ -1113,7 +1117,8 @@ no_page:
                if (fgp_flags & FGP_ACCESSED)
                        __SetPageReferenced(page);
 
-               err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+               err = add_to_page_cache_lru(page, mapping, offset,
+                               gfp_mask & GFP_RECLAIM_MASK);
                if (unlikely(err)) {
                        page_cache_release(page);
                        page = NULL;
@@ -1744,7 +1749,7 @@ EXPORT_SYMBOL(generic_file_read_iter);
 static int page_cache_read(struct file *file, pgoff_t offset)
 {
        struct address_space *mapping = file->f_mapping;
-       struct page *page; 
+       struct page *page;
        int ret;
 
        do {
@@ -1761,7 +1766,7 @@ static int page_cache_read(struct file *file, pgoff_t offset)
                page_cache_release(page);
 
        } while (ret == AOP_TRUNCATED_PAGE);
-               
+
        return ret;
 }
@@ -2434,8 +2439,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
                fgp_flags |= FGP_NOFS;
 
        page = pagecache_get_page(mapping, index, fgp_flags,
-                       mapping_gfp_mask(mapping),
-                       GFP_KERNEL);
+                       mapping_gfp_mask(mapping));
        if (page)
                wait_for_stable_page(page);
@@ -2455,7 +2459,7 @@ ssize_t generic_perform_write(struct file *file,
        /*
         * Copies from kernel address space cannot fail (NFSD is a big user).
         */
-       if (segment_eq(get_fs(), KERNEL_DS))
+       if (!iter_is_iovec(i))
                flags |= AOP_FLAG_UNINTERRUPTIBLE;
 
        do {
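The newly exported wait_on_page_bit_killable_timeout() gives callers a bounded, killable wait on any page flag bit: it returns 0 once the bit is clear, or a negative error from the bit-wait function if the wait times out or the task receives a fatal signal. A minimal usage sketch follows; the helper name and the 30-second timeout are hypothetical, while wait_on_page_bit_killable_timeout(), PG_writeback and PageWriteback() come from this diff and the page-flags API.

#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/jiffies.h>

/*
 * Hypothetical caller: bound the wait for PG_writeback so an
 * unresponsive backing store cannot hang the task forever.
 * Returns 0 once writeback has cleared; otherwise the negative
 * error reported by the bit-wait function (timeout or fatal
 * signal -- the exact codes come from bit_wait_io_timeout,
 * not from this diff).
 */
static int example_wait_writeback_bounded(struct page *page)
{
        if (!PageWriteback(page))
                return 0;
        return wait_on_page_bit_killable_timeout(page, PG_writeback,
                                                 30 * HZ);
}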
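After the signature change, pagecache_get_page() takes a single gfp mask; the radix tree insertion internally uses gfp_mask & GFP_RECLAIM_MASK, as the add_to_page_cache_lru() hunk shows, so callers no longer pass a separate radix mask. A sketch of what a caller now looks like; the wrapper itself is illustrative and not part of the diff.

#include <linux/pagemap.h>

/*
 * Illustrative only: find-or-create a locked, referenced page at
 * @index, using the mapping's own allocation mask for both the
 * data page and (masked internally) the radix tree nodes.
 */
static struct page *example_get_locked_page(struct address_space *mapping,
                                            pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                  mapping_gfp_mask(mapping));
}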
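The last hunk replaces a check of the task's address limit, segment_eq(get_fs(), KERNEL_DS), with a check on the iov_iter itself: whether copies may fault is a property of the iterator's backing memory, not of the caller's address limit. For reference, iter_is_iovec() at this point is a simple type test along these lines (paraphrased from include/linux/uio.h of the same era; verify against your tree):

/*
 * Paraphrased sketch of the helper this hunk relies on: an iterator
 * is iovec-backed (user memory, copies may fault) unless it is
 * kvec- or bvec-backed (kernel memory, copies cannot fail).
 */
static inline bool iter_is_iovec(struct iov_iter *i)
{
        return !(i->type & (ITER_BVEC | ITER_KVEC));
}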