From 0164f69d0cf1a6abbc936851f5b72ece92187cda Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Wed, 15 Jun 2011 15:08:09 -0700
Subject: mm/memory.c: fix kernel-doc notation

Fix new kernel-doc warnings in mm/memory.c:

  Warning(mm/memory.c:1327): No description found for parameter 'tlb'
  Warning(mm/memory.c:1327): Excess function parameter 'tlbp' description in 'unmap_vmas'

Signed-off-by: Randy Dunlap
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 6953d3926e0..b13e7dbc399 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1296,7 +1296,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
- * @tlbp: address of the caller's struct mmu_gather
+ * @tlb: address of the caller's struct mmu_gather
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
--
cgit v1.2.3-70-g09d2

From 5f1a19070b16c20cdc71ed0e981bfa19f8f6a4ee Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Wed, 15 Jun 2011 15:08:23 -0700
Subject: mm: fix wrong kunmap_atomic() pointer

Running a ktest.pl test, I hit the following bug on x86_32:

  ------------[ cut here ]------------
  WARNING: at arch/x86/mm/highmem_32.c:81 __kunmap_atomic+0x64/0xc1()
  Hardware name:
  Modules linked in:
  Pid: 93, comm: sh Not tainted 2.6.39-test+ #1
  Call Trace:
   [] warn_slowpath_common+0x7c/0x91
   [] ? __kunmap_atomic+0x64/0xc1
   [] ? __kunmap_atomic+0x64/0xc1
   [] warn_slowpath_null+0x22/0x24
   [] __kunmap_atomic+0x64/0xc1
   [] unmap_vmas+0x43a/0x4e0
   [] exit_mmap+0x91/0xd2
   [] mmput+0x43/0xad
   [] exit_mm+0x111/0x119
   [] do_exit+0x1ff/0x5fa
   [] ? set_current_blocked+0x3c/0x40
   [] ? sigprocmask+0x7e/0x8e
   [] do_group_exit+0x65/0x88
   [] sys_exit_group+0x18/0x1c
   [] sysenter_do_call+0x12/0x38
  ---[ end trace 8055f74ea3c0eb62 ]---

A ktest.pl git bisect found the culprit:

  commit e303297e6c3a ("mm: extended batches for generic mmu_gather")

Although that commit triggered the bug, it was not the one originally
responsible for it; that was commit d16dfc550f53 ("mm: mmu_gather
rework").

The code in zap_pte_range() looks like the following:

  pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
  do {
          [...]
  } while (pte++, addr += PAGE_SIZE, addr != end);
  pte_unmap_unlock(pte - 1, ptl);

pte starts off pointing at the first entry of the page table page
returned by pte_offset_map_lock(). When the loop finishes, pte points
anywhere from the second entry up to and including the first entry of
the next page. Subtracting one (pte - 1) puts pte back onto the
original page, which is all that pte_unmap_unlock() needs.

On most (64-bit) architectures this is not an issue, because
pte_unmap_unlock() ignores the pte argument. But on 32-bit
architectures, where page tables may be kmapped, it is essential that
the pte passed to pte_unmap_unlock() resides on the same page as the
one returned by pte_offset_map_lock().

The problem came in with d16dfc550f53 ("mm: mmu_gather rework"), which
introduced a "break;" out of the while loop. On its own this did not
easily trigger the bug, but the changes made by e303297e6c3a caused
that "break;" to be hit on the first iteration, before the pte++. With
pte never incremented, pte_unmap_unlock(pte - 1) points into the
previous page: the wrong page gets unmapped, which also triggers the
warning above.

The simple solution is to save the pointer returned by
pte_offset_map_lock() and use it in the unlock.
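
To make the pointer bookkeeping concrete, here is a minimal user-space
sketch (not kernel code: the int array stands in for the kmapped pte
page, and force_flush for the early-exit condition added by the
mmu_gather rework):

  #include <stdio.h>

  #define PTRS_PER_PAGE 4                 /* stands in for PTRS_PER_PTE */

  int main(void)
  {
          int page[PTRS_PER_PAGE] = { 0 };  /* the "kmapped pte page" */
          int *start_pte = page;            /* saved at "map and lock" time */
          int *pte = start_pte;
          int entries_left = PTRS_PER_PAGE;
          int force_flush = 1;              /* batch fills on the 1st entry */

          do {
                  /* ... zap the entry at *pte ... */
                  if (force_flush)
                          break;            /* break BEFORE the pte++ below */
          } while (pte++, --entries_left);

          if (pte == page)
                  printf("pte never advanced: pte - 1 points before page[], "
                         "so unlocking with pte - 1 hits the wrong page\n");

          /* The fix: unlock with the saved pointer instead. */
          printf("start_pte still on the mapped page: %s\n",
                 start_pte == page ? "yes" : "no");
          return 0;
  }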
Signed-off-by: Steven Rostedt
Cc: Peter Zijlstra
Cc: KAMEZAWA Hiroyuki
Acked-by: Hugh Dickins
Cc: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index b13e7dbc399..87d935333f0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1112,11 +1112,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	int force_flush = 0;
 	int rss[NR_MM_COUNTERS];
 	spinlock_t *ptl;
+	pte_t *start_pte;
 	pte_t *pte;
 
 again:
 	init_rss_vec(rss);
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	pte = start_pte;
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
@@ -1196,7 +1198,7 @@ again:
 
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(pte - 1, ptl);
+	pte_unmap_unlock(start_pte, ptl);
 
 	/*
 	 * mmu_gather ran out of room to batch pages, we break out of
--
cgit v1.2.3-70-g09d2

From 5b8ba10198a109f8a02380648c5d29000caa9c55 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Mon, 27 Jun 2011 16:18:01 -0700
Subject: mm: move vmtruncate_range to truncate.c

You would expect to find vmtruncate_range() next to vmtruncate() in
mm/truncate.c: move it there.

Signed-off-by: Hugh Dickins
Acked-by: Christoph Hellwig
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c   | 24 ------------------------
 mm/truncate.c | 24 ++++++++++++++++++++++++
 2 files changed, 24 insertions(+), 24 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 87d935333f0..40b7531ee8b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2798,30 +2798,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
-{
-	struct address_space *mapping = inode->i_mapping;
-
-	/*
-	 * If the underlying filesystem is not going to provide
-	 * a way to truncate a range of blocks (punch a hole) -
-	 * we should return failure right now.
-	 */
-	if (!inode->i_op->truncate_range)
-		return -ENOSYS;
-
-	mutex_lock(&inode->i_mutex);
-	down_write(&inode->i_alloc_sem);
-	unmap_mapping_range(mapping, offset, (end - offset), 1);
-	truncate_inode_pages_range(mapping, offset, end);
-	unmap_mapping_range(mapping, offset, (end - offset), 1);
-	inode->i_op->truncate_range(inode, offset, end);
-	up_write(&inode->i_alloc_sem);
-	mutex_unlock(&inode->i_mutex);
-
-	return 0;
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
diff --git a/mm/truncate.c b/mm/truncate.c
index 3a29a618021..5b4c3a4847e 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -603,3 +603,27 @@ int vmtruncate(struct inode *inode, loff_t offset)
 	return 0;
 }
 EXPORT_SYMBOL(vmtruncate);
+
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * If the underlying filesystem is not going to provide
+	 * a way to truncate a range of blocks (punch a hole) -
+	 * we should return failure right now.
+	 */
+	if (!inode->i_op->truncate_range)
+		return -ENOSYS;
+
+	mutex_lock(&inode->i_mutex);
+	down_write(&inode->i_alloc_sem);
+	unmap_mapping_range(mapping, offset, (end - offset), 1);
+	truncate_inode_pages_range(mapping, offset, end);
+	unmap_mapping_range(mapping, offset, (end - offset), 1);
+	inode->i_op->truncate_range(inode, offset, end);
+	up_write(&inode->i_alloc_sem);
+	mutex_unlock(&inode->i_mutex);
+
+	return 0;
+}
--
cgit v1.2.3-70-g09d2
From 0b43c3aab0137595335b08b340a3f3e5af9818a6 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Fri, 8 Jul 2011 15:39:41 -0700
Subject: mm: __tlb_remove_page() check the correct batch

__tlb_remove_page() switches to a new batch page, but then still checks
the space left in the old, already-full batch. That check always finds
no room, and so forces a needless tlb flush.

Signed-off-by: Shaohua Li
Acked-by: Peter Zijlstra
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 40b7531ee8b..9b8a01d941c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -305,6 +305,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
 			return 0;
+		batch = tlb->active;
 	}
 	VM_BUG_ON(batch->nr > batch->max);
 
--
cgit v1.2.3-70-g09d2
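
The bug pattern above - a local pointer cached from a container before
the container's active element is swapped - is easy to reproduce outside
the kernel. A minimal user-space sketch (the struct names and
next_batch() are simplified stand-ins, not the kernel's mmu_gather
types or API):

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-ins for struct mmu_gather_batch / struct mmu_gather. */
  struct batch  { int nr, max; };
  struct gather { struct batch *active; struct batch first, second; };

  /* Stand-in for tlb_next_batch(): make an empty batch the active one. */
  static bool next_batch(struct gather *tlb)
  {
          tlb->second = (struct batch){ .nr = 0, .max = 2 };
          tlb->active = &tlb->second;
          return true;
  }

  /*
   * Mirrors the shape of __tlb_remove_page(): returns the space left in
   * the active batch; the caller treats 0 as "full, flush the tlb now".
   */
  static int remove_page(struct gather *tlb, bool fixed)
  {
          struct batch *batch = tlb->active;  /* local cached here */

          batch->nr++;                        /* queue the page */
          if (batch->nr == batch->max) {
                  if (!next_batch(tlb))
                          return 0;
                  if (fixed)
                          batch = tlb->active;  /* the one-line fix */
          }
          return batch->max - batch->nr;      /* stale batch: always 0 */
  }

  int main(void)
  {
          struct gather tlb = { .first = { .nr = 1, .max = 2 } };

          tlb.active = &tlb.first;
          printf("buggy: %d\n", remove_page(&tlb, false)); /* 0: forced flush */

          tlb.first = (struct batch){ .nr = 1, .max = 2 };
          tlb.active = &tlb.first;
          printf("fixed: %d\n", remove_page(&tlb, true));  /* 2: fresh batch has room */
          return 0;
  }

In the buggy path the remaining space is computed against the old, full
batch, so the caller always sees 0 and flushes; refreshing the local
pointer after the switch restores the intended behaviour.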