author      Sasha Levin <sasha.levin@oracle.com>            2014-10-09 15:28:39 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 22:25:58 -0400
commit      96dad67ff244e797c4bc3e4f7f0fdaa0cfdf0a7d
tree        ea45352bf168845bf42d0d656b5d8bdacc55d8e8
parent      31c9afa6db122a5c7a7843278aaf77dd08ea6e98
mm: use VM_BUG_ON_MM where possible
Dump the contents of the relevant struct_mm when we hit the bug condition.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
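This conversion relies on the VM_BUG_ON_MM() helper added by the parent commit (31c9afa6db12): with CONFIG_DEBUG_VM enabled it dumps the mm_struct via dump_mm() before hitting BUG(), and otherwise it falls back to the ordinary check. A rough sketch of the shape of that macro, assuming the include/linux/mmdebug.h definitions of this era (the exact in-tree definition may differ):

#ifdef CONFIG_DEBUG_VM
/* Sketch only: dump the offending mm_struct, then BUG, when the condition trips. */
#define VM_BUG_ON_MM(cond, mm)                                  \
        do {                                                    \
                if (unlikely(cond)) {                           \
                        dump_mm(mm);                            \
                        BUG();                                  \
                }                                               \
        } while (0)
#else
/* Without CONFIG_DEBUG_VM the check compiles down to the plain VM_BUG_ON(). */
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#endif

With that in place, call sites that already have the mm in hand, e.g. VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)), become VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm), so the failing mm_struct is printed alongside the BUG report.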
-rw-r--r--   kernel/fork.c    | 3 +--
-rw-r--r--   kernel/sys.c     | 2 +-
-rw-r--r--   mm/huge_memory.c | 2 +-
-rw-r--r--   mm/mlock.c       | 2 +-
-rw-r--r--   mm/mmap.c        | 7 ++++---
-rw-r--r--   mm/pagewalk.c    | 2 +-
6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index a91e47d86de..8c162d10274 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -601,9 +601,8 @@ static void check_mm(struct mm_struct *mm)
                         printk(KERN_ALERT "BUG: Bad rss-counter state "
                                           "mm:%p idx:%d val:%ld\n", mm, i, x);
         }
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
-        VM_BUG_ON(mm->pmd_huge_pte);
+        VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
 #endif
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index f7030b06001..df692fbf1e7 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1634,7 +1634,7 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
         struct inode *inode;
         int err;
 
-        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
         exe = fdget(fd);
         if (!exe.file)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c13148cc745..74c78aa8bc2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2048,7 +2048,7 @@ int __khugepaged_enter(struct mm_struct *mm)
                 return -ENOMEM;
 
         /* __khugepaged_exit() must not run from under us */
-        VM_BUG_ON(khugepaged_test_exit(mm));
+        VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
                 free_mm_slot(mm_slot);
                 return 0;
diff --git a/mm/mlock.c b/mm/mlock.c
index d5d09d0786e..03aa8512723 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -235,7 +235,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
         VM_BUG_ON(end & ~PAGE_MASK);
         VM_BUG_ON_VMA(start < vma->vm_start, vma);
         VM_BUG_ON_VMA(end > vma->vm_end, vma);
-        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
         gup_flags = FOLL_TOUCH | FOLL_MLOCK;
         /*
diff --git a/mm/mmap.c b/mm/mmap.c
index c9bc285df25..16d19b48e2a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -410,8 +410,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                 struct vm_area_struct *vma;
                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-                BUG_ON(vma != ignore &&
-                       vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
+                VM_BUG_ON_VMA(vma != ignore &&
+                        vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+                        vma);
         }
 }
@@ -448,7 +449,7 @@ static void validate_mm(struct mm_struct *mm)
                 pr_emerg("map_count %d rb %d\n", mm->map_count, i);
                 bug = 1;
         }
-        BUG_ON(bug);
+        VM_BUG_ON_MM(bug, mm);
 }
 #else
 #define validate_mm_rb(root, ignore) do { } while (0)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 2beeabf502c..ad83195521f 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -177,7 +177,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
         if (!walk->mm)
                 return -EINVAL;
 
-        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
 
         pgd = pgd_offset(walk->mm, addr);
         do {