author     Konstantin Khlebnikov <khlebnikov@openvz.org>    2012-05-10 13:01:43 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-10 15:06:44 -0700
commit     16fbdce62d9c89b794e303f4a232e4749b77e9ac (patch)
tree       b0088af87f70ceeef0fbe55bc2200b4f1b9aa816 /fs/proc
parent     bc46f9375a286d05f84a9464efc2b7f1f5614ff4 (diff)
proc/pid/pagemap: correctly report non-present ptes and holes between vmas
Reset the current pagemap entry if the current pte is not present, or if the current vma has ended; otherwise pagemap reports the last entry again and again.

Non-present pte reporting was broken in commit 092b50bacd1c ("pagemap: introduce data structure for pagemap entry").  Reporting for the holes between vmas was broken in commit 5aaabe831eb5 ("pagemap: avoid splitting thp when reading /proc/pid/pagemap").

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Reported-by: Pavel Emelyanov <xemul@parallels.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
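The first regression is visible from userspace with a plain read of /proc/pid/pagemap.  The following is a minimal sketch, not part of the patch, assuming only the entry layout documented in Documentation/vm/pagemap.txt (64-bit entries, bit 63 = "page present"): it maps four anonymous pages, faults in only the first, and reads all four entries with one pread().  On a fixed kernel pages 1-3 read back as not present; with the 092b50bacd1c regression they could repeat the previous (present) entry.

/*
 * Userspace sketch (not part of the patch): check the "present" bit
 * (bit 63, per Documentation/vm/pagemap.txt) for four anonymous pages
 * of which only the first has been faulted in.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	char *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint64_t entries[4];

	if (fd < 0 || buf == MAP_FAILED)
		return 1;

	buf[0] = 1;	/* fault in only the first page */

	/* read all four entries in one pread() so the kernel walks the
	 * range in one go instead of entry by entry */
	if (pread(fd, entries, sizeof(entries),
		  (off_t)((uintptr_t)buf / page) * sizeof(entries[0])) !=
	    (ssize_t)sizeof(entries))
		return 1;

	for (int i = 0; i < 4; i++)
		printf("page %d: present=%d\n", i, (int)(entries[i] >> 63));

	close(fd);
	return 0;
}

Only the present bit is inspected here, so no PFN interpretation is needed; build with a plain gcc invocation and run it as the same user.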
Diffstat (limited to 'fs/proc')
-rw-r--r--   fs/proc/task_mmu.c   12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2d60492d6df..1030a716d15 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -747,6 +747,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
 	else if (pte_present(pte))
 		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
+	else
+		*pme = make_pme(PM_NOT_PRESENT);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -761,6 +763,8 @@ static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
 	if (pmd_present(pmd))
 		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
+	else
+		*pme = make_pme(PM_NOT_PRESENT);
 }
 #else
 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
@@ -801,8 +805,10 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 		/* check to see if we've left 'vma' behind
 		 * and need a new, higher one */
-		if (vma && (addr >= vma->vm_end))
+		if (vma && (addr >= vma->vm_end)) {
 			vma = find_vma(walk->mm, addr);
+			pme = make_pme(PM_NOT_PRESENT);
+		}
 
 		/* check that 'vma' actually covers this address,
 		 * and that it isn't a huge page vma */
@@ -830,6 +836,8 @@ static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
 	if (pte_present(pte))
 		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
+	else
+		*pme = make_pme(PM_NOT_PRESENT);
 }
 
 /* This function walks within one hugetlb entry in the single call */
@@ -839,7 +847,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 {
 	struct pagemapread *pm = walk->private;
 	int err = 0;
-	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
+	pagemap_entry_t pme;
 
 	for (; addr != end; addr += PAGE_SIZE) {
 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
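
The pagemap_pte_range() hunk above covers the second regression: once the walk steps past vma->vm_end into a hole before the next vma, the cached entry is reset rather than reported again.  A companion userspace sketch for that case (same assumptions as the sketch after the changelog, likewise not part of the patch) punches a one-page hole between two anonymous vmas and reads across it in a single pread(); with the fix the middle entry reads back as not present.

/*
 * Userspace sketch (not part of the patch): unmap the middle page of a
 * three-page anonymous mapping to leave a hole between two vmas, then
 * read the three pagemap entries in one pread() and print bit 63.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	char *buf = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint64_t entries[3];

	if (fd < 0 || buf == MAP_FAILED)
		return 1;

	buf[0] = 1;			/* fault in the first page */
	buf[2 * page] = 1;		/* fault in the last page */
	munmap(buf + page, page);	/* punch a hole between the two vmas */

	if (pread(fd, entries, sizeof(entries),
		  (off_t)((uintptr_t)buf / page) * sizeof(entries[0])) !=
	    (ssize_t)sizeof(entries))
		return 1;

	for (int i = 0; i < 3; i++)
		printf("page %d: present=%d\n", i, (int)(entries[i] >> 63));

	close(fd);
	return 0;
}

With the fix applied the expected output is present=1, 0, 1 for pages 0, 1, 2; before it, the hole entry could mirror the previous present entry.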