author    Adam Litke <agl@us.ibm.com>  2008-04-28 02:13:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 08:58:23 -0700
commit    3b1163006332302117b1b2acf226d4014ff46525
tree      c9e43179620cfe612ce57795cb0b1e1ceb2c9d1b
parent    122c7a59055c77434118d7dd4dff4b625d4a2c15
Subject: [PATCH] hugetlb: vmstat events for huge page allocations
Allocating huge pages directly from the buddy allocator is not guaranteed
to succeed.  Success depends on several factors (such as the amount of
physical memory available and the level of fragmentation).  With the
addition of dynamic hugetlb pool resizing, allocations can occur much more
frequently.  For these reasons it is desirable to keep track of huge page
allocation successes and failures.  Add two new vmstat entries to track
huge page allocations that succeed and fail.  The presence of the two
entries is contingent upon CONFIG_HUGETLB_PAGE being enabled.

[akpm@linux-foundation.org: reduced ifdeffery]
Signed-off-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Eric Munson <ebmunson@us.ibm.com>
Tested-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Andy Whitcroft <apw@shadowen.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/vmstat.h |  4
-rw-r--r--  mm/hugetlb.c           |  7
-rw-r--r--  mm/vmstat.c            |  4
3 files changed, 15 insertions(+), 0 deletions(-)
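The two events surface in /proc/vmstat under the names added to vmstat_text
below, so they can be watched from userspace while the pool is resized
through /proc/sys/vm/nr_hugepages.  A minimal reader, offered only as an
illustrative sketch (it is not part of the patch), might look like:

/* Illustrative sketch only -- not part of the patch.  Prints the two
 * hugetlb buddy-allocation counters exported through /proc/vmstat. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[256];

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Counter names as added to vmstat_text by this patch. */
		if (!strncmp(line, "htlb_buddy_alloc_", 17))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}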
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index e726b6d4649..e83b69346d2 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,6 +25,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
+
 #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
@@ -37,6 +38,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGSCAN_DIRECT),
 		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_HUGETLB_PAGE
+		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
 		NR_VM_EVENT_ITEMS
 };
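Adding the two enum entries is all the bookkeeping the event machinery
needs: count_vm_event() and __count_vm_event() index a per-CPU array by
vm_event_item, and /proc/vmstat sums the per-CPU values.  Roughly, as a
paraphrase of the existing helpers in this header (not the verbatim
source):

/* Paraphrased sketch of the existing event helpers -- not the verbatim
 * kernel source.  Each CPU keeps its own array of event counts indexed
 * by vm_event_item, so a new event only needs an enum entry here and a
 * matching name string in mm/vmstat.c. */
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;	/* caller deals with preemption */
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;	/* disables preemption itself */
	put_cpu();
}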
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 93ea46a0fba..8deae4eb969 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -242,6 +242,11 @@ static int alloc_fresh_huge_page(void)
 		hugetlb_next_nid = next_nid;
 	} while (!page && hugetlb_next_nid != start_nid);
 
+	if (ret)
+		count_vm_event(HTLB_BUDDY_PGALLOC);
+	else
+		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
 	return ret;
 }
@@ -302,9 +307,11 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 		 */
 		nr_huge_pages_node[nid]++;
 		surplus_huge_pages_node[nid]++;
+		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	} else {
 		nr_huge_pages--;
 		surplus_huge_pages--;
+		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 	}
 	spin_unlock(&hugetlb_lock);
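Two flavours are used above on purpose: alloc_buddy_huge_page() counts
with the lockless __count_vm_event(), which is fine here because the
update sits between spin_lock(&hugetlb_lock) and
spin_unlock(&hugetlb_lock), while alloc_fresh_huge_page() counts after
its loop, outside the lock, and therefore uses the preemption-safe
count_vm_event().  A hypothetical caller that does not hold hugetlb_lock
would follow the same pattern (sketch only; alloc_and_count() is an
invented name, not something in the tree):

/* Hypothetical sketch -- not part of the patch.  Counting around a
 * buddy allocation from a context that does not hold hugetlb_lock
 * uses the preemption-safe count_vm_event() variant. */
#include <linux/gfp.h>
#include <linux/vmstat.h>

static struct page *alloc_and_count(gfp_t gfp_mask, int order)
{
	struct page *page = alloc_pages(gfp_mask, order);

	if (page)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return page;
}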
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 879bcc0a1d4..4c21670f8d9 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -645,6 +645,10 @@ static const char * const vmstat_text[] = {
 	"allocstall",
 
 	"pgrotated",
+#ifdef CONFIG_HUGETLB_PAGE
+	"htlb_buddy_alloc_success",
+	"htlb_buddy_alloc_fail",
+#endif
 #endif
 };