From b1e7a8fd854d2f895730e82137400012b509650e Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 30 Jun 2006 01:55:39 -0700
Subject: [PATCH] zoned vm counters: conversion of nr_dirty to per zone counter

This makes nr_dirty a per zone counter.  Looping over all processors is
avoided during writeback state determination.

The counter aggregation for nr_dirty had to be undone in the NFS layer since
we summed up the page counts from multiple zones.  Someone more familiar with
NFS should probably review what I have done.

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter
Cc: Trond Myklebust
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/i386/mm/pgtable.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/i386/mm/pgtable.c')

diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 0bb1e5c1344..aa211dcddb0 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -59,7 +59,7 @@ void show_mem(void)
 	printk(KERN_INFO "%d pages swap cached\n", cached);
 
 	get_page_state(&ps);
-	printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
+	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
 	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
 	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
-- 
cgit v1.2.3-70-g09d2
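
For context, a minimal standalone sketch of the bookkeeping change this patch
relies on is shown below: the old get_page_state() path summed a per-CPU
page_state struct over every online CPU, whereas a per-zone counter lets the
global value be read with a short sum over zones.  This is an illustration
only, under assumed names; NR_ZONES, zone_stat[] and demo_global_page_state()
are hypothetical and do not match the real mm/vmstat.c implementation of
global_page_state().

/*
 * Simplified, self-contained sketch of the per-zone counter idea.
 * NR_ZONES, zone_stat[] and demo_global_page_state() are illustrative
 * names, not kernel code.
 */
#include <stdio.h>

enum zone_stat_item { NR_FILE_DIRTY, NR_WRITEBACK, NR_VM_STAT_ITEMS };

#define NR_ZONES 3	/* e.g. DMA, Normal and HighMem on i386 */

/* One counter array per zone, updated where pages change state. */
static long zone_stat[NR_ZONES][NR_VM_STAT_ITEMS];

/* Global readout: a loop over zones, independent of the CPU count. */
static long demo_global_page_state(enum zone_stat_item item)
{
	long total = 0;

	for (int zone = 0; zone < NR_ZONES; zone++)
		total += zone_stat[zone][item];
	return total;
}

int main(void)
{
	zone_stat[0][NR_FILE_DIRTY] = 12;	/* arbitrary sample values */
	zone_stat[1][NR_FILE_DIRTY] = 340;
	printf("%ld pages dirty\n", demo_global_page_state(NR_FILE_DIRTY));
	return 0;
}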