author     Steven Whitehouse <swhiteho@redhat.com>   2006-07-03 10:25:08 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-07-03 10:25:08 -0400
commit     0a1340c185734a57fbf4775927966ad4a1347b02 (patch)
tree       d9ed8f0dd809a7c542a3356601125ea5b5aaa804 /mm/page_alloc.c
parent     af18ddb8864b096e3ed4732e2d4b21c956dcfe3a (diff)
parent     29454dde27d8e340bb1987bad9aa504af7081eba (diff)
Merge rsync://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: include/linux/kernel.h
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  670
1 file changed, 135 insertions, 535 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 253a450c400..3e792a583f3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -14,7 +14,6 @@
* (lots of bits borrowed from Ingo Molnar & Andrew Morton)
*/
-#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
@@ -37,6 +36,7 @@
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
+#include <linux/stop_machine.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -83,8 +83,8 @@ EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;
-unsigned long __initdata nr_kernel_pages;
-unsigned long __initdata nr_all_pages;
+unsigned long __meminitdata nr_kernel_pages;
+unsigned long __meminitdata nr_all_pages;
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
@@ -265,7 +265,7 @@ static inline void rmv_page_order(struct page *page)
* satisfies the following equation:
* P = B & ~(1 << O)
*
- * Assumption: *_mem_map is contigious at least up to MAX_ORDER
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
*/
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
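For reference, the buddy/parent relations used by __page_find_buddy() and __find_combined_index() are pure bit operations on the page index; below is a minimal standalone sketch (function names are illustrative, not the kernel's) of the arithmetic behind P = B & ~(1 << O):

#include <assert.h>

/* buddy of an order-`order` block at page_idx differs only in bit `order` */
static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

/* parent block after a merge: P = B & ~(1 << O), as in the comment above */
static unsigned long combined_index(unsigned long page_idx, unsigned int order)
{
	return page_idx & ~(1UL << order);
}

int main(void)
{
	/* order-2 block at index 12 (0b1100): buddy is 8 (0b1000), parent starts at 8 */
	assert(buddy_index(12, 2) == 8);
	assert(combined_index(12, 2) == 8);
	/* order-2 block at index 8: its buddy is 12, the parent is still 8 */
	assert(buddy_index(8, 2) == 12);
	assert(combined_index(8, 2) == 8);
	return 0;
}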
@@ -286,22 +286,27 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
* we can do coalesce a page and its buddy if
* (a) the buddy is not in a hole &&
* (b) the buddy is in the buddy system &&
- * (c) a page and its buddy have the same order.
+ * (c) a page and its buddy have the same order &&
+ * (d) a page and its buddy are in the same zone.
*
* For recording whether a page is in the buddy system, we use PG_buddy.
* Setting, clearing, and testing PG_buddy is serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
-static inline int page_is_buddy(struct page *page, int order)
+static inline int page_is_buddy(struct page *page, struct page *buddy,
+ int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
- if (!pfn_valid(page_to_pfn(page)))
+ if (!pfn_valid(page_to_pfn(buddy)))
return 0;
#endif
- if (PageBuddy(page) && page_order(page) == order) {
- BUG_ON(page_count(page) != 0);
+ if (page_zone_id(page) != page_zone_id(buddy))
+ return 0;
+
+ if (PageBuddy(buddy) && page_order(buddy) == order) {
+ BUG_ON(page_count(buddy) != 0);
return 1;
}
return 0;
@@ -352,7 +357,7 @@ static inline void __free_one_page(struct page *page,
struct page *buddy;
buddy = __page_find_buddy(page, page_idx, order);
- if (!page_is_buddy(buddy, order))
+ if (!page_is_buddy(page, buddy, order))
break; /* Move the buddy up one level. */
list_del(&buddy->lru);
@@ -440,8 +445,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
arch_free_page(page, order);
if (!PageHighMem(page))
- mutex_debug_check_no_locks_freed(page_address(page),
- PAGE_SIZE<<order);
+ debug_check_no_locks_freed(page_address(page),
+ PAGE_SIZE<<order);
for (i = 0 ; i < (1 << order) ; ++i)
reserved += free_pages_check(page + i);
@@ -450,7 +455,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
kernel_map_pages(page, 1 << order, 0);
local_irq_save(flags);
- __mod_page_state(pgfree, 1 << order);
+ __count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, order);
local_irq_restore(flags);
}
@@ -703,27 +708,6 @@ void drain_local_pages(void)
}
#endif /* CONFIG_PM */
-static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
-{
-#ifdef CONFIG_NUMA
- pg_data_t *pg = z->zone_pgdat;
- pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
- struct per_cpu_pageset *p;
-
- p = zone_pcp(z, cpu);
- if (pg == orig) {
- p->numa_hit++;
- } else {
- p->numa_miss++;
- zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
- }
- if (pg == NODE_DATA(numa_node_id()))
- p->local_node++;
- else
- p->other_node++;
-#endif
-}
-
/*
* Free a 0-order page
*/
@@ -744,7 +728,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
local_irq_save(flags);
- __inc_page_state(pgfree);
+ __count_vm_event(PGFREE);
list_add(&page->lru, &pcp->list);
pcp->count++;
if (pcp->count >= pcp->high) {
@@ -820,8 +804,8 @@ again:
goto failed;
}
- __mod_page_state_zone(zone, pgalloc, 1 << order);
- zone_statistics(zonelist, zone, cpu);
+ __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ zone_statistics(zonelist, zone);
local_irq_restore(flags);
put_cpu();
@@ -951,8 +935,7 @@ restart:
goto got_pg;
do {
- if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
- wakeup_kswapd(*z, order);
+ wakeup_kswapd(*z, order);
} while (*(++z));
/*
@@ -1226,141 +1209,6 @@ static void show_node(struct zone *zone)
#define show_node(zone) do { } while (0)
#endif
-/*
- * Accumulate the page_state information across all CPUs.
- * The result is unavoidably approximate - it can change
- * during and after execution of this function.
- */
-static DEFINE_PER_CPU(struct page_state, page_states) = {0};
-
-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
-static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
-{
- unsigned cpu;
-
- memset(ret, 0, nr * sizeof(unsigned long));
- cpus_and(*cpumask, *cpumask, cpu_online_map);
-
- for_each_cpu_mask(cpu, *cpumask) {
- unsigned long *in;
- unsigned long *out;
- unsigned off;
- unsigned next_cpu;
-
- in = (unsigned long *)&per_cpu(page_states, cpu);
-
- next_cpu = next_cpu(cpu, *cpumask);
- if (likely(next_cpu < NR_CPUS))
- prefetch(&per_cpu(page_states, next_cpu));
-
- out = (unsigned long *)ret;
- for (off = 0; off < nr; off++)
- *out++ += *in++;
- }
-}
-
-void get_page_state_node(struct page_state *ret, int node)
-{
- int nr;
- cpumask_t mask = node_to_cpumask(node);
-
- nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
- nr /= sizeof(unsigned long);
-
- __get_page_state(ret, nr+1, &mask);
-}
-
-void get_page_state(struct page_state *ret)
-{
- int nr;
- cpumask_t mask = CPU_MASK_ALL;
-
- nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
- nr /= sizeof(unsigned long);
-
- __get_page_state(ret, nr + 1, &mask);
-}
-
-void get_full_page_state(struct page_state *ret)
-{
- cpumask_t mask = CPU_MASK_ALL;
-
- __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
-}
-
-unsigned long read_page_state_offset(unsigned long offset)
-{
- unsigned long ret = 0;
- int cpu;
-
- for_each_online_cpu(cpu) {
- unsigned long in;
-
- in = (unsigned long)&per_cpu(page_states, cpu) + offset;
- ret += *((unsigned long *)in);
- }
- return ret;
-}
-
-void __mod_page_state_offset(unsigned long offset, unsigned long delta)
-{
- void *ptr;
-
- ptr = &__get_cpu_var(page_states);
- *(unsigned long *)(ptr + offset) += delta;
-}
-EXPORT_SYMBOL(__mod_page_state_offset);
-
-void mod_page_state_offset(unsigned long offset, unsigned long delta)
-{
- unsigned long flags;
- void *ptr;
-
- local_irq_save(flags);
- ptr = &__get_cpu_var(page_states);
- *(unsigned long *)(ptr + offset) += delta;
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(mod_page_state_offset);
-
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
- unsigned long *free, struct pglist_data *pgdat)
-{
- struct zone *zones = pgdat->node_zones;
- int i;
-
- *active = 0;
- *inactive = 0;
- *free = 0;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- *active += zones[i].nr_active;
- *inactive += zones[i].nr_inactive;
- *free += zones[i].free_pages;
- }
-}
-
-void get_zone_counts(unsigned long *active,
- unsigned long *inactive, unsigned long *free)
-{
- struct pglist_data *pgdat;
-
- *active = 0;
- *inactive = 0;
- *free = 0;
- for_each_online_pgdat(pgdat) {
- unsigned long l, m, n;
- __get_zone_counts(&l, &m, &n, pgdat);
- *active += l;
- *inactive += m;
- *free += n;
- }
-}
-
void si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
@@ -1401,7 +1249,6 @@ void si_meminfo_node(struct sysinfo *val, int nid)
*/
void show_free_areas(void)
{
- struct page_state ps;
int cpu, temperature;
unsigned long active;
unsigned long inactive;
@@ -1433,7 +1280,6 @@ void show_free_areas(void)
}
}
- get_page_state(&ps);
get_zone_counts(&active, &inactive, &free);
printk("Free pages: %11ukB (%ukB HighMem)\n",
@@ -1444,13 +1290,13 @@ void show_free_areas(void)
"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
active,
inactive,
- ps.nr_dirty,
- ps.nr_writeback,
- ps.nr_unstable,
+ global_page_state(NR_FILE_DIRTY),
+ global_page_state(NR_WRITEBACK),
+ global_page_state(NR_UNSTABLE_NFS),
nr_free_pages(),
- ps.nr_slab,
- ps.nr_mapped,
- ps.nr_page_table_pages);
+ global_page_state(NR_SLAB),
+ global_page_state(NR_FILE_MAPPED),
+ global_page_state(NR_PAGETABLE));
for_each_zone(zone) {
int i;
@@ -1485,7 +1331,7 @@ void show_free_areas(void)
}
for_each_zone(zone) {
- unsigned long nr, flags, order, total = 0;
+ unsigned long nr[MAX_ORDER], flags, order, total = 0;
show_node(zone);
printk("%s: ", zone->name);
@@ -1496,11 +1342,12 @@ void show_free_areas(void)
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
- nr = zone->free_area[order].nr_free;
- total += nr << order;
- printk("%lu*%lukB ", nr, K(1UL) << order);
+ nr[order] = zone->free_area[order].nr_free;
+ total += nr[order] << order;
}
spin_unlock_irqrestore(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++)
+ printk("%lu*%lukB ", nr[order], K(1UL) << order);
printk("= %lukB\n", K(total));
}
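The reworked loop above snapshots free_area[].nr_free into a local array while zone->lock is held and only calls printk after the lock (and IRQs) are released. A rough userspace sketch of the same snapshot-then-report pattern, assuming 4 KiB pages and illustrative pthread locking:

#include <pthread.h>
#include <stdio.h>

#define MAX_ORDER 11

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_free[MAX_ORDER];	/* shared state, guarded by lock */

static void report_free(void)
{
	unsigned long snap[MAX_ORDER];
	int order;

	pthread_mutex_lock(&lock);
	for (order = 0; order < MAX_ORDER; order++)
		snap[order] = nr_free[order];	/* copy only; no I/O while locked */
	pthread_mutex_unlock(&lock);

	for (order = 0; order < MAX_ORDER; order++)	/* format after unlocking */
		printf("%lu*%lukB ", snap[order], (1UL << order) * 4);	/* assumes 4 KiB pages */
	printf("\n");
}

int main(void)
{
	nr_free[0] = 100;
	nr_free[3] = 7;
	report_free();
	return 0;
}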
@@ -1512,7 +1359,7 @@ void show_free_areas(void)
*
* Add all populated zones of a node to the zonelist.
*/
-static int __init build_zonelists_node(pg_data_t *pgdat,
+static int __meminit build_zonelists_node(pg_data_t *pgdat,
struct zonelist *zonelist, int nr_zones, int zone_type)
{
struct zone *zone;
@@ -1548,7 +1395,7 @@ static inline int highest_zone(int zone_bits)
#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
-static int __initdata node_load[MAX_NUMNODES];
+static int __meminitdata node_load[MAX_NUMNODES];
/**
* find_next_best_node - find the next node that should appear in a given node's fallback list
* @node: node whose fallback list we're appending
@@ -1563,7 +1410,7 @@ static int __initdata node_load[MAX_NUMNODES];
* on them otherwise.
* It returns -1 if no node is found.
*/
-static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
+static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
{
int n, val;
int min_val = INT_MAX;
@@ -1609,7 +1456,7 @@ static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
return best_node;
}
-static void __init build_zonelists(pg_data_t *pgdat)
+static void __meminit build_zonelists(pg_data_t *pgdat)
{
int i, j, k, node, local_node;
int prev_node, load;
@@ -1661,7 +1508,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
#else /* CONFIG_NUMA */
-static void __init build_zonelists(pg_data_t *pgdat)
+static void __meminit build_zonelists(pg_data_t *pgdat)
{
int i, j, k, node, local_node;
@@ -1699,14 +1546,29 @@ static void __init build_zonelists(pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
-void __init build_all_zonelists(void)
+/* return values int ....just for stop_machine_run() */
+static int __meminit __build_all_zonelists(void *dummy)
{
- int i;
+ int nid;
+ for_each_online_node(nid)
+ build_zonelists(NODE_DATA(nid));
+ return 0;
+}
- for_each_online_node(i)
- build_zonelists(NODE_DATA(i));
- printk("Built %i zonelists\n", num_online_nodes());
- cpuset_init_current_mems_allowed();
+void __meminit build_all_zonelists(void)
+{
+ if (system_state == SYSTEM_BOOTING) {
+ __build_all_zonelists(0);
+ cpuset_init_current_mems_allowed();
+ } else {
+ /* we have to stop all cpus to guaranntee there is no user
+ of zonelist */
+ stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
+ /* cpuset refresh routine should be here */
+ }
+ vm_total_pages = nr_free_pagecache_pages();
+ printk("Built %i zonelists. Total pages: %ld\n",
+ num_online_nodes(), vm_total_pages);
}
/*
@@ -1722,7 +1584,8 @@ void __init build_all_zonelists(void)
*/
#define PAGES_PER_WAITQUEUE 256
-static inline unsigned long wait_table_size(unsigned long pages)
+#ifndef CONFIG_MEMORY_HOTPLUG
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
unsigned long size = 1;
@@ -1740,6 +1603,29 @@ static inline unsigned long wait_table_size(unsigned long pages)
return max(size, 4UL);
}
+#else
+/*
+ * A zone's size might be changed by hot-add, so it is not possible to determine
+ * a suitable size for its wait_table. So we use the maximum size now.
+ *
+ * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
+ *
+ * i386 (preemption config) : 4096 x 16 = 64Kbyte.
+ * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
+ * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
+ *
+ * The maximum entries are prepared when a zone's memory is (512K + 256) pages
+ * or more by the traditional way. (See above). It equals:
+ *
+ * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
+ * ia64(16K page size) : = ( 8G + 4M)byte.
+ * powerpc (64K page size) : = (32G +16M)byte.
+ */
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
+{
+ return 4096UL;
+}
+#endif
/*
* This is an integer logarithm so that shifts can be used later
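Taken together, the two variants above size the hashed wait table either from the zone size (at boot) or at the fixed 4096-entry cap (hot-pluggable zones). Here is a rough standalone sketch of the traditional sizing rule, under the assumption that it rounds pages/PAGES_PER_WAITQUEUE up to a power of two and clamps the result to [4, 4096]:

#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256		/* from the #define shown earlier in this file */

/* assumed rule: next power of two of pages/PAGES_PER_WAITQUEUE, clamped to [4, 4096] */
static unsigned long wait_table_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;
	while (size < pages)
		size <<= 1;
	if (size > 4096UL)
		size = 4096UL;		/* the CONFIG_MEMORY_HOTPLUG variant simply returns this cap */
	return size < 4UL ? 4UL : size;
}

int main(void)
{
	/* (512K + 256) pages or more hits the 4096-entry maximum, as the comment above notes */
	printf("%lu\n", wait_table_entries((512UL << 10) + 256));	/* 4096 */
	printf("%lu\n", wait_table_entries(1UL << 10));			/* 4 */
	return 0;
}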
@@ -1964,7 +1850,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int pageset_cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -1986,7 +1872,7 @@ static int pageset_cpuup_callback(struct notifier_block *nfb,
return ret;
}
-static struct notifier_block pageset_notifier =
+static struct notifier_block __cpuinitdata pageset_notifier =
{ &pageset_cpuup_callback, NULL, 0 };
void __init setup_per_cpu_pageset(void)
@@ -2005,23 +1891,46 @@ void __init setup_per_cpu_pageset(void)
#endif
static __meminit
-void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
+int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
struct pglist_data *pgdat = zone->zone_pgdat;
+ size_t alloc_size;
/*
* The per-page waitqueue mechanism uses hashed waitqueues
* per zone.
*/
- zone->wait_table_size = wait_table_size(zone_size_pages);
- zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
- zone->wait_table = (wait_queue_head_t *)
- alloc_bootmem_node(pgdat, zone->wait_table_size
- * sizeof(wait_queue_head_t));
+ zone->wait_table_hash_nr_entries =
+ wait_table_hash_nr_entries(zone_size_pages);
+ zone->wait_table_bits =
+ wait_table_bits(zone->wait_table_hash_nr_entries);
+ alloc_size = zone->wait_table_hash_nr_entries
+ * sizeof(wait_queue_head_t);
+
+ if (system_state == SYSTEM_BOOTING) {
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, alloc_size);
+ } else {
+ /*
+ * This case means that a zone whose size was 0 gets new memory
+ * via memory hot-add.
+ * But it may be the case that a new node was hot-added. In
+ * this case vmalloc() will not be able to use this new node's
+ * memory - this wait_table must be initialized to use this new
+ * node itself as well.
+ * To use this new node's memory, further consideration will be
+ * necessary.
+ */
+ zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
+ }
+ if (!zone->wait_table)
+ return -ENOMEM;
- for(i = 0; i < zone->wait_table_size; ++i)
+ for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
init_waitqueue_head(zone->wait_table + i);
+
+ return 0;
}
static __meminit void zone_pcp_init(struct zone *zone)
@@ -2043,12 +1952,15 @@ static __meminit void zone_pcp_init(struct zone *zone)
zone->name, zone->present_pages, batch);
}
-static __meminit void init_currently_empty_zone(struct zone *zone,
- unsigned long zone_start_pfn, unsigned long size)
+__meminit int init_currently_empty_zone(struct zone *zone,
+ unsigned long zone_start_pfn,
+ unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;
-
- zone_wait_table_init(zone, size);
+ int ret;
+ ret = zone_wait_table_init(zone, size);
+ if (ret)
+ return ret;
pgdat->nr_zones = zone_idx(zone) + 1;
zone->zone_start_pfn = zone_start_pfn;
@@ -2056,6 +1968,8 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+
+ return 0;
}
/*
@@ -2064,12 +1978,13 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
* - mark all memory queues empty
* - clear the memory bitmaps
*/
-static void __init free_area_init_core(struct pglist_data *pgdat,
+static void __meminit free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long j;
int nid = pgdat->node_id;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
+ int ret;
pgdat_resize_init(pgdat);
pgdat->nr_zones = 0;
@@ -2106,12 +2021,14 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone->nr_scan_inactive = 0;
zone->nr_active = 0;
zone->nr_inactive = 0;
+ zap_zone_vm_stats(zone);
atomic_set(&zone->reclaim_in_progress, 0);
if (!size)
continue;
zonetable_add(zone, nid, j, zone_start_pfn, size);
- init_currently_empty_zone(zone, zone_start_pfn, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+ BUG_ON(ret);
zone_start_pfn += size;
}
}
@@ -2152,7 +2069,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
-void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long node_start_pfn,
unsigned long *zholes_size)
{
@@ -2178,307 +2095,18 @@ void __init free_area_init(unsigned long *zones_size)
__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}
-#ifdef CONFIG_PROC_FS
-
-#include <linux/seq_file.h>
-
-static void *frag_start(struct seq_file *m, loff_t *pos)
-{
- pg_data_t *pgdat;
- loff_t node = *pos;
- for (pgdat = first_online_pgdat();
- pgdat && node;
- pgdat = next_online_pgdat(pgdat))
- --node;
-
- return pgdat;
-}
-
-static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
-{
- pg_data_t *pgdat = (pg_data_t *)arg;
-
- (*pos)++;
- return next_online_pgdat(pgdat);
-}
-
-static void frag_stop(struct seq_file *m, void *arg)
-{
-}
-
-/*
- * This walks the free areas for each zone.
- */
-static int frag_show(struct seq_file *m, void *arg)
-{
- pg_data_t *pgdat = (pg_data_t *)arg;
- struct zone *zone;
- struct zone *node_zones = pgdat->node_zones;
- unsigned long flags;
- int order;
-
- for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
- if (!populated_zone(zone))
- continue;
-
- spin_lock_irqsave(&zone->lock, flags);
- seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
- for (order = 0; order < MAX_ORDER; ++order)
- seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
- spin_unlock_irqrestore(&zone->lock, flags);
- seq_putc(m, '\n');
- }
- return 0;
-}
-
-struct seq_operations fragmentation_op = {
- .start = frag_start,
- .next = frag_next,
- .stop = frag_stop,
- .show = frag_show,
-};
-
-/*
- * Output information about zones in @pgdat.
- */
-static int zoneinfo_show(struct seq_file *m, void *arg)
-{
- pg_data_t *pgdat = arg;
- struct zone *zone;
- struct zone *node_zones = pgdat->node_zones;
- unsigned long flags;
-
- for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
- int i;
-
- if (!populated_zone(zone))
- continue;
-
- spin_lock_irqsave(&zone->lock, flags);
- seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
- seq_printf(m,
- "\n pages free %lu"
- "\n min %lu"
- "\n low %lu"
- "\n high %lu"
- "\n active %lu"
- "\n inactive %lu"
- "\n scanned %lu (a: %lu i: %lu)"
- "\n spanned %lu"
- "\n present %lu",
- zone->free_pages,
- zone->pages_min,
- zone->pages_low,
- zone->pages_high,
- zone->nr_active,
- zone->nr_inactive,
- zone->pages_scanned,
- zone->nr_scan_active, zone->nr_scan_inactive,
- zone->spanned_pages,
- zone->present_pages);
- seq_printf(m,
- "\n protection: (%lu",
- zone->lowmem_reserve[0]);
- for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
- seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
- seq_printf(m,
- ")"
- "\n pagesets");
- for_each_online_cpu(i) {
- struct per_cpu_pageset *pageset;
- int j;
-
- pageset = zone_pcp(zone, i);
- for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
- if (pageset->pcp[j].count)
- break;
- }
- if (j == ARRAY_SIZE(pageset->pcp))
- continue;
- for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
- seq_printf(m,
- "\n cpu: %i pcp: %i"
- "\n count: %i"
- "\n high: %i"
- "\n batch: %i",
- i, j,
- pageset->pcp[j].count,
- pageset->pcp[j].high,
- pageset->pcp[j].batch);
- }
-#ifdef CONFIG_NUMA
- seq_printf(m,
- "\n numa_hit: %lu"
- "\n numa_miss: %lu"
- "\n numa_foreign: %lu"
- "\n interleave_hit: %lu"
- "\n local_node: %lu"
- "\n other_node: %lu",
- pageset->numa_hit,
- pageset->numa_miss,
- pageset->numa_foreign,
- pageset->interleave_hit,
- pageset->local_node,
- pageset->other_node);
-#endif
- }
- seq_printf(m,
- "\n all_unreclaimable: %u"
- "\n prev_priority: %i"
- "\n temp_priority: %i"
- "\n start_pfn: %lu",
- zone->all_unreclaimable,
- zone->prev_priority,
- zone->temp_priority,
- zone->zone_start_pfn);
- spin_unlock_irqrestore(&zone->lock, flags);
- seq_putc(m, '\n');
- }
- return 0;
-}
-
-struct seq_operations zoneinfo_op = {
- .start = frag_start, /* iterate over all zones. The same as in
- * fragmentation. */
- .next = frag_next,
- .stop = frag_stop,
- .show = zoneinfo_show,
-};
-
-static char *vmstat_text[] = {
- "nr_dirty",
- "nr_writeback",
- "nr_unstable",
- "nr_page_table_pages",
- "nr_mapped",
- "nr_slab",
-
- "pgpgin",
- "pgpgout",
- "pswpin",
- "pswpout",
-
- "pgalloc_high",
- "pgalloc_normal",
- "pgalloc_dma32",
- "pgalloc_dma",
-
- "pgfree",
- "pgactivate",
- "pgdeactivate",
-
- "pgfault",
- "pgmajfault",
-
- "pgrefill_high",
- "pgrefill_normal",
- "pgrefill_dma32",
- "pgrefill_dma",
-
- "pgsteal_high",
- "pgsteal_normal",
- "pgsteal_dma32",
- "pgsteal_dma",
-
- "pgscan_kswapd_high",
- "pgscan_kswapd_normal",
- "pgscan_kswapd_dma32",
- "pgscan_kswapd_dma",
-
- "pgscan_direct_high",
- "pgscan_direct_normal",
- "pgscan_direct_dma32",
- "pgscan_direct_dma",
-
- "pginodesteal",
- "slabs_scanned",
- "kswapd_steal",
- "kswapd_inodesteal",
- "pageoutrun",
- "allocstall",
-
- "pgrotated",
- "nr_bounce",
-};
-
-static void *vmstat_start(struct seq_file *m, loff_t *pos)
-{
- struct page_state *ps;
-
- if (*pos >= ARRAY_SIZE(vmstat_text))
- return NULL;
-
- ps = kmalloc(sizeof(*ps), GFP_KERNEL);
- m->private = ps;
- if (!ps)
- return ERR_PTR(-ENOMEM);
- get_full_page_state(ps);
- ps->pgpgin /= 2; /* sectors -> kbytes */
- ps->pgpgout /= 2;
- return (unsigned long *)ps + *pos;
-}
-
-static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
-{
- (*pos)++;
- if (*pos >= ARRAY_SIZE(vmstat_text))
- return NULL;
- return (unsigned long *)m->private + *pos;
-}
-
-static int vmstat_show(struct seq_file *m, void *arg)
-{
- unsigned long *l = arg;
- unsigned long off = l - (unsigned long *)m->private;
-
- seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
- return 0;
-}
-
-static void vmstat_stop(struct seq_file *m, void *arg)
-{
- kfree(m->private);
- m->private = NULL;
-}
-
-struct seq_operations vmstat_op = {
- .start = vmstat_start,
- .next = vmstat_next,
- .stop = vmstat_stop,
- .show = vmstat_show,
-};
-
-#endif /* CONFIG_PROC_FS */
-
#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
int cpu = (unsigned long)hcpu;
- long *count;
- unsigned long *src, *dest;
if (action == CPU_DEAD) {
- int i;
-
- /* Drain local pagecache count. */
- count = &per_cpu(nr_pagecache_local, cpu);
- atomic_add(*count, &nr_pagecache);
- *count = 0;
local_irq_disable();
__drain_pages(cpu);
-
- /* Add dead cpu's page_states to our own. */
- dest = (unsigned long *)&__get_cpu_var(page_states);
- src = (unsigned long *)&per_cpu(page_states, cpu);
-
- for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
- i++) {
- dest[i] += src[i];
- src[i] = 0;
- }
-
+ vm_events_fold_cpu(cpu);
local_irq_enable();
+ refresh_cpu_vm_stats(cpu);
}
return NOTIFY_OK;
}
@@ -2804,42 +2432,14 @@ void *__init alloc_large_system_hash(const char *tablename,
}
#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-/*
- * pfn <-> page translation. out-of-line version.
- * (see asm-generic/memory_model.h)
- */
-#if defined(CONFIG_FLATMEM)
-struct page *pfn_to_page(unsigned long pfn)
-{
- return mem_map + (pfn - ARCH_PFN_OFFSET);
-}
-unsigned long page_to_pfn(struct page *page)
-{
- return (page - mem_map) + ARCH_PFN_OFFSET;
-}
-#elif defined(CONFIG_DISCONTIGMEM)
-struct page *pfn_to_page(unsigned long pfn)
-{
- int nid = arch_pfn_to_nid(pfn);
- return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid);
-}
-unsigned long page_to_pfn(struct page *page)
-{
- struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
- return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
-}
-#elif defined(CONFIG_SPARSEMEM)
struct page *pfn_to_page(unsigned long pfn)
{
- return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
+ return __pfn_to_page(pfn);
}
-
unsigned long page_to_pfn(struct page *page)
{
- long section_id = page_to_section(page);
- return page - __section_mem_map_addr(__nr_to_section(section_id));
+ return __page_to_pfn(page);
}
-#endif /* CONFIG_FLATMEM/DISCONTIGMME/SPARSEMEM */
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */