author		Yasunori Goto <y-goto@jp.fujitsu.com>	2006-06-23 02:03:08 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 07:42:46 -0700
commit		02b694dea473ad3db1e2d1b14c1fef8fbd92e5e6 (patch)
tree		a39451192b589486d70f4d989f9b7f19ef889db0
parent		3c5a87f476bed45616e7e543dcaea4440c77bf93 (diff)
[PATCH] wait_table and zonelist initializing for memory hotadd: change name of wait_table_size()
This just renames wait_table_size() to wait_table_hash_nr_entries().
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/mmzone.h	 4
-rw-r--r--	mm/page_alloc.c     	12
2 files changed, 9 insertions, 7 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9742e3c1622..652673ea92f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -197,7 +197,7 @@ struct zone {
 
 	/*
 	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
+	 * wait_table_hash_nr_entries	-- the size of the hash table array
 	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
 	 *
 	 * The purpose of all these is to keep track of the people
@@ -220,7 +220,7 @@ struct zone {
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
-	unsigned long		wait_table_size;
+	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;
 
	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd631c2536a..27320a0542d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1727,7 +1727,7 @@ void __init build_all_zonelists(void)
  */
 #define PAGES_PER_WAITQUEUE	256
 
-static inline unsigned long wait_table_size(unsigned long pages)
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
 {
 	unsigned long size = 1;
 
@@ -2019,13 +2019,15 @@ void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
-	zone->wait_table_size = wait_table_size(zone_size_pages);
-	zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
+	zone->wait_table_hash_nr_entries =
+		 wait_table_hash_nr_entries(zone_size_pages);
+	zone->wait_table_bits =
+		wait_table_bits(zone->wait_table_hash_nr_entries);
	zone->wait_table = (wait_queue_head_t *)
-		alloc_bootmem_node(pgdat, zone->wait_table_size
+		alloc_bootmem_node(pgdat, zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t));
 
-	for(i = 0; i < zone->wait_table_size; ++i)
+	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);
 }
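For context, the renamed field holds the number of entries in the zone's hashed waitqueue table, and the comment in mmzone.h records the invariant nr_entries == (1 << wait_table_bits). The sketch below is a minimal, user-space illustration of that sizing idea (one entry per PAGES_PER_WAITQUEUE pages, rounded up to a power of two); it is not the kernel's implementation, and the helper names, the rounding loop, and the demo main() are assumptions made for the example. Only PAGES_PER_WAITQUEUE (256) is taken from the diff above.

/*
 * Illustrative sketch, not kernel code: derive a hashed-waitqueue table
 * size from a zone's page count so that nr_entries == 1UL << bits.
 */
#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256	/* value taken from the diff above */

/* Round pages / PAGES_PER_WAITQUEUE up to the next power of two. */
static unsigned long example_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;
	while (size < pages)
		size <<= 1;
	return size;
}

/* Number of bits such that nr_entries == 1UL << bits. */
static unsigned long example_table_bits(unsigned long nr_entries)
{
	unsigned long bits = 0;

	while ((1UL << bits) < nr_entries)
		bits++;
	return bits;
}

int main(void)
{
	unsigned long zone_pages = 1000000;	/* hypothetical zone size */
	unsigned long nr = example_hash_nr_entries(zone_pages);

	/* For 1,000,000 pages: 3907 buckets needed, rounded up to 4096 (bits = 12). */
	printf("entries=%lu bits=%lu\n", nr, example_table_bits(nr));
	return 0;
}

The rename in the patch makes this relationship easier to read at the call sites in zone_wait_table_init(): the field now plainly counts hash-table entries rather than suggesting a size in bytes.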