author     David Woodhouse <dwmw2@infradead.org>    2006-05-24 09:22:21 +0100
committer  David Woodhouse <dwmw2@infradead.org>    2006-05-24 09:22:21 +0100
commit     66643de455c27973ac31ad6de9f859d399916842 (patch)
tree       7ebed7f051879007d4b11d6aaa9e65a1bcb0b08f /mm/page_alloc.c
parent     2c23d62abb820e19c54012520f08a198c2233a85 (diff)
parent     387e2b0439026aa738a9edca15a57e5c0bcb4dfc (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:

        include/asm-powerpc/unistd.h
        include/asm-sparc/unistd.h
        include/asm-sparc64/unistd.h

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'mm/page_alloc.c')
 -rw-r--r--  mm/page_alloc.c | 30 +++++++++++++++++++++---------

1 file changed, 21 insertions(+), 9 deletions(-)
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047..253a450c400 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -950,7 +951,7 @@ restart:
                 goto got_pg;
 
         do {
-                if (cpuset_zone_allowed(*z, gfp_mask))
+                if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
                         wakeup_kswapd(*z, order);
         } while (*(++z));
 
@@ -969,7 +970,8 @@ restart:
                 alloc_flags |= ALLOC_HARDER;
         if (gfp_mask & __GFP_HIGH)
                 alloc_flags |= ALLOC_HIGH;
-        alloc_flags |= ALLOC_CPUSET;
+        if (wait)
+                alloc_flags |= ALLOC_CPUSET;
 
         /*
          * Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -2123,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
         /* ia64 gets its own node_mem_map, before this, without bootmem */
         if (!pgdat->node_mem_map) {
-                unsigned long size;
+                unsigned long size, start, end;
                 struct page *map;
 
-                size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+                /*
+                 * The zone's endpoints aren't required to be MAX_ORDER
+                 * aligned but the node_mem_map endpoints must be in order
+                 * for the buddy allocator to function correctly.
+                 */
+                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+                end = ALIGN(end, MAX_ORDER_NR_PAGES);
+                size = (end - start) * sizeof(struct page);
                 map = alloc_remap(pgdat->node_id, size);
                 if (!map)
                         map = alloc_bootmem_node(pgdat, size);
-                pgdat->node_mem_map = map;
+                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
         }
 #ifdef CONFIG_FLATMEM
         /*
@@ -2566,9 +2576,11 @@ void setup_per_zone_pages_min(void)
         }
 
         for_each_zone(zone) {
-                unsigned long tmp;
+                u64 tmp;
+
                 spin_lock_irqsave(&zone->lru_lock, flags);
-                tmp = (pages_min * zone->present_pages) / lowmem_pages;
+                tmp = (u64)pages_min * zone->present_pages;
+                do_div(tmp, lowmem_pages);
                 if (is_highmem(zone)) {
                         /*
                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2607,8 @@ void setup_per_zone_pages_min(void)
                         zone->pages_min = tmp;
                 }
 
-                zone->pages_low   = zone->pages_min + tmp / 4;
-                zone->pages_high  = zone->pages_min + tmp / 2;
+                zone->pages_low   = zone->pages_min + (tmp >> 2);
+                zone->pages_high  = zone->pages_min + (tmp >> 1);
                 spin_unlock_irqrestore(&zone->lru_lock, flags);
         }
 
```
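Two of the merged changes are easier to follow with the arithmetic written out. The alloc_node_mem_map() hunk rounds the node_mem_map endpoints outward so both fall on MAX_ORDER boundaries, then offsets the stored pointer back to node_start_pfn. The sketch below is a minimal userspace rendition of that rounding; the pfn values are invented and MAX_ORDER_NR_PAGES is assumed to be 1024 (MAX_ORDER == 11), neither of which comes from this commit.

```c
/*
 * Userspace sketch of the node_mem_map rounding in the hunk above.
 * ASSUMPTIONS: the pfn values are made up, and MAX_ORDER_NR_PAGES
 * is taken to be 1024; neither is taken from the commit itself.
 */
#include <stdio.h>

#define MAX_ORDER_NR_PAGES  1024UL
#define ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long node_start_pfn = 0x10a30;   /* not MAX_ORDER aligned */
        unsigned long node_spanned_pages = 0x8000;
        unsigned long start, end;

        /* Same rounding as the hunk: push both endpoints outward. */
        start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
        end = ALIGN(node_start_pfn + node_spanned_pages, MAX_ORDER_NR_PAGES);

        printf("start pfn: %#lx -> %#lx\n", node_start_pfn, start);
        printf("end pfn:   %#lx -> %#lx\n",
               node_start_pfn + node_spanned_pages, end);
        /* The old formula sized the map as (spanned_pages + 1) entries. */
        printf("map now covers %lu pages instead of %lu\n",
               end - start, node_spanned_pages + 1);
        return 0;
}
```

Because the allocated map now begins before node_start_pfn, the merged code stores map + (node_start_pfn - start), so indexing by pfn - node_start_pfn still works while the array itself stays MAX_ORDER aligned for the buddy allocator.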
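The setup_per_zone_pages_min() hunk addresses a 32-bit overflow: pages_min * zone->present_pages is evaluated in unsigned long, which can wrap on 32-bit kernels with large zones. Below is a userspace illustration of the wrap; the zone sizes are made up, and ordinary uint64_t division stands in for the kernel's do_div(), which divides a u64 in place.

```c
/*
 * Userspace sketch of the overflow fixed by the do_div() hunk.
 * ASSUMPTIONS: the zone sizes are invented (roughly a 64GB PAE box
 * with ~896MB of lowmem), and plain uint64_t division stands in
 * for the kernel's do_div() macro.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t pages_min = 1024;           /* min_free_kbytes >> (PAGE_SHIFT - 10) */
        uint32_t present_pages = 16777216;   /* 64GB of 4K pages in this zone */
        uint32_t lowmem_pages = 229376;      /* ~896MB of lowmem */

        /* Old expression: the 32-bit product wraps (2^34 mod 2^32 == 0). */
        uint32_t bad = (pages_min * present_pages) / lowmem_pages;

        /* New expression: widen before multiplying, then divide. */
        uint64_t tmp = (uint64_t)pages_min * present_pages;
        tmp /= lowmem_pages;                 /* do_div(tmp, lowmem_pages) in the kernel */

        printf("32-bit result: %u, 64-bit result: %llu\n",
               bad, (unsigned long long)tmp);
        /* The watermark deltas now use shifts on the widened value. */
        printf("pages_low delta: %llu, pages_high delta: %llu\n",
               (unsigned long long)(tmp >> 2), (unsigned long long)(tmp >> 1));
        return 0;
}
```

With tmp widened to u64, the watermark deltas also switch from tmp / 4 and tmp / 2 to (tmp >> 2) and (tmp >> 1), keeping the 64-bit arithmetic obviously cheap on 32-bit targets.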