From c50f68c8aea421267ba7995b1c485c281b28add6 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Mon, 24 Mar 2008 20:50:48 +1100
Subject: [LMB] Add lmb_alloc_nid()

A variant of lmb_alloc() that tries to allocate memory on a specified
NUMA node 'nid' but falls back to normal lmb_alloc() if that fails.

The caller provides a 'nid_range' function pointer which assists the
allocator.  It is given args 'start', 'end', and a pointer to integer
'this_nid'.  It places at 'this_nid' the NUMA node id that corresponds
to 'start', and returns the end address within 'start' to 'end' at
which memory associated with 'nid' ends.

This callback allows a platform to use lmb_alloc_nid() in just about
any context, even ones in which early_pfn_to_nid() might not be
working yet.

This function will be used by the NUMA setup code on sparc64, and it
can also be used by powerpc, replacing its hand-crafted
"careful_allocation()" function in arch/powerpc/mm/numa.c

If x86 ever converts its NUMA support over to using the LMB helpers,
it can use this too as it has something entirely similar.

Signed-off-by: David S. Miller
Signed-off-by: Paul Mackerras
---
 lib/lmb.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 76 insertions(+), 10 deletions(-)

diff --git a/lib/lmb.c b/lib/lmb.c
index 3c43b95fef4..549fbb3d70c 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -232,6 +232,82 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
 	return (i < rgn->cnt) ? i : -1;
 }
 
+static u64 lmb_align_down(u64 addr, u64 size)
+{
+	return addr & ~(size - 1);
+}
+
+static u64 lmb_align_up(u64 addr, u64 size)
+{
+	return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
+					   u64 size, u64 align)
+{
+	u64 base;
+	long j;
+
+	base = lmb_align_down((end - size), align);
+	while (start <= base &&
+	       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
+		base = lmb_align_down(lmb.reserved.region[j].base - size,
+				      align);
+
+	if (base != 0 && start <= base) {
+		if (lmb_add_region(&lmb.reserved, base,
+				   lmb_align_up(size, align)) < 0)
+			base = ~(u64)0;
+		return base;
+	}
+
+	return ~(u64)0;
+}
+
+static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
+				       u64 (*nid_range)(u64, u64, int *),
+				       u64 size, u64 align, int nid)
+{
+	u64 start, end;
+
+	start = mp->base;
+	end = start + mp->size;
+
+	start = lmb_align_up(start, align);
+	while (start < end) {
+		u64 this_end;
+		int this_nid;
+
+		this_end = nid_range(start, end, &this_nid);
+		if (this_nid == nid) {
+			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
+							   size, align);
+			if (ret != ~(u64)0)
+				return ret;
+		}
+		start = this_end;
+	}
+
+	return ~(u64)0;
+}
+
+u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+			 u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+	struct lmb_region *mem = &lmb.memory;
+	int i;
+
+	for (i = 0; i < mem->cnt; i++) {
+		u64 ret = lmb_alloc_nid_region(&mem->region[i],
+					       nid_range,
+					       size, align, nid);
+		if (ret != ~(u64)0)
+			return ret;
+	}
+
+	return lmb_alloc(size, align);
+}
+
 u64 __init lmb_alloc(u64 size, u64 align)
 {
 	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
@@ -250,16 +326,6 @@ u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 	return alloc;
 }
 
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-	return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
 u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
 	long i, j;
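
For illustration only, a nid_range() callback a platform might hand to
lmb_alloc_nid() could look roughly like the sketch below.  The names
example_nid_range(), example_nid_table[] and EXAMPLE_NODE_SHIFT are
hypothetical and are not part of this patch or of any kernel API; a real
implementation (e.g. the sparc64 one this is written for) would derive the
node id from firmware-provided memory layout information rather than a
fixed table of equal-sized granules.

	/* Hypothetical: assume the platform splits memory into 256MB
	 * granules and already knows which NUMA node owns each one. */
	#define EXAMPLE_NODE_SHIFT	28

	static int example_nid_table[64] __initdata;	/* granule -> nid, covers 16GB */

	static u64 __init example_nid_range(u64 start, u64 end, int *nid)
	{
		u64 granule = 1ULL << EXAMPLE_NODE_SHIFT;
		u64 this_end = (start & ~(granule - 1)) + granule;

		/* Tell the caller which node 'start' belongs to ... */
		*nid = example_nid_table[start >> EXAMPLE_NODE_SHIFT];

		/* ... and how far memory on that same node extends,
		 * capped at 'end' as the interface requires. */
		while (this_end < end &&
		       example_nid_table[this_end >> EXAMPLE_NODE_SHIFT] == *nid)
			this_end += granule;

		return this_end < end ? this_end : end;
	}

	/* Caller side, e.g. in platform NUMA setup code: */
	u64 paddr = lmb_alloc_nid(size, align, nid, example_nid_range);

With such a callback in place the caller gets node-local memory when the
node has a large enough free range, and otherwise quietly falls back to
lmb_alloc(), which is the intended behaviour of lmb_alloc_nid().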