From 9d88a2eb6e05c07aa0d484b8fa1372722fa921d0 Mon Sep 17 00:00:00 2001
From: Badari Pulavarty
Date: Fri, 18 Apr 2008 13:33:53 -0700
Subject: [POWERPC] Provide walk_memory_resource() for powerpc

Provide walk_memory_resource() for 64-bit powerpc. PowerPC maintains
logical memory region mapping in the lmb.memory structure. Walk through
these structures and do the callbacks for the contiguous chunks.

Signed-off-by: Badari Pulavarty
Cc: Yasunori Goto
Cc: Benjamin Herrenschmidt
Signed-off-by: Andrew Morton
Signed-off-by: Paul Mackerras
---
 arch/powerpc/mm/mem.c |   30 +++++++++++++++++++++++-------
 include/linux/lmb.h   |    1 +
 lib/lmb.c             |   33 +++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d9e37f365b5..f67e118116f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -154,19 +154,35 @@ out:
 
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
- * memory range. On PPC64, since this range comes from /sysfs, the range
- * is guaranteed to be valid, non-overlapping and can not contain any
- * holes. By the time we get here (memory add or remove), /proc/device-tree
- * is updated and correct. Only reason we need to check against device-tree
- * would be if we allow user-land to specify a memory range through a
- * system call/ioctl etc. instead of doing offline/online through /sysfs.
+ * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
+ * Instead it maintains it in lmb.memory structures. Walk through the
+ * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 			int (*func)(unsigned long, unsigned long, void *))
 {
-	return (*func)(start_pfn, nr_pages, arg);
+	struct lmb_property res;
+	unsigned long pfn, len;
+	u64 end;
+	int ret = -1;
+
+	res.base = (u64) start_pfn << PAGE_SHIFT;
+	res.size = (u64) nr_pages << PAGE_SHIFT;
+
+	end = res.base + res.size - 1;
+	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
+		len = (unsigned long)(res.size >> PAGE_SHIFT);
+		ret = (*func)(pfn, len, arg);
+		if (ret)
+			break;
+		res.base += (res.size + 1);
+		res.size = (end - res.base + 1);
+	}
+	return ret;
 }
+EXPORT_SYMBOL_GPL(walk_memory_resource);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 55d4b261a9e..c46c89505da 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -54,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void);
 extern u64 __init lmb_end_of_DRAM(void);
 extern void __init lmb_enforce_memory_limit(u64 memory_limit);
 extern int __init lmb_is_reserved(u64 addr);
+extern int lmb_find(struct lmb_property *res);
 
 extern void lmb_dump_all(void);

diff --git a/lib/lmb.c b/lib/lmb.c
index 5b2a739bc3d..83287d3869a 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -474,3 +474,36 @@ int __init lmb_is_reserved(u64 addr)
 	}
 	return 0;
 }
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int lmb_find(struct lmb_property *res)
+{
+	int i;
+	u64 rstart, rend;
+
+	rstart = res->base;
+	rend = rstart + res->size - 1;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		u64 start = lmb.memory.region[i].base;
+		u64 end = start + lmb.memory.region[i].size - 1;
+
+		if (start > rend)
+			return -1;
+
+		if ((end >= rstart) && (start < rend)) {
+			/* adjust the request */
+			if (rstart < start)
+				rstart = start;
+			if (rend > end)
+				rend = end;
+			res->base = rstart;
+			res->size = rend - rstart + 1;
+			return 0;
+		}
+	}
+	return -1;
+}
--
cgit v1.2.3-70-g09d2
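
[Editor's illustration, not part of the patch] To see the chunking behaviour the
patch describes, here is a minimal stand-alone user-space C sketch of the same
walk: a toy region table plays the role of lmb.memory, find_region() mirrors
lmb_find()'s clipping logic, and a callback fires once per contiguous chunk.
The region addresses, the PAGE_SHIFT value, and the names find_region(),
print_chunk() and REGION_CNT are all invented for illustration; they are not
kernel API.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4K pages, for illustration */

struct region {
	uint64_t base;
	uint64_t size;
};

/* Toy stand-in for lmb.memory: two regions with a hole between them. */
static const struct region memory[] = {
	{ 0x00000000, 0x10000000 },	/* 0..256MB   */
	{ 0x20000000, 0x10000000 },	/* 512..768MB */
};
#define REGION_CNT 2

/*
 * Same clipping logic as the patch's lmb_find(): trim the request in
 * *base/*size to the first overlapping region, or return -1 when the
 * remaining range falls entirely past the last region.
 */
static int find_region(uint64_t *base, uint64_t *size)
{
	uint64_t rstart = *base;
	uint64_t rend = rstart + *size - 1;
	int i;

	for (i = 0; i < REGION_CNT; i++) {
		uint64_t start = memory[i].base;
		uint64_t end = start + memory[i].size - 1;

		if (start > rend)
			return -1;	/* regions are sorted; nothing left */

		if ((end >= rstart) && (start < rend)) {
			if (rstart < start)
				rstart = start;
			if (rend > end)
				rend = end;
			*base = rstart;
			*size = rend - rstart + 1;
			return 0;
		}
	}
	return -1;
}

/* Example callback; a nonzero return would stop the walk, as in the patch. */
static int print_chunk(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	printf("chunk: pfn %#lx, %lu pages\n", pfn, nr_pages);
	return 0;
}

int main(void)
{
	/* Walk 0..768MB; the hole splits it into two callbacks. */
	uint64_t base = 0;
	uint64_t size = 0x30000000;
	uint64_t end = base + size - 1;

	while ((base < end) && (find_region(&base, &size) >= 0)) {
		if (print_chunk((unsigned long)(base >> PAGE_SHIFT),
				(unsigned long)(size >> PAGE_SHIFT), NULL))
			break;
		base += size + 1;	/* same advance as the patch's loop */
		size = end - base + 1;
	}
	return 0;
}

Running this prints two chunks, one per region, with the hole skipped. Note the
walk advances with base += size + 1, exactly as walk_memory_resource() does in
the patch; this steps one byte past the end of each chunk, which is harmless
here because the next find_region() call clips the request up to the start of
the following region anyway.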