Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/contig.c     |  76
-rw-r--r--  arch/ia64/mm/discontig.c  |  46
-rw-r--r--  arch/ia64/mm/init.c       |  38
3 files changed, 97 insertions(+), 63 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b..63e6d49c581 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,47 +30,69 @@ static unsigned long max_gap;
#endif
/**
- * show_mem - display a memory statistics summary
+ * show_mem - give short summary of memory stats
*
- * Just walks the pages in the system and describes where they're allocated.
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
*/
-void
-show_mem (void)
+void show_mem(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ int i, total_reserved = 0;
+ int total_shared = 0, total_cached = 0;
+ unsigned long total_present = 0;
+ pg_data_t *pgdat;
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
-
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- for (i = 0; i < max_mapnr; i++) {
- if (!pfn_valid(i)) {
+ printk(KERN_INFO "Node memory in pages:\n");
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+ unsigned long flags;
+ int shared = 0, cached = 0, reserved = 0;
+
+ pgdat_resize_lock(pgdat, &flags);
+ present = pgdat->node_present_pages;
+ for(i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page;
+ if (pfn_valid(pgdat->node_start_pfn + i))
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
- if (max_gap < LARGE_GAP)
- continue;
- i = vmemmap_find_next_valid_pfn(0, i) - 1;
+ if (max_gap < LARGE_GAP)
+ continue;
#endif
- continue;
+ i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+ i) - 1;
+ continue;
+ }
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page)-1;
}
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map + i))
- shared += page_count(mem_map + i) - 1;
+ pgdat_resize_unlock(pgdat, &flags);
+ total_present += present;
+ total_reserved += reserved;
+ total_cached += cached;
+ total_shared += shared;
+ printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
+ "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+ present, reserved, shared, cached);
}
- printk(KERN_INFO "%d pages of RAM\n", total);
- printk(KERN_INFO "%d reserved pages\n", reserved);
- printk(KERN_INFO "%d pages shared\n", shared);
- printk(KERN_INFO "%d pages swap cached\n", cached);
- printk(KERN_INFO "%ld pages in page table cache\n",
+ printk(KERN_INFO "%ld pages of RAM\n", total_present);
+ printk(KERN_INFO "%d reserved pages\n", total_reserved);
+ printk(KERN_INFO "%d pages shared\n", total_shared);
+ printk(KERN_INFO "%d pages swap cached\n", total_cached);
+ printk(KERN_INFO "Total of %ld pages in page table cache\n",
pgtable_quicklist_total_size());
+ printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}
+
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
@@ -177,7 +199,7 @@ find_memory (void)
#ifdef CONFIG_CRASH_DUMP
/* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is * reset. */
+ * size before original memory map is reset. */
saved_max_pfn = max_pfn;
#endif
}
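The new show_mem() above replaces the flat mem_map scan with a per-node walk under the pgdat resize lock. A minimal sketch of that pattern, using only calls visible in the hunk (the function name and the reserved-only tally are illustrative, not part of the patch):

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Count reserved pages node by node; holding the resize lock keeps
 * node_start_pfn/node_spanned_pages stable against memory hotplug. */
static unsigned long count_reserved_by_node(void)
{
        pg_data_t *pgdat;
        unsigned long total = 0;

        for_each_online_pgdat(pgdat) {
                unsigned long i, flags, reserved = 0;

                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        unsigned long pfn = pgdat->node_start_pfn + i;

                        if (!pfn_valid(pfn))
                                continue;       /* hole in the node's span */
                        if (PageReserved(pfn_to_page(pfn)))
                                reserved++;
                }
                pgdat_resize_unlock(pgdat, &flags);
                total += reserved;
        }
        return total;
}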
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49..6eae596c509 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
return;
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
- void *arg)
-{
- int nid;
-
- start = __pa(start) >> PAGE_SHIFT;
- end = __pa(end) >> PAGE_SHIFT;
- nid = early_pfn_to_nid(start);
- memory_present(nid, start, end);
-
- return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
- efi_memmap_walk(register_sparse_mem, NULL);
- sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
/**
* find_memory - walk the EFI memory map and setup the bootmem allocator
*
@@ -473,6 +442,9 @@ void __init find_memory(void)
node_clear(node, memory_less_mask);
mem_data[node].min_pfn = ~0UL;
}
+
+ efi_memmap_walk(register_active_ranges, NULL);
+
/*
* Initialize the boot memory maps in reverse order since that's
* what the bootmem allocator expects
@@ -506,6 +478,12 @@ void __init find_memory(void)
max_pfn = max_low_pfn;
find_initrd();
+
+#ifdef CONFIG_CRASH_DUMP
+ /* If we are doing a crash dump, we still need to know the real mem
+ * size before original memory map is reset. */
+ saved_max_pfn = max_pfn;
+#endif
}
#ifdef CONFIG_SMP
@@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
- add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
mem_data[node].num_physpages += len >> PAGE_SHIFT;
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
@@ -686,10 +663,11 @@ void __init paging_init(void)
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- arch_sparse_init();
-
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
+ sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_init();
+
#ifdef CONFIG_VIRTUAL_MEM_MAP
vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
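With register_sparse_mem() and arch_sparse_init() gone, SPARSEMEM sections are now derived from the active-range table instead of a second EFI walk. Condensed, the resulting boot order is (a sketch of the call sequence from the hunks above, not the complete functions):

/* find_memory(), early boot: feed every usable EFI range to
 * add_active_range() via register_active_ranges(). */
efi_memmap_walk(register_active_ranges, NULL);

/* paging_init(), later: publish the already-registered ranges to
 * SPARSEMEM, then build the section tables. */
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();

The ordering is the point: sparse_memory_present_with_active_regions() only sees ranges that register_active_ranges() recorded earlier in find_memory().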
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657..faaca21a371 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>
#include <asm/a.out.h>
#include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+ unsigned long pg_addr, end;
+
+ pg_addr = PAGE_ALIGN((unsigned long) addr);
+ end = (unsigned long) addr + size;
+ while (pg_addr + PAGE_SIZE <= end) {
+ struct page *page = virt_to_page(pg_addr);
+ set_bit(PG_arch_1, &page->flags);
+ pg_addr += PAGE_SIZE;
+ }
+}
+
inline void
ia64_set_rbs_bot (void)
{
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
return 0;
}
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
- add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+ int nid = paddr_to_nid(__pa(start));
+
+ if (nid < 0)
+ nid = 0;
+#ifdef CONFIG_KEXEC
+ if (start > crashk_res.start && start < crashk_res.end)
+ start = crashk_res.end;
+ if (end > crashk_res.start && end < crashk_res.end)
+ end = crashk_res.start;
+#endif
+
+ if (start < end)
+ add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+ __pa(end) >> PAGE_SHIFT);
return 0;
}
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
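register_active_ranges() in init.c now resolves the owning node and, under CONFIG_KEXEC, trims each range against the crash-kernel reservation before registering it, so kdump memory never enters the active-range table. The clamp reduces to this (a sketch; the helper name is illustrative, the bounds logic is the patch's):

/* Trim [*start, *end) against the reserved window [res_start, res_end). */
static void trim_against_crashk(u64 *start, u64 *end,
                                u64 res_start, u64 res_end)
{
        if (*start > res_start && *start < res_end)
                *start = res_end;       /* range begins inside the window */
        if (*end > res_start && *end < res_end)
                *end = res_start;       /* range ends inside the window */
}

The final start < end check in the patch then drops any range that collapsed entirely inside the reservation.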