Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--  arch/sparc64/mm/init.c  | 41
-rw-r--r--  arch/sparc64/mm/tsb.c   |  5
-rw-r--r--  arch/sparc64/mm/ultra.S | 44
3 files changed, 44 insertions(+), 46 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4e821b3ecb0..b4aeb0f696d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -49,6 +49,7 @@
#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
+#include <asm/irq.h>
#define MAX_PHYS_ADDRESS (1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -795,6 +796,9 @@ static unsigned long nid_range(unsigned long start, unsigned long end,
start += PAGE_SIZE;
}
+ if (start > end)
+ start = end;
+
return start;
}
#else
@@ -1722,8 +1726,7 @@ void __init paging_init(void)
find_ramdisk(phys_base);
- if (cmdline_memory_size)
- lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
+ lmb_enforce_memory_limit(cmdline_memory_size);
lmb_analyze();
lmb_dump_all();
@@ -1771,6 +1774,16 @@ void __init paging_init(void)
if (tlb_type == hypervisor)
sun4v_mdesc_init();
+ /* Once the OF device tree and MDESC have been setup, we know
+ * the list of possible cpus. Therefore we can allocate the
+ * IRQ stacks.
+ */
+ for_each_possible_cpu(i) {
+ /* XXX Use node local allocations... XXX */
+ softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ }
+
/* Setup bootmem... */
last_valid_pfn = end_pfn = bootmem_init(phys_base);
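The hunk above allocates the per-cpu IRQ stacks once the OF device tree
and MDESC have been parsed, the point at which the set of possible cpus
is known.  A minimal sketch of that allocation pattern in isolation:
the alloc_irq_stacks() wrapper name is hypothetical, and the array
declarations are assumed, while the lmb_alloc()/__va() calls come
straight from the hunk:

	void *softirq_stack[NR_CPUS];	/* assumed declarations; the   */
	void *hardirq_stack[NR_CPUS];	/* patch only assigns to these */

	static void __init alloc_irq_stacks(void)  /* hypothetical wrapper */
	{
		int i;

		for_each_possible_cpu(i) {
			/* lmb_alloc() returns a THREAD_SIZE-aligned
			 * physical address; __va() converts it to a
			 * kernel virtual pointer usable as a stack base. */
			softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
			hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		}
	}

Node-local allocation is deliberately deferred, per the XXX comment in
the patch.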
@@ -1950,6 +1963,15 @@ void __init mem_init(void)
void free_initmem(void)
{
unsigned long addr, initend;
+ int do_free = 1;
+
+ /* If the physical memory maps were trimmed by kernel command
+ * line options, don't even try freeing this initmem stuff up.
+ * The kernel image could have been in the trimmed out region
+ * and if so the freeing below will free invalid page structs.
+ */
+ if (cmdline_memory_size)
+ do_free = 0;
/*
* The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
@@ -1964,13 +1986,16 @@ void free_initmem(void)
((unsigned long) __va(kern_base)) -
((unsigned long) KERNBASE));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- p = virt_to_page(page);
- ClearPageReserved(p);
- init_page_count(p);
- __free_page(p);
- num_physpages++;
- totalram_pages++;
+ if (do_free) {
+ p = virt_to_page(page);
+
+ ClearPageReserved(p);
+ init_page_count(p);
+ __free_page(p);
+ num_physpages++;
+ totalram_pages++;
+ }
}
}
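The free_initmem() change pairs with the lmb_enforce_memory_limit()
change earlier in this diff: when a "mem=" command line option trims
the physical memory map (recorded in cmdline_memory_size), part of the
kernel image may sit inside the trimmed region, and freeing the init
pages would then manipulate invalid page structs.  A condensed sketch
of the function as it looks after this patch, with the loop skeleton
reconstructed from the context lines above; __init_begin/__init_end
are assumed to come from the linker script as usual:

	void free_initmem(void)
	{
		unsigned long addr, initend;
		int do_free = !cmdline_memory_size;

		/* The init section is aligned to 8k in vmlinux.lds.
		 * Page align for >8k pagesizes. */
		addr = PAGE_ALIGN((unsigned long) __init_begin);
		initend = (unsigned long) __init_end & PAGE_MASK;
		for (; addr < initend; addr += PAGE_SIZE) {
			unsigned long page;
			struct page *p;

			page = (addr + ((unsigned long) __va(kern_base)) -
				((unsigned long) KERNBASE));
			/* Poison the init text/data unconditionally... */
			memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

			/* ...but only hand the pages back when the
			 * memory map was not trimmed. */
			if (do_free) {
				p = virt_to_page(page);
				ClearPageReserved(p);
				init_page_count(p);
				__free_page(p);
				num_physpages++;
				totalram_pages++;
			}
		}
	}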
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 3547937b17a..587f8efb2e0 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -1,9 +1,10 @@
/* arch/sparc64/mm/tsb.c
*
- * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
+#include <linux/preempt.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
@@ -415,7 +416,9 @@ retry_tsb_alloc:
tsb_context_switch(mm);
/* Now force other processors to do the same. */
+ preempt_disable();
smp_tsb_sync(mm);
+ preempt_enable();
/* Now it is safe to free the old tsb. */
kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
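The tsb.c fix is about the context in which the TSB cross call runs:
tsb_context_switch() installs the new TSB on the local cpu,
smp_tsb_sync() then forces every other processor to reload it, and
only afterwards may the old TSB be freed.  The smp_* cross-call
primitives must not be invoked from preemptible context, which is what
the added pair enforces.  A commented restatement of the fixed tail of
tsb_grow(), with identifiers as in the hunk above (the comments are
mine, not from the patch):

	/* Install the new TSB on this cpu first... */
	tsb_context_switch(mm);

	/* ...then make the other processors reload it.  Preemption
	 * is disabled so the task cannot migrate to another cpu
	 * while the cross call is in flight. */
	preempt_disable();
	smp_tsb_sync(mm);
	preempt_enable();

	/* Only now is it safe to free the old tsb. */
	kmem_cache_free(tsb_caches[old_cache_index], old_tsb);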
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 4c8ca131ffa..86773e89dc1 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -480,42 +480,6 @@ xcall_sync_tick:
b rtrap_xcall
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
- /* NOTE: This is SPECIAL!! We do etrap/rtrap however
- * we choose to deal with the "BH's run with
- * %pil==15" problem (described in asm/pil.h)
- * by just invoking rtrap directly past where
- * BH's are checked for.
- *
- * We do it like this because we do not want %pil==15
- * lockups to prevent regs being reported.
- */
- .globl xcall_report_regs
-xcall_report_regs:
-
-661: rdpr %pstate, %g2
- wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- nop
- nop
- .previous
-
- rdpr %pil, %g2
- wrpr %g0, 15, %pil
- sethi %hi(109f), %g7
- b,pt %xcc, etrap_irq
-109: or %g7, %lo(109b), %g7
-#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_off
- nop
-#endif
- call __show_regs
- add %sp, PTREGS_OFF, %o0
- /* Has to be a non-v9 branch due to the large distance. */
- b rtrap_xcall
- ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-
-#ifdef CONFIG_MAGIC_SYSRQ
.globl xcall_fetch_glob_regs
xcall_fetch_glob_regs:
sethi %hi(global_reg_snapshot), %g1
@@ -531,6 +495,13 @@ xcall_fetch_glob_regs:
stx %g7, [%g1 + GR_SNAP_TNPC]
stx %o7, [%g1 + GR_SNAP_O7]
stx %i7, [%g1 + GR_SNAP_I7]
+ /* Don't try this at home kids... */
+ rdpr %cwp, %g2
+ sub %g2, 1, %g7
+ wrpr %g7, %cwp
+ mov %i7, %g7
+ wrpr %g2, %cwp
+ stx %g7, [%g1 + GR_SNAP_RPC]
sethi %hi(trap_block), %g7
or %g7, %lo(trap_block), %g7
sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2
@@ -539,7 +510,6 @@ xcall_fetch_glob_regs:
membar #StoreStore
stx %g3, [%g1 + GR_SNAP_THREAD]
retry
-#endif /* CONFIG_MAGIC_SYSRQ */
#ifdef DCACHE_ALIASING_POSSIBLE
.align 32
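The "Don't try this at home" sequence added to xcall_fetch_glob_regs
captures one extra level of call history for the snapshot: it rotates
the register window pointer back by one so the previous window's %i7,
the return address one call frame above the %i7 already stored at
GR_SNAP_I7, becomes readable, then restores the window.  The same six
instructions from the hunk with explanatory comments added (the
comments are mine, not from the patch):

	rdpr	%cwp, %g2		  ! remember the current window
	sub	%g2, 1, %g7
	wrpr	%g7, %cwp		  ! rotate back one window...
	mov	%i7, %g7		  ! ...and grab that window's %i7
	wrpr	%g2, %cwp		  ! restore the original window
	stx	%g7, [%g1 + GR_SNAP_RPC]  ! record it as the snapshot RPC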