From 74bf4312fff083ab25c3f357cc653ada7995e5f6 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:29:18 -0800
Subject: [SPARC64]: Move away from virtual page tables, part 1.

We now use the TSB hardware assist features of the UltraSPARC
MMUs.

SMP is currently knowingly broken; we need to find another place
to store the per-cpu base pointers.  We hid them away in the TSB
base register, and that obviously will not work any more :-)

Another known broken case is non-8KB base page size.

Also noticed that flush_tlb_all() is not referenced anywhere; only
the internal __flush_tlb_all() (local cpu only) is used by the
sparc64 port, so we can get rid of flush_tlb_all().

The kernel gets its own 8KB TSB (swapper_tsb) and each address space
gets its own private 8KB TSB.  Later we can add code to dynamically
increase the size of the per-process TSB as the RSS grows.  An 8KB
TSB is good enough for up to about a 4MB RSS, after which the TSB
starts to incur many capacity and conflict misses.
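
A quick sketch of the arithmetic behind that 4MB figure (illustrative
only, not part of the patch): each TSB slot is a 16-byte tag/PTE pair,
so an 8KB TSB holds 512 entries, and with 8KB base pages a fully
populated table covers 512 * 8KB = 4MB of RSS.  The direct-mapped
index computation, in C:

	#define TSB_BYTES	(8 * 1024)
	#define TSB_ENTRY_SZ	16	/* 8-byte tag + 8-byte PTE */
	#define TSB_NENTRIES	(TSB_BYTES / TSB_ENTRY_SZ)	/* 512 */

	static unsigned long tsb_index(unsigned long vaddr)
	{
		return (vaddr >> 13) & (TSB_NENTRIES - 1); /* PAGE_SHIFT == 13 */
	}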

We even accumulate OBP translations into the kernel TSB.

Another area for refinement is large page size support.  We could use
a secondary address space TSB to handle those.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/mmu.h         |   1 +
 include/asm-sparc64/mmu_context.h |  46 +++--------
 include/asm-sparc64/pgalloc.h     |   1 +
 include/asm-sparc64/pgtable.h     |   9 +--
 include/asm-sparc64/processor.h   |  14 +---
 include/asm-sparc64/tlbflush.h    |  25 ++++--
 include/asm-sparc64/tsb.h         | 165 ++++++++++++++++++++++++++++++++++++++
 7 files changed, 204 insertions(+), 57 deletions(-)
 create mode 100644 include/asm-sparc64/tsb.h

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 8627eed6e83..36384cf7faa 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -92,6 +92,7 @@
 
 typedef struct {
 	unsigned long	sparc64_ctx_val;
+	unsigned long	*sparc64_tsb;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 57ee7b30618..34640a370ab 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -25,7 +25,13 @@ extern void get_new_mmu_context(struct mm_struct *mm);
  * This just needs to set mm->context to an invalid context.
  */
 #define init_new_context(__tsk, __mm)	\
-	(((__mm)->context.sparc64_ctx_val = 0UL), 0)
+({	unsigned long __pg = get_zeroed_page(GFP_KERNEL); \
+	(__mm)->context.sparc64_ctx_val = 0UL; \
+	(__mm)->context.sparc64_tsb = \
+	  (unsigned long *) __pg; \
+	(__pg ? 0 : -ENOMEM); \
+})
+
 
 /* Destroy a dead context.  This occurs when mmput drops the
  * mm_users count to zero, the mmaps have been released, and
@@ -35,7 +41,8 @@ extern void get_new_mmu_context(struct mm_struct *mm);
  * this task if valid.
  */
 #define destroy_context(__mm)					\
-do {	spin_lock(&ctx_alloc_lock);				\
+do {	free_page((unsigned long)(__mm)->context.sparc64_tsb);	\
+	spin_lock(&ctx_alloc_lock);				\
 	if (CTX_VALID((__mm)->context)) {			\
 		unsigned long nr = CTX_NRBITS((__mm)->context);	\
 		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));	\
@@ -43,35 +50,7 @@ do {	spin_lock(&ctx_alloc_lock);				\
 	spin_unlock(&ctx_alloc_lock);				\
 } while(0)
 
-/* Reload the two core values used by TLB miss handler
- * processing on sparc64.  They are:
- * 1) The physical address of mm->pgd, when full page
- *    table walks are necessary, this is where the
- *    search begins.
- * 2) A "PGD cache".  For 32-bit tasks only pgd[0] is
- *    ever used since that maps the entire low 4GB
- *    completely.  To speed up TLB miss processing we
- *    make this value available to the handlers.  This
- *    decreases the amount of memory traffic incurred.
- */
-#define reload_tlbmiss_state(__tsk, __mm) \
-do { \
-	register unsigned long paddr asm("o5"); \
-	register unsigned long pgd_cache asm("o4"); \
-	paddr = __pa((__mm)->pgd); \
-	pgd_cache = 0UL; \
-	if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
-		pgd_cache = get_pgd_cache((__mm)->pgd); \
-	__asm__ __volatile__("wrpr	%%g0, 0x494, %%pstate\n\t" \
-			     "mov	%3, %%g4\n\t" \
-			     "mov	%0, %%g7\n\t" \
-			     "stxa	%1, [%%g4] %2\n\t" \
-			     "membar	#Sync\n\t" \
-			     "wrpr	%%g0, 0x096, %%pstate" \
-			     : /* no outputs */ \
-			     : "r" (paddr), "r" (pgd_cache),\
-			       "i" (ASI_DMMU), "i" (TSB_REG)); \
-} while(0)
+extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
 
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
@@ -101,7 +80,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
-		reload_tlbmiss_state(tsk, mm);
+		tsb_context_switch(__pa(mm->pgd),
+				   mm->context.sparc64_tsb);
 	}
 
 	/* Even if (mm == old_mm) we _must_ check
@@ -139,7 +119,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	reload_tlbmiss_state(current, mm);
+	tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
 }
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index a96067cca96..baf59c00ea4 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -61,6 +61,7 @@ static __inline__ void free_pgd_slow(pgd_t *pgd)
 	free_page((unsigned long)pgd);
 }
 
+/* XXX This crap can die, no longer using virtual page tables... */
 #ifdef DCACHE_ALIASING_POSSIBLE
 #define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
 #define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index f0a9b44d3eb..f3ba1e05819 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -25,7 +25,8 @@
 #include <asm/const.h>
 
 /* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
- * The page copy blockops can use 0x2000000 to 0x10000000.
+ * The page copy blockops can use 0x2000000 to 0x4000000.
+ * The TSB is mapped in the 0x4000000 to 0x6000000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -34,6 +35,7 @@
  * 0x400000000.
  */
 #define	TLBTEMP_BASE		_AC(0x0000000002000000,UL)
+#define	TSBMAP_BASE		_AC(0x0000000004000000,UL)
 #define MODULES_VADDR		_AC(0x0000000010000000,UL)
 #define MODULES_LEN		_AC(0x00000000e0000000,UL)
 #define MODULES_END		_AC(0x00000000f0000000,UL)
@@ -296,11 +298,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-/* extract the pgd cache used for optimizing the tlb miss
- * slow path when executing 32-bit compat processes
- */
-#define get_pgd_cache(pgd)	((unsigned long) pgd_val(*pgd) << 11)
-
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pudp, address)	\
 	((pmd_t *) pud_page(*(pudp)) + \
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index cd8d9b4c865..b3889f3f943 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -28,6 +28,8 @@
  * User lives in his very own context, and cannot reference us. Note
  * that TASK_SIZE is a misnomer, it really gives maximum user virtual 
  * address that the kernel will allocate out.
+ *
+ * XXX No longer using virtual page tables, kill this upper limit...
  */
 #define VA_BITS		44
 #ifndef __ASSEMBLY__
@@ -37,18 +39,6 @@
 #endif
 #define TASK_SIZE	((unsigned long)-VPTE_SIZE)
 
-/*
- * The vpte base must be able to hold the entire vpte, half
- * of which lives above, and half below, the base. And it
- * is placed as close to the highest address range as possible.
- */
-#define VPTE_BASE_SPITFIRE	(-(VPTE_SIZE/2))
-#if 1
-#define VPTE_BASE_CHEETAH	VPTE_BASE_SPITFIRE
-#else
-#define VPTE_BASE_CHEETAH	0xffe0000000000000
-#endif
-
 #ifndef __ASSEMBLY__
 
 typedef struct {
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h
index 3ef9909ac3a..9ad5d9c51d4 100644
--- a/include/asm-sparc64/tlbflush.h
+++ b/include/asm-sparc64/tlbflush.h
@@ -5,6 +5,11 @@
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
 
+/* TSB flush operations. */
+struct mmu_gather;
+extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tsb_user(struct mmu_gather *mp);
+
 /* TLB flush operations. */
 
 extern void flush_tlb_pending(void);
@@ -14,28 +19,36 @@ extern void flush_tlb_pending(void);
 #define flush_tlb_page(vma,addr)	flush_tlb_pending()
 #define flush_tlb_mm(mm)		flush_tlb_pending()
 
+/* Local cpu only.  */
 extern void __flush_tlb_all(void);
+
 extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
 
 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
 
-#define flush_tlb_all()		__flush_tlb_all()
 #define flush_tlb_kernel_range(start,end) \
-	__flush_tlb_kernel_range(start,end)
+do {	flush_tsb_kernel_range(start,end); \
+	__flush_tlb_kernel_range(start,end); \
+} while (0)
 
 #else /* CONFIG_SMP */
 
-extern void smp_flush_tlb_all(void);
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
-#define flush_tlb_all()		smp_flush_tlb_all()
 #define flush_tlb_kernel_range(start, end) \
-	smp_flush_tlb_kernel_range(start, end)
+do {	flush_tsb_kernel_range(start,end); \
+	smp_flush_tlb_kernel_range(start, end); \
+} while (0)
 
 #endif /* ! CONFIG_SMP */
 
-extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long);
+static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	/* We don't use virtual page tables for TLB miss processing
+	 * any more.  Nowadays we use the TSB.
+	 */
+}
 
 #endif /* _SPARC64_TLBFLUSH_H */
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
new file mode 100644
index 00000000000..03d272e0e47
--- /dev/null
+++ b/include/asm-sparc64/tsb.h
@@ -0,0 +1,165 @@
+#ifndef _SPARC64_TSB_H
+#define _SPARC64_TSB_H
+
+/* The sparc64 TSB is similar to the powerpc hashtables.  It's a
+ * power-of-2 sized table of TAG/PTE pairs.  The cpu precomputes
+ * pointers into this table for 8K and 64K page sizes, and also a
+ * comparison TAG based upon the virtual address and context which
+ * faults.
+ *
+ * TLB miss trap handler software does the actual lookup via something
+ * of the form:
+ *
+ * 	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
+ * 	ldxa		[%g0] ASI_{D,I}MMU, %g6
+ * 	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
+ * 	cmp		%g4, %g6
+ * 	bne,pn	%xcc, tsb_miss_{d,i}tlb
+ * 	 mov		FAULT_CODE_{D,I}TLB, %g3
+ * 	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
+ * 	retry
+ *
+
+ * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
+ * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
+ * register which is:
+ *
+ * -------------------------------------------------
+ * |  -  |  CONTEXT |  -  |    VADDR bits 63:22    |
+ * -------------------------------------------------
+ *  63 61 60      48 47 42 41                     0
+ *
+ * Like the powerpc hashtables we need to use locking in order to
+ * synchronize while we update the entries.  PTE updates need locking
+ * as well.
+ *
+ * We need to carefully choose a lock bit for the TSB entry.  We
+ * choose to use bit 47 in the tag.  Also, since we never map anything
+ * at page zero in context zero, we use zero as an invalid tag entry.
+ * When the lock bit is set, this forces a tag comparison failure.
+ *
+ * Currently, we allocate an 8K TSB per-process and we use it for both
+ * I-TLB and D-TLB misses.  Perhaps at some point we'll add code that
+ * monitors the number of active pages in the process as we get
+ * major/minor faults, and grow the TSB in response.  The only trick
+ * in implementing that is synchronizing the freeing of the old TSB
+ * wrt.  parallel TSB updates occurring on other processors.  One
+ * possible solution is to use RCU for the freeing of the TSB.
+ */
+
+#define TSB_TAG_LOCK	(1 << (47 - 32))
+
+#define TSB_MEMBAR	membar	#StoreStore
+
+#define TSB_LOCK_TAG(TSB, REG1, REG2)	\
+99:	lduwa	[TSB] ASI_N, REG1;	\
+	sethi	%hi(TSB_TAG_LOCK), REG2;\
+	andcc	REG1, REG2, %g0;	\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	casa	[TSB] ASI_N, REG1, REG2;\
+	cmp	REG1, REG2;		\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	TSB_MEMBAR
+
+#define TSB_WRITE(TSB, TTE, TAG)	   \
+	stx		TTE, [TSB + 0x08]; \
+	TSB_MEMBAR;			   \
+	stx		TAG, [TSB + 0x00];
+
+	/* Do a kernel page table walk.  Leaves physical PTE pointer in
+	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
+	 * VADDR will not be clobbered, but REG2 will.
+	 */
+#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
+	sethi		%hi(swapper_pg_dir), REG1; \
+	or		REG1, %lo(swapper_pg_dir), REG1; \
+	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+	andn		REG2, 0x3, REG2; \
+	lduw		[REG1 + REG2], REG1; \
+	brz,pn		REG1, FAIL_LABEL; \
+	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+	sllx		REG1, 11, REG1; \
+	andn		REG2, 0x3, REG2; \
+	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	brz,pn		REG1, FAIL_LABEL; \
+	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+	sllx		REG1, 11, REG1; \
+	andn		REG2, 0x7, REG2; \
+	add		REG1, REG2, REG1;
+
+	/* Do a user page table walk in MMU globals.  Leaves physical PTE
+	 * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
+	 * termination.  Physical base of page tables is in PHYS_PGD which
+	 * will not be modified.
+	 *
+	 * VADDR will not be clobbered, but REG1 and REG2 will.
+	 */
+#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL)	\
+	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+	andn		REG2, 0x3, REG2; \
+	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
+	brz,pn		REG1, FAIL_LABEL; \
+	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+	sllx		REG1, 11, REG1; \
+	andn		REG2, 0x3, REG2; \
+	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	brz,pn		REG1, FAIL_LABEL; \
+	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+	sllx		REG1, 11, REG1; \
+	andn		REG2, 0x7, REG2; \
+	add		REG1, REG2, REG1;
+
+/* Lookup an OBP mapping on VADDR in the prom_trans[] table at TL>0.
+ * If no entry is found, FAIL_LABEL will be branched to.  On success
+ * the resulting PTE value will be left in REG1.  VADDR is preserved
+ * by this routine.
+ */
+#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
+	sethi		%hi(prom_trans), REG1; \
+	or		REG1, %lo(prom_trans), REG1; \
+97:	ldx		[REG1 + 0x00], REG2; \
+	brz,pn		REG2, FAIL_LABEL; \
+	 nop; \
+	ldx		[REG1 + 0x08], REG3; \
+	add		REG2, REG3, REG3; \
+	cmp		REG2, VADDR; \
+	bgu,pt		%xcc, 98f; \
+	 cmp		VADDR, REG3; \
+	bgeu,pt		%xcc, 98f; \
+	 ldx		[REG1 + 0x10], REG3; \
+	sub		VADDR, REG2, REG2; \
+	ba,pt		%xcc, 99f; \
+	 add		REG3, REG2, REG1; \
+98:	ba,pt		%xcc, 97b; \
+	 add		REG1, (3 * 8), REG1; \
+99:
+
+	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
+	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
+	 * and the found TTE will be left in REG1.  REG3 and REG4 must
+	 * be an even/odd pair of registers.
+	 *
+	 * VADDR and TAG will be preserved and not clobbered by this macro.
+	 */
+	/* XXX non-8K base page size support... */
+#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+	sethi		%hi(swapper_tsb), REG1; \
+	or		REG1, %lo(swapper_tsb), REG1; \
+	srlx		VADDR, 13, REG2; \
+	and		REG2, (512 - 1), REG2; \
+	sllx		REG2, 4, REG2; \
+	add		REG1, REG2, REG2; \
+	ldda		[REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \
+	cmp		REG3, TAG; \
+	be,a,pt		%xcc, OK_LABEL; \
+	 mov		REG4, REG1;
+
+#endif /* !(_SPARC64_TSB_H) */
-- 
cgit v1.2.3-70-g09d2


From 05e28f9de65a38bb0c769080e91b6976e7e1e70c Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:30:13 -0800
Subject: [SPARC64]: No need to D-cache color page tables any longer.

Unlike the virtual page tables, the new TSB scheme does not
require this ugly hack.
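
For context, the hack paired page-table pages up by D-cache color so
that the virtually-indexed D-cache lines backing the virtual page
tables would not alias.  The color math came down to the macros this
patch deletes (reproduced here for reference):

	#ifdef DCACHE_ALIASING_POSSIBLE
	#define VPTE_COLOR(address)	(((address) >> (PAGE_SHIFT + 10)) & 1UL)
	#define DCACHE_COLOR(address)	(((address) >> PAGE_SHIFT) & 1UL)
	#endif

With TSB-based miss handling there is no virtual page table mapping to
keep alias-free, so the color constraint, and the dual quicklists that
enforced it, can go away.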

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/mm/init.c        |  71 +++--------------------------
 include/asm-sparc64/cpudata.h |   5 ++-
 include/asm-sparc64/pgalloc.h | 101 +++++++++++++++++++-----------------------
 3 files changed, 55 insertions(+), 122 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index da068f6b259..936ae1a594a 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -145,6 +145,10 @@ int bigkernel = 0;
 #define PGT_CACHE_LOW	25
 #define PGT_CACHE_HIGH	50
 
+#ifndef CONFIG_SMP
+struct pgtable_cache_struct pgt_quicklists;
+#endif
+
 void check_pgt_cache(void)
 {
 	preempt_disable();
@@ -152,10 +156,8 @@ void check_pgt_cache(void)
 		do {
 			if (pgd_quicklist)
 				free_pgd_slow(get_pgd_fast());
-			if (pte_quicklist[0])
-				free_pte_slow(pte_alloc_one_fast(NULL, 0));
-			if (pte_quicklist[1])
-				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
+			if (pte_quicklist)
+				free_pte_slow(pte_alloc_one_fast());
 		} while (pgtable_cache_size > PGT_CACHE_LOW);
 	}
 	preempt_enable();
@@ -962,67 +964,6 @@ out:
 	spin_unlock(&ctx_alloc_lock);
 }
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
-
-/* XXX We don't need to color these things in the D-cache any longer.  */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define DC_ALIAS_SHIFT	1
-#else
-#define DC_ALIAS_SHIFT	0
-#endif
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	struct page *page;
-	unsigned long color;
-
-	{
-		pte_t *ptep = pte_alloc_one_fast(mm, address);
-
-		if (ptep)
-			return ptep;
-	}
-
-	color = VPTE_COLOR(address);
-	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
-	if (page) {
-		unsigned long *to_free;
-		unsigned long paddr;
-		pte_t *pte;
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		set_page_count(page, 1);
-		ClearPageCompound(page);
-
-		set_page_count((page + 1), 1);
-		ClearPageCompound(page + 1);
-#endif
-		paddr = (unsigned long) page_address(page);
-		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
-
-		if (!color) {
-			pte = (pte_t *) paddr;
-			to_free = (unsigned long *) (paddr + PAGE_SIZE);
-		} else {
-			pte = (pte_t *) (paddr + PAGE_SIZE);
-			to_free = (unsigned long *) paddr;
-		}
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		/* Now free the other one up, adjust cache size. */
-		preempt_disable();
-		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
-		pte_quicklist[color ^ 0x1] = to_free;
-		pgtable_cache_size++;
-		preempt_enable();
-#endif
-
-		return pte;
-	}
-	return NULL;
-}
-
 void sparc_ultra_dump_itlb(void)
 {
         int slot;
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 74de79dca91..45a9a2cfaf7 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -20,8 +20,9 @@ typedef struct {
 	/* Dcache line 2 */
 	unsigned int	pgcache_size;
 	unsigned int	__pad1;
-	unsigned long	*pte_cache[2];
+	unsigned long	*pte_cache;
 	unsigned long	*pgd_cache;
+	unsigned long	__pad2;
 
 	/* Dcache line 3, rarely used */
 	unsigned int	dcache_size;
@@ -30,8 +31,8 @@ typedef struct {
 	unsigned int	icache_line_size;
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
-	unsigned int	__pad2;
 	unsigned int	__pad3;
+	unsigned int	__pad4;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index baf59c00ea4..ecea1bbdc11 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -19,16 +19,15 @@
 #else
 extern struct pgtable_cache_struct {
 	unsigned long *pgd_cache;
-	unsigned long *pte_cache[2];
+	unsigned long *pte_cache;
 	unsigned int pgcache_size;
 } pgt_quicklists;
 #endif
 #define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pmd_quicklist		((unsigned long *)0)
 #define pte_quicklist		(pgt_quicklists.pte_cache)
 #define pgtable_cache_size	(pgt_quicklists.pgcache_size)
 
-static __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
 	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -37,7 +36,7 @@ static __inline__ void free_pgd_fast(pgd_t *pgd)
 	preempt_enable();
 }
 
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
 
@@ -56,47 +55,35 @@ static __inline__ pgd_t *get_pgd_fast(void)
 	return (pgd_t *)ret;
 }
 
-static __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
 	free_page((unsigned long)pgd);
 }
 
-/* XXX This crap can die, no longer using virtual page tables... */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
-#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
-#else
-#define VPTE_COLOR(address)		0
-#define DCACHE_COLOR(address)		0
-#endif
-
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one_fast(void)
 {
 	unsigned long *ret;
-	int color = 0;
 
 	preempt_disable();
-	if (pte_quicklist[color] == NULL)
-		color = 1;
-
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
 
-	return (pmd_t *)ret;
+	return (pmd_t *) ret;
 }
 
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;
 
-	pmd = pmd_alloc_one_fast(mm, address);
-	if (!pmd) {
+	pmd = pmd_alloc_one_fast();
+	if (unlikely(!pmd)) {
 		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 		if (pmd)
 			memset(pmd, 0, PAGE_SIZE);
@@ -104,18 +91,16 @@ static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addre
 	return pmd;
 }
 
-static __inline__ void free_pmd_fast(pmd_t *pmd)
+static inline void free_pmd_fast(pmd_t *pmd)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
-
 	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pmd;
+	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pmd;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pmd_slow(pmd_t *pmd)
+static inline void free_pmd_slow(pmd_t *pmd)
 {
 	free_page((unsigned long)pmd);
 }
@@ -124,48 +109,54 @@ static __inline__ void free_pmd_slow(pmd_t *pmd)
 #define pmd_populate(MM,PMD,PTE_PAGE)		\
 	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, addr);
-
-	if (pte)
-		return virt_to_page(pte);
-
-	return NULL;
-}
-
-static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(void)
 {
-	unsigned long color = VPTE_COLOR(address);
 	unsigned long *ret;
 
 	preempt_disable();
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
-	return (pte_t *)ret;
+
+	return (pte_t *) ret;
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *ptep = pte_alloc_one_fast();
+
+	if (likely(ptep))
+		return ptep;
+
+	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
-static __inline__ void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pte);
+	pte_t *pte = pte_alloc_one_fast();
 
+	if (likely(pte))
+		return virt_to_page(pte);
+
+	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+}
+
+static inline void free_pte_fast(pte_t *pte)
+{
 	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pte;
+	*(unsigned long *)pte = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pte_slow(pte_t *pte)
+static inline void free_pte_slow(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	free_page((unsigned long) pte);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
-- 
cgit v1.2.3-70-g09d2


From 3c936465249f863f322154ff1aaa628b84ee5750 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:30:27 -0800
Subject: [SPARC64]: Kill pgtable quicklists and use SLAB.

Taking a nod from the powerpc port.

With the per-cpu caching of both the page allocator and SLAB, the
pgtable quicklist scheme becomes relatively silly and primitive.
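
Condensed, the replacement the hunks below install is a single
page-sized, zero-initialized SLAB cache serving pgd/pmd/pte
allocations alike.  A sketch (error handling elided; zero_ctor is the
clear_page() constructor added in mm/init.c):

	kmem_cache_t *pgtable_cache;

	void pgtable_cache_init(void)
	{
		pgtable_cache = kmem_cache_create("pgtable_cache",
						  PAGE_SIZE, PAGE_SIZE,
						  SLAB_HWCACHE_ALIGN |
						  SLAB_MUST_HWCACHE_ALIGN,
						  zero_ctor, NULL);
	}

	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
	{
		return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
	}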

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/sparc64_ksyms.c |   4 -
 arch/sparc64/mm/init.c              |  32 ++++----
 include/asm-sparc64/cpudata.h       |   9 +-
 include/asm-sparc64/pgalloc.h       | 158 ++++++------------------------------
 include/asm-sparc64/pgtable.h       |   7 +-
 5 files changed, 44 insertions(+), 166 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 3c06bfb92a8..f1f01378d07 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -241,10 +241,6 @@ EXPORT_SYMBOL(verify_compat_iovec);
 #endif
 
 EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(pte_alloc_one_kernel);
-#ifndef CONFIG_SMP
-EXPORT_SYMBOL(pgt_quicklists);
-#endif
 EXPORT_SYMBOL(put_fs_struct);
 
 /* math-emu wants this */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 936ae1a594a..7c456afaa9a 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -141,26 +141,25 @@ unsigned long sparc64_kern_sec_context __read_mostly;
 
 int bigkernel = 0;
 
-/* XXX Tune this... */
-#define PGT_CACHE_LOW	25
-#define PGT_CACHE_HIGH	50
+kmem_cache_t *pgtable_cache __read_mostly;
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+{
+	clear_page(addr);
+}
 
-void check_pgt_cache(void)
+void pgtable_cache_init(void)
 {
-	preempt_disable();
-	if (pgtable_cache_size > PGT_CACHE_HIGH) {
-		do {
-			if (pgd_quicklist)
-				free_pgd_slow(get_pgd_fast());
-			if (pte_quicklist)
-				free_pte_slow(pte_alloc_one_fast());
-		} while (pgtable_cache_size > PGT_CACHE_LOW);
+	pgtable_cache = kmem_cache_create("pgtable_cache",
+					  PAGE_SIZE, PAGE_SIZE,
+					  SLAB_HWCACHE_ALIGN |
+					  SLAB_MUST_HWCACHE_ALIGN,
+					  zero_ctor,
+					  NULL);
+	if (!pgtable_cache) {
+		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_halt();
 	}
-	preempt_enable();
 }
 
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -340,7 +339,6 @@ void show_mem(void)
 	       nr_swap_pages << (PAGE_SHIFT-10));
 	printk("%ld pages of RAM\n", num_physpages);
 	printk("%d free pages\n", nr_free_pages());
-	printk("%d pages in page table cache\n",pgtable_cache_size);
 }
 
 void mmu_info(struct seq_file *m)
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 45a9a2cfaf7..f7c0faede8b 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -17,14 +17,7 @@ typedef struct {
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	udelay_val;
 
-	/* Dcache line 2 */
-	unsigned int	pgcache_size;
-	unsigned int	__pad1;
-	unsigned long	*pte_cache;
-	unsigned long	*pgd_cache;
-	unsigned long	__pad2;
-
-	/* Dcache line 3, rarely used */
+	/* Dcache line 2, rarely used */
 	unsigned int	dcache_size;
 	unsigned int	dcache_line_size;
 	unsigned int	icache_size;
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index ecea1bbdc11..12e4a273bd4 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
@@ -13,164 +14,59 @@
 #include <asm/page.h>
 
 /* Page table allocation/freeing. */
-#ifdef CONFIG_SMP
-/* Sliiiicck */
-#define pgt_quicklists	local_cpu_data()
-#else
-extern struct pgtable_cache_struct {
-	unsigned long *pgd_cache;
-	unsigned long *pte_cache;
-	unsigned int pgcache_size;
-} pgt_quicklists;
-#endif
-#define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pte_quicklist		(pgt_quicklists.pte_cache)
-#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
+extern kmem_cache_t *pgtable_cache;
 
-static inline void free_pgd_fast(pgd_t *pgd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	preempt_disable();
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-	preempt_enable();
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
-static inline pgd_t *get_pgd_fast(void)
+static inline void pgd_free(pgd_t *pgd)
 {
-	unsigned long *ret;
-
-	preempt_disable();
-	if((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-		preempt_enable();
-	} else {
-		preempt_enable();
-		ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		if(ret)
-			memset(ret, 0, PAGE_SIZE);
-	}
-	return (pgd_t *)ret;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
+	kmem_cache_free(pgtable_cache, pgd);
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static inline pmd_t *pmd_alloc_one_fast(void)
-{
-	unsigned long *ret;
-
-	preempt_disable();
-	ret = (unsigned long *) pte_quicklist;
-	if (likely(ret)) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	}
-	preempt_enable();
-
-	return (pmd_t *) ret;
-}
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	pmd_t *pmd;
-
-	pmd = pmd_alloc_one_fast();
-	if (unlikely(!pmd)) {
-		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		if (pmd)
-			memset(pmd, 0, PAGE_SIZE);
-	}
-	return pmd;
-}
-
-static inline void free_pmd_fast(pmd_t *pmd)
-{
-	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pmd;
-	pgtable_cache_size++;
-	preempt_enable();
-}
-
-static inline void free_pmd_slow(pmd_t *pmd)
-{
-	free_page((unsigned long)pmd);
-}
-
-#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
-#define pmd_populate(MM,PMD,PTE_PAGE)		\
-	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-
-static inline pte_t *pte_alloc_one_fast(void)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	unsigned long *ret;
-
-	preempt_disable();
-	ret = (unsigned long *) pte_quicklist;
-	if (likely(ret)) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	}
-	preempt_enable();
-
-	return (pte_t *) ret;
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline void pmd_free(pmd_t *pmd)
 {
-	pte_t *ptep = pte_alloc_one_fast();
-
-	if (likely(ptep))
-		return ptep;
-
-	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	kmem_cache_free(pgtable_cache, pmd);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
 {
-	pte_t *pte = pte_alloc_one_fast();
-
-	if (likely(pte))
-		return virt_to_page(pte);
-
-	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
 {
-	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
-	preempt_enable();
+	return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
-
-static inline void free_pte_slow(pte_t *pte)
-{
-	free_page((unsigned long) pte);
-}
-
+
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_pte_fast(pte);
+	kmem_cache_free(pgtable_cache, pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	free_pte_fast(page_address(ptepage));
+	pte_free_kernel(page_address(ptepage));
 }
 
-#define pmd_free(pmd)		free_pmd_fast(pmd)
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#define pgd_alloc(mm)		get_pgd_fast()
+
+#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
+#define pmd_populate(MM,PMD,PTE_PAGE)		\
+	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
+
+#define check_pgt_cache()	do { } while (0)
 
 #endif /* _SPARC64_PGALLOC_H */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index f3ba1e05819..77ba0b6cc1c 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -432,12 +432,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
 					  unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
-extern void check_pgt_cache(void);
+extern void pgtable_cache_init(void);
 
 #endif /* !(__ASSEMBLY__) */
 
-- 
cgit v1.2.3-70-g09d2


From 56fb4df6da76c35dca22036174e2d1edef83ff1f Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sun, 26 Feb 2006 23:24:22 -0800
Subject: [SPARC64]: Eliminate all usage of hard-coded trap globals.

UltraSPARC has special sets of global registers which are switched to
for certain trap types.  There is one set for MMU related traps, one
set for Interrupt Vector processing, and another set (called the
Alternate globals) for all other trap types.

For what seems like forever we've hard coded the values in some of
these trap registers.  Some examples include:

1) Interrupt Vector global %g6 holds the current processor's interrupt
   work struct where received interrupts are managed for IRQ handler
   dispatch.

2) MMU global %g7 holds the base of the page tables of the currently
   active address space.

3) Alternate global %g6 held the current_thread_info() value.

Such hardcoding has resulted in some serious issues in many areas.
There are some code sequences where having another register available
would help clean up the implementation.  Taking traps such as
cross-calls from the OBP firmware requires some tricky code sequences
wherein we have to save away and restore all of the special sets of
global registers when we enter/exit OBP.

We were also using the IMMU TSB register on SMP to hold the per-cpu
area base address, which doesn't work any longer now that we actually
use the TSB facility of the cpu.

The implementation is pretty straightforward.  One tricky bit is
getting the current processor ID, as that is different on different cpu
variants.  We use a stub with a fancy calling convention which we
patch at boot time.  The calling convention is that the stub is
branched to and the (PC - 4) to return to is in register %g1.  The cpu
number is left in %g6.  This stub can be invoked by using the
__GET_CPUID macro.
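
Concretely, a sketch of how a trap handler invokes the stub via the
__GET_CPUID macro added to cpudata.h below:

	ba,pt	%xcc, __get_cpu_id	! branch to boot-time patched stub
	 rd	%pc, %g1		! delay slot: %g1 = address of rd
					! stub returns via jmpl %g1 + 0x4,
					! i.e. the instruction after the rd,
					! leaving the cpu number in %g6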

We use an array of per-cpu trap state to store the current thread and
physical address of the current address space's page tables.  The
TRAP_LOAD_THREAD_REG loads %g6 with the current thread from this
table; it uses __GET_CPUID and also clobbers %g1.

TRAP_LOAD_IRQ_WORK is used by the interrupt vector processing to load
the current processor's IRQ software state into %g6.  It also uses
__GET_CPUID and clobbers %g1.

Finally, TRAP_LOAD_PGD_PHYS loads the physical address base of the
current address space's page tables into %g7; it clobbers %g1 and uses
__GET_CPUID.
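
In C terms (illustrative only; the real TRAP_LOAD_* macros are
assembler, see the cpudata.h hunks below), the thread and PGD loaders
both reduce to an index into the new trap_block[] array:

	struct trap_per_cpu *tb = &trap_block[cpu]; /* cpu via __GET_CPUID */

	thread   = tb->thread;		/* TRAP_LOAD_THREAD_REG -> %g6 */
	pgd_phys = tb->pgd_paddr;	/* TRAP_LOAD_PGD_PHYS   -> %g7 */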

Many refinements are possible, as well as some tuning, with this stuff
in place.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S      | 122 +++++++++++++++++++++++++++++++++------
 arch/sparc64/kernel/etrap.S      |  18 +++---
 arch/sparc64/kernel/head.S       |  20 +------
 arch/sparc64/kernel/irq.c        |  26 +--------
 arch/sparc64/kernel/rtrap.S      |  10 ++--
 arch/sparc64/kernel/setup.c      |   8 +++
 arch/sparc64/kernel/smp.c        |  55 ++++--------------
 arch/sparc64/kernel/trampoline.S |   9 +--
 arch/sparc64/kernel/traps.c      |  19 ++++++
 arch/sparc64/kernel/tsb.S        |  26 +++++++--
 arch/sparc64/kernel/ttable.S     |   2 +-
 arch/sparc64/kernel/winfixup.S   |  24 +++-----
 arch/sparc64/mm/ultra.S          |  10 ++--
 include/asm-sparc64/cpudata.h    |  86 ++++++++++++++++++++++++++-
 include/asm-sparc64/system.h     |   2 +
 include/asm-sparc64/ttable.h     |  18 +++---
 16 files changed, 287 insertions(+), 168 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index a73553ae7e5..906b64ffdb1 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -50,7 +50,8 @@ do_fpdis:
 	add		%g0, %g0, %g0
 	ba,a,pt		%xcc, rtrap_clr_l6
 
-1:	ldub		[%g6 + TI_FPSAVED], %g5
+1:	TRAP_LOAD_THREAD_REG
+	ldub		[%g6 + TI_FPSAVED], %g5
 	wr		%g0, FPRS_FEF, %fprs
 	andcc		%g5, FPRS_FEF, %g0
 	be,a,pt		%icc, 1f
@@ -189,6 +190,7 @@ fp_other_bounce:
 	.globl		do_fpother_check_fitos
 	.align		32
 do_fpother_check_fitos:
+	TRAP_LOAD_THREAD_REG
 	sethi		%hi(fp_other_bounce - 4), %g7
 	or		%g7, %lo(fp_other_bounce - 4), %g7
 
@@ -353,8 +355,6 @@ do_fptrap_after_fsr:
 	 *
 	 * With this method we can do most of the cross-call tlb/cache
 	 * flushing very quickly.
-	 *
-	 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
 	 */
 	.text
 	.align		32
@@ -378,6 +378,8 @@ do_ivec:
 	sllx		%g2, %g4, %g2
 	sllx		%g4, 2, %g4
 
+	TRAP_LOAD_IRQ_WORK
+
 	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
 	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
 	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
@@ -488,9 +490,24 @@ setcc:
 	retl
 	 stx		%o1, [%o0 + PT_V9_TSTATE]
 
-	.globl		utrap, utrap_ill
-utrap:	brz,pn		%g1, etrap
+	.globl		utrap_trap
+utrap_trap:		/* %g3=handler,%g4=level */
+	TRAP_LOAD_THREAD_REG
+	ldx		[%g6 + TI_UTRAPS], %g1
+	brnz,pt		%g1, invoke_utrap
 	 nop
+
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	mov		%l4, %o1
+        call		bad_trap
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+invoke_utrap:
+	sllx		%g3, 3, %g3
+	ldx		[%g1 + %g3], %g1
 	save		%sp, -128, %sp
 	rdpr		%tstate, %l6
 	rdpr		%cwp, %l7
@@ -500,17 +517,6 @@ utrap:	brz,pn		%g1, etrap
 	rdpr		%tnpc, %l7
 	wrpr		%g1, 0, %tnpc
 	done
-utrap_ill:
-        call		bad_trap
-	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 clr		%l6
-
-	/* XXX Here is stuff we still need to write... -DaveM XXX */
-	.globl		netbsd_syscall
-netbsd_syscall:
-	retl
-	 nop
 
 	/* We need to carefully read the error status, ACK
 	 * the errors, prevent recursive traps, and pass the
@@ -1001,7 +1007,7 @@ dcpe_icpe_tl1_common:
 	 * %g3:		scratch
 	 * %g4:		AFSR
 	 * %g5:		AFAR
-	 * %g6:		current thread ptr
+	 * %g6:		unused, will have current thread ptr after etrap
 	 * %g7:		scratch
 	 */
 __cheetah_log_error:
@@ -1690,3 +1696,85 @@ __flushw_user:
 	 restore	%g0, %g0, %g0
 2:	retl
 	 nop
+
+	/* Read cpu ID from hardware, return in %g6.
+	 * (callers_pc - 4) is in %g1.  Patched at boot time.
+	 *
+	 * Default is spitfire implementation.
+	 *
+	 * The instruction sequence needs to be 5 instructions
+	 * in order to fit the longest implementation, which is
+	 * currently starfire.
+	 */
+	.align		32
+	.globl		__get_cpu_id
+__get_cpu_id:
+	ldxa		[%g0] ASI_UPA_CONFIG, %g6
+	srlx		%g6, 17, %g6
+	jmpl		%g1 + 0x4, %g0
+	 and		%g6, 0x1f, %g6
+	nop
+
+__get_cpu_id_cheetah_safari:
+	ldxa		[%g0] ASI_SAFARI_CONFIG, %g6
+	srlx		%g6, 17, %g6
+	jmpl		%g1 + 0x4, %g0
+	 and		%g6, 0x3ff, %g6
+	nop
+
+__get_cpu_id_cheetah_jbus:
+	ldxa		[%g0] ASI_JBUS_CONFIG, %g6
+	srlx		%g6, 17, %g6
+	jmpl		%g1 + 0x4, %g0
+	 and		%g6, 0x1f, %g6
+	nop
+
+__get_cpu_id_starfire:
+	sethi		%hi(0x1fff40000d0 >> 9), %g6
+	sllx		%g6, 9, %g6
+	or		%g6, 0xd0, %g6
+	jmpl		%g1 + 0x4, %g0
+	 lduwa		[%g6] ASI_PHYS_BYPASS_EC_E, %g6
+
+	.globl		per_cpu_patch
+per_cpu_patch:
+	sethi		%hi(this_is_starfire), %o0
+	lduw		[%o0 + %lo(this_is_starfire)], %o1
+	sethi		%hi(__get_cpu_id_starfire), %o0
+	brnz,pn		%o1, 10f
+	 or		%o0, %lo(__get_cpu_id_starfire), %o0
+	sethi		%hi(tlb_type), %o0
+	lduw		[%o0 + %lo(tlb_type)], %o1
+	brz,pt		%o1, 11f
+	 nop
+	rdpr		%ver, %o0
+	srlx		%o0, 32, %o0
+	sethi		%hi(0x003e0016), %o1
+	or		%o1, %lo(0x003e0016), %o1
+	cmp		%o0, %o1
+	sethi		%hi(__get_cpu_id_cheetah_jbus), %o0
+	be,pn		%icc, 10f
+	 or		%o0, %lo(__get_cpu_id_cheetah_jbus), %o0
+	sethi		%hi(__get_cpu_id_cheetah_safari), %o0
+	or		%o0, %lo(__get_cpu_id_cheetah_safari), %o0
+10:
+	sethi		%hi(__get_cpu_id), %o1
+	or		%o1, %lo(__get_cpu_id), %o1
+	lduw		[%o0 + 0x00], %o2
+	stw		%o2, [%o1 + 0x00]
+	flush		%o1 + 0x00
+	lduw		[%o0 + 0x04], %o2
+	stw		%o2, [%o1 + 0x04]
+	flush		%o1 + 0x04
+	lduw		[%o0 + 0x08], %o2
+	stw		%o2, [%o1 + 0x08]
+	flush		%o1 + 0x08
+	lduw		[%o0 + 0x0c], %o2
+	stw		%o2, [%o1 + 0x0c]
+	flush		%o1 + 0x0c
+	lduw		[%o0 + 0x10], %o2
+	stw		%o2, [%o1 + 0x10]
+	flush		%o1 + 0x10
+11:
+	retl
+	 nop
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 567dbb765c3..8b3b6d720ed 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -31,6 +31,7 @@
 		.globl	etrap, etrap_irq, etraptl1
 etrap:		rdpr	%pil, %g2
 etrap_irq:
+		TRAP_LOAD_THREAD_REG
 		rdpr	%tstate, %g1
 		sllx	%g2, 20, %g3
 		andcc	%g1, TSTATE_PRIV, %g0
@@ -98,11 +99,7 @@ etrap_irq:
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
 		wrpr	%g0, ETRAP_PSTATE2, %pstate
 		mov	%l6, %g6
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-		mov	TSB_REG, %g3
-		ldxa	[%g3] ASI_IMMU, %g5
-#endif
+		LOAD_PER_CPU_BASE(%g4, %g3)
 		jmpl	%l2 + 0x4, %g0
 		 ldx	[%g6 + TI_TASK], %g4
 
@@ -126,6 +123,7 @@ etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 		 *	0x58	TL4's TT
 		 *	0x60	TL
 		 */
+		TRAP_LOAD_THREAD_REG
 		sub	%sp, ((4 * 8) * 4) + 8, %g2
 		rdpr	%tl, %g1
 
@@ -179,7 +177,9 @@ etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 
 		.align	64
 		.globl	scetrap
-scetrap:	rdpr	%pil, %g2
+scetrap:
+		TRAP_LOAD_THREAD_REG
+		rdpr	%pil, %g2
 		rdpr	%tstate, %g1
 		sllx	%g2, 20, %g3
 		andcc	%g1, TSTATE_PRIV, %g0
@@ -248,11 +248,7 @@ scetrap:	rdpr	%pil, %g2
 		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
 		mov	%l6, %g6
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-		mov	TSB_REG, %g3
-		ldxa	[%g3] ASI_IMMU, %g5
-#endif
+		LOAD_PER_CPU_BASE(%g4, %g3)
 		ldx	[%g6 + TI_TASK], %g4
 		done
 
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index d00e20693be..82ce5bced9c 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -26,6 +26,7 @@
 #include <asm/head.h>
 #include <asm/ttable.h>
 #include <asm/mmu.h>
+#include <asm/cpudata.h>
 	
 /* This section from _start to sparc64_boot_end should fit into
  * 0x0000000000404000 to 0x0000000000408000.
@@ -421,24 +422,6 @@ setup_trap_table:
 	stxa	%g2, [%g1] ASI_DMMU
 	membar	#Sync
 
-	/* The Linux trap handlers expect various trap global registers
-	 * to be setup with some fixed values.  So here we set these
-	 * up very carefully.  These globals are:
-	 *
-	 * Alternate Globals (PSTATE_AG):
-	 *
-	 * %g6			--> current_thread_info()
-	 *
-	 * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()):
-	 *
-	 * %g6			--> __irq_work[smp_processor_id()]
-	 */
-
-	rdpr	%pstate, %o1
-	mov	%g6, %o2
-	wrpr	%o1, PSTATE_AG, %pstate
-	mov	%o2, %g6
-
 	/* Kill PROM timer */
 	sethi	%hi(0x80000000), %o2
 	sllx	%o2, 32, %o2
@@ -457,7 +440,6 @@ setup_trap_table:
 
 2:
 	wrpr	%g0, %g0, %wstate
-	wrpr	%o1, 0x0, %pstate
 
 	call	init_irqwork_curcpu
 	 nop
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index f7490ef629b..3e48af2769d 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -848,33 +848,9 @@ static void kill_prom_timer(void)
 
 void init_irqwork_curcpu(void)
 {
-	register struct irq_work_struct *workp asm("o2");
-	register unsigned long tmp asm("o3");
 	int cpu = hard_smp_processor_id();
 
-	memset(__irq_work + cpu, 0, sizeof(*workp));
-
-	/* Make sure we are called with PSTATE_IE disabled.  */
-	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
-			     : "=r" (tmp));
-	if (tmp & PSTATE_IE) {
-		prom_printf("BUG: init_irqwork_curcpu() called with "
-			    "PSTATE_IE enabled, bailing.\n");
-		__asm__ __volatile__("mov	%%i7, %0\n\t"
-				     : "=r" (tmp));
-		prom_printf("BUG: Called from %lx\n", tmp);
-		prom_halt();
-	}
-
-	/* Set interrupt globals.  */
-	workp = &__irq_work[cpu];
-	__asm__ __volatile__(
-	"rdpr	%%pstate, %0\n\t"
-	"wrpr	%0, %1, %%pstate\n\t"
-	"mov	%2, %%g6\n\t"
-	"wrpr	%0, 0x0, %%pstate\n\t"
-	: "=&r" (tmp)
-	: "i" (PSTATE_IG), "r" (workp));
+	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
 }
 
 /* Only invoked on boot processor. */
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 213eb4a9d8a..5a62ec5d531 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -223,12 +223,10 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		ldx			[%sp + PTREGS_OFF + PT_V9_G3], %g3
 		ldx			[%sp + PTREGS_OFF + PT_V9_G4], %g4
 		ldx			[%sp + PTREGS_OFF + PT_V9_G5], %g5
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-		mov			TSB_REG, %g6
-		brnz,a,pn		%l3, 1f
-		 ldxa			[%g6] ASI_IMMU, %g5
-#endif
+		brz,pt			%l3, 1f
+		 nop
+		/* Must do this before thread reg is clobbered below.  */
+		LOAD_PER_CPU_BASE(%g6, %g7)
 1:
 		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
 		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 158bd31e15b..59a70301a6c 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -507,6 +507,11 @@ void __init setup_arch(char **cmdline_p)
 	/* Work out if we are starfire early on */
 	check_if_starfire();
 
+	/* Now we know enough to patch the __get_cpu_id()
+	 * trampoline used by trap code.
+	 */
+	per_cpu_patch();
+
 	boot_flags_init(*cmdline_p);
 
 	idprom_init();
@@ -545,6 +550,9 @@ void __init setup_arch(char **cmdline_p)
 	smp_setup_cpu_possible_map();
 
 	paging_init();
+
+	/* Get boot processor trap_block[] setup.  */
+	init_cur_cpu_trap();
 }
 
 static int __init set_preferred_console(void)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index d2d3369e7b5..8c245859d21 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -38,6 +38,7 @@
 #include <asm/timer.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
+#include <asm/sections.h>
 
 extern void calibrate_delay(void);
 
@@ -87,10 +88,6 @@ void __init smp_store_cpu_info(int id)
 	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
 						     "clock-frequency", 0);
 
-	cpu_data(id).pgcache_size		= 0;
-	cpu_data(id).pte_cache[0]		= NULL;
-	cpu_data(id).pte_cache[1]		= NULL;
-	cpu_data(id).pgd_cache			= NULL;
 	cpu_data(id).idle_volume		= 1;
 
 	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
@@ -121,26 +118,15 @@ static volatile unsigned long callin_flag = 0;
 
 extern void inherit_locked_prom_mappings(int save_p);
 
-static inline void cpu_setup_percpu_base(unsigned long cpu_id)
-{
-#error IMMU TSB usage must be fixed
-	__asm__ __volatile__("mov	%0, %%g5\n\t"
-			     "stxa	%0, [%1] %2\n\t"
-			     "membar	#Sync"
-			     : /* no outputs */
-			     : "r" (__per_cpu_offset(cpu_id)),
-			       "r" (TSB_REG), "i" (ASI_IMMU));
-}
-
 void __init smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
 
 	inherit_locked_prom_mappings(0);
 
-	__flush_tlb_all();
+	__local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-	cpu_setup_percpu_base(cpuid);
+	__flush_tlb_all();
 
 	smp_setup_percpu_timer();
 
@@ -1107,12 +1093,15 @@ void __init smp_setup_cpu_possible_map(void)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	if (hard_smp_processor_id() >= NR_CPUS) {
+	int cpu = hard_smp_processor_id();
+
+	if (cpu >= NR_CPUS) {
 		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
 		prom_halt();
 	}
 
-	current_thread_info()->cpu = hard_smp_processor_id();
+	current_thread_info()->cpu = cpu;
+	__local_per_cpu_offset = __per_cpu_offset(cpu);
 
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), phys_cpu_present_map);
@@ -1173,12 +1162,9 @@ void __init setup_per_cpu_areas(void)
 {
 	unsigned long goal, size, i;
 	char *ptr;
-	/* Created by linker magic */
-	extern char __per_cpu_start[], __per_cpu_end[];
 
 	/* Copy section for each CPU (we discard the original) */
-	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-
+	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
 #ifdef CONFIG_MODULES
 	if (goal < PERCPU_ENOUGH_ROOM)
 		goal = PERCPU_ENOUGH_ROOM;
@@ -1187,31 +1173,10 @@ void __init setup_per_cpu_areas(void)
 	for (size = 1UL; size < goal; size <<= 1UL)
 		__per_cpu_shift++;
 
-	/* Make sure the resulting __per_cpu_base value
-	 * will fit in the 43-bit sign extended IMMU
-	 * TSB register.
-	 */
-	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
-			      (unsigned long) __per_cpu_start);
+	ptr = alloc_bootmem(size * NR_CPUS);
 
 	__per_cpu_base = ptr - __per_cpu_start;
 
-	if ((__per_cpu_shift < PAGE_SHIFT) ||
-	    (__per_cpu_base & ~PAGE_MASK) ||
-	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
-		prom_printf("PER_CPU: Invalid layout, "
-			    "ptr[%p] shift[%lx] base[%lx]\n",
-			    ptr, __per_cpu_shift, __per_cpu_base);
-		prom_halt();
-	}
-
 	for (i = 0; i < NR_CPUS; i++, ptr += size)
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
-	/* Finally, load in the boot cpu's base value.
-	 * We abuse the IMMU TSB register for trap handler
-	 * entry and exit loading of %g5.  That is why it
-	 * has to be page aligned.
-	 */
-	cpu_setup_percpu_base(hard_smp_processor_id());
 }
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 782d8c4973e..18c333f841e 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -287,21 +287,18 @@ do_unlock:
 	wrpr		%g0, 0, %wstate
 	wrpr		%g0, 0, %tl
 
-	/* Setup the trap globals, then we can resurface. */
-	rdpr		%pstate, %o1
-	mov		%g6, %o2
-	wrpr		%o1, PSTATE_AG, %pstate
+	/* Load TBA, then we can resurface. */
 	sethi		%hi(sparc64_ttable_tl0), %g5
 	wrpr		%g5, %tba
-	mov		%o2, %g6
 
-	wrpr		%o1, 0x0, %pstate
 	ldx		[%g6 + TI_TASK], %g4
 
 	wrpr		%g0, 0, %wstate
 
 	call		init_irqwork_curcpu
 	 nop
+	call		init_cur_cpu_trap
+	 nop
 
 	/* Start using proper page size encodings in ctx register.  */
 	sethi	%hi(sparc64_kern_pri_context), %g3
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 8d44ae5a15e..f47f4874253 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2130,7 +2130,22 @@ void do_getpsr(struct pt_regs *regs)
 	}
 }
 
+struct trap_per_cpu trap_block[NR_CPUS];
+
+/* This can get invoked before sched_init() so play it super safe
+ * and use hard_smp_processor_id().
+ */
+void init_cur_cpu_trap(void)
+{
+	int cpu = hard_smp_processor_id();
+	struct trap_per_cpu *p = &trap_block[cpu];
+
+	p->thread = current_thread_info();
+	p->pgd_paddr = 0;
+}
+
 extern void thread_info_offsets_are_bolixed_dave(void);
+extern void trap_per_cpu_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2165,6 +2180,10 @@ void __init trap_init(void)
 	    (TI_FPREGS & (64 - 1)))
 		thread_info_offsets_are_bolixed_dave();
 
+	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
+	    TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr))
+		trap_per_cpu_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 44b9e6fed09..50752c51877 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -36,6 +36,15 @@ tsb_miss_itlb:
 	 nop
 
 tsb_miss_page_table_walk:
+	/* This clobbers %g1 and %g6, preserve them... */
+	mov		%g1, %g5
+	mov		%g6, %g2
+
+	TRAP_LOAD_PGD_PHYS
+
+	mov		%g2, %g6
+	mov		%g5, %g1
+
 	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 tsb_reload:
@@ -112,15 +121,20 @@ winfix_trampoline:
 	 * %o0: page table physical address
 	 * %o1:	TSB address
 	 */
+	.align	32
 	.globl	tsb_context_switch
 tsb_context_switch:
-	wrpr	%g0, PSTATE_MG | PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV, %pstate
+	rdpr	%pstate, %o5
+	wrpr	%o5, PSTATE_IE, %pstate
 
-	/* Set page table base alternate global.  */
-	mov	%o0, %g7
+	ldub	[%g6 + TI_CPU], %o3
+	sethi	%hi(trap_block), %o4
+	sllx	%o3, TRAP_BLOCK_SZ_SHIFT, %o3
+	or	%o4, %lo(trap_block), %o4
+	add	%o4, %o3, %o4
+	stx	%o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
 
-	/* XXX can this happen?  */
-	brz,pn	%o1, 9f
+	brgez	%o1, 9f
 	 nop
 
 	/* Lock TSB into D-TLB.  */
@@ -163,7 +177,7 @@ tsb_context_switch:
 	membar		#Sync
 
 9:
-	wrpr	%g0, PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE, %pstate
+	wrpr	%o5, %pstate
 
 	retl
 	 mov	%o2, %o0
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 56f060c8fbf..2fb7a33993c 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -128,7 +128,7 @@ tl0_flushw:	FLUSH_WINDOW_TRAP
 tl0_resv104:	BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
 		.globl tl0_solaris
 tl0_solaris:	SOLARIS_SYSCALL_TRAP
-tl0_netbsd:	NETBSD_SYSCALL_TRAP
+tl0_resv109:	BTRAP(0x109)
 tl0_resv10a:	BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
 tl0_resv10f:	BTRAP(0x10f)
 tl0_linux32:	LINUX_32BIT_SYSCALL_TRAP
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index f5d93aa99cb..de588036df4 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -39,6 +39,7 @@ set_pcontext:
 	 */
 	.globl	fill_fixup, spill_fixup
 fill_fixup:
+	TRAP_LOAD_THREAD_REG
 	rdpr		%tstate, %g1
 	andcc		%g1, TSTATE_PRIV, %g0
 	or		%g4, FAULT_CODE_WINFIXUP, %g4
@@ -84,11 +85,7 @@ fill_fixup:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6
 	ldx		[%g6 + TI_TASK], %g4
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov		TSB_REG, %g1
-	ldxa		[%g1] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g1, %g2)
 
 	/* This is the same as below, except we handle this a bit special
 	 * since we must preserve %l5 and %l6, see comment above.
@@ -107,6 +104,7 @@ fill_fixup:
 	 * do not touch %g7 or %g2 so we handle the two cases fine.
 	 */
 spill_fixup:
+	TRAP_LOAD_THREAD_REG
 	ldx		[%g6 + TI_FLAGS], %g1
 	andcc		%g1, _TIF_32BIT, %g0
 	ldub		[%g6 + TI_WSAVED], %g1
@@ -182,6 +180,7 @@ winfix_mna:
 	wrpr		%g3, %tnpc
 	done
 fill_fixup_mna:
+	TRAP_LOAD_THREAD_REG
 	rdpr		%tstate, %g1
 	andcc		%g1, TSTATE_PRIV, %g0
 	be,pt		%xcc, window_mna_from_user_common
@@ -209,17 +208,14 @@ fill_fixup_mna:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6			! Get current back.
 	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov		TSB_REG, %g1
-	ldxa		[%g1] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g1, %g2)
 	call		mem_address_unaligned
 	 add		%sp, PTREGS_OFF, %o0
 
 	b,pt		%xcc, rtrap
 	 nop						! yes, the nop is correct
 spill_fixup_mna:
+	TRAP_LOAD_THREAD_REG
 	ldx		[%g6 + TI_FLAGS], %g1
 	andcc		%g1, _TIF_32BIT, %g0
 	ldub		[%g6 + TI_WSAVED], %g1
@@ -287,6 +283,7 @@ winfix_dax:
 	wrpr		%g3, %tnpc
 	done
 fill_fixup_dax:
+	TRAP_LOAD_THREAD_REG
 	rdpr		%tstate, %g1
 	andcc		%g1, TSTATE_PRIV, %g0
 	be,pt		%xcc, window_dax_from_user_common
@@ -314,17 +311,14 @@ fill_fixup_dax:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6			! Get current back.
 	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov		TSB_REG, %g1
-	ldxa		[%g1] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g1, %g2)
 	call		spitfire_data_access_exception
 	 add		%sp, PTREGS_OFF, %o0
 
 	b,pt		%xcc, rtrap
 	 nop						! yes, the nop is correct
 spill_fixup_dax:
+	TRAP_LOAD_THREAD_REG
 	ldx		[%g6 + TI_FLAGS], %g1
 	andcc		%g1, _TIF_32BIT, %g0
 	ldub		[%g6 + TI_WSAVED], %g1
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 22791f29552..a87394824ec 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -295,12 +295,10 @@ cheetah_patch_cachetlbops:
 	 *   %g1	address arg 1	(tlb page and range flushes)
 	 *   %g7	address arg 2	(tlb range flush only)
 	 *
-	 *   %g6	ivector table, don't touch
-	 *   %g2	scratch 1
-	 *   %g3	scratch 2
-	 *   %g4	scratch 3
-	 *
-	 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
+	 *   %g6	scratch 1
+	 *   %g2	scratch 2
+	 *   %g3	scratch 3
+	 *   %g4	scratch 4
 	 */
 	.align		32
 	.globl		xcall_flush_tlb_mm
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index f7c0faede8b..6c57cbb9a7d 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -1,12 +1,15 @@
 /* cpudata.h: Per-cpu parameters.
  *
- * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
+#ifndef __ASSEMBLY__
+
 #include <linux/percpu.h>
+#include <linux/threads.h>
 
 typedef struct {
 	/* Dcache line 1 */
@@ -32,4 +35,85 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)		per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()	__get_cpu_var(__cpu_data)
 
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses.  These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image.  As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1 */
+	struct thread_info	*thread;
+	unsigned long		pgd_paddr;
+	unsigned long		__pad1[2];
+
+/* D-cache line 2 */
+	unsigned long		__pad2[4];
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(void);
+extern void per_cpu_patch(void);
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD	0x00
+#define TRAP_PER_CPU_PGD_PADDR	0x08
+
+#define TRAP_BLOCK_SZ_SHIFT	6
+
+/* Clobbers %g1, loads %g6 with local processor's cpuid */
+#define __GET_CPUID			\
+	ba,pt	%xcc, __get_cpu_id;	\
+	 rd	%pc, %g1;
+
+/* Clobbers %g1, current address space PGD phys address into %g7.  */
+#define TRAP_LOAD_PGD_PHYS			\
+	__GET_CPUID				\
+	sllx	%g6, TRAP_BLOCK_SZ_SHIFT, %g6;	\
+	sethi	%hi(trap_block), %g7;		\
+	or	%g7, %lo(trap_block), %g7;	\
+	add	%g7, %g6, %g7;			\
+	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7;
+
+/* Clobbers %g1, loads local processor's IRQ work area into %g6.  */
+#define TRAP_LOAD_IRQ_WORK			\
+	__GET_CPUID				\
+	sethi	%hi(__irq_work), %g1;		\
+	sllx	%g6, 6, %g6;			\
+	or	%g1, %lo(__irq_work), %g1;	\
+	add	%g1, %g6, %g6;
+
+/* Clobbers %g1, loads %g6 with current thread info pointer.  */
+#define TRAP_LOAD_THREAD_REG			\
+	__GET_CPUID				\
+	sllx	%g6, TRAP_BLOCK_SZ_SHIFT, %g6;	\
+	sethi	%hi(trap_block), %g1;		\
+	or	%g1, %lo(trap_block), %g1;	\
+	ldx	[%g1 + %g6], %g6;
+
+/* Given the current thread info pointer in %g6, load the per-cpu
+ * area base of the current processor into %g5.  REG1 and REG2 are
+ * clobbered.
+ */
+#ifdef CONFIG_SMP
+#define LOAD_PER_CPU_BASE(REG1, REG2)			\
+	ldub	[%g6 + TI_CPU], REG1;			\
+	sethi	%hi(__per_cpu_shift), %g5;		\
+	sethi	%hi(__per_cpu_base), REG2;		\
+	ldx	[%g5 + %lo(__per_cpu_shift)], %g5;	\
+	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
+	sllx	REG1, %g5, %g5;				\
+	add	%g5, REG2, %g5;
+#else
+#define LOAD_PER_CPU_BASE(REG1, REG2)
+#endif
+
 #endif /* _SPARC64_CPUDATA_H */
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index af254e58183..26c0807af3e 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -209,6 +209,8 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 	/* so that ASI is only written if it changes, think again. */	\
 	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
 	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
+	trap_block[current_thread_info()->cpu].thread =			\
+		task_thread_info(next);					\
 	__asm__ __volatile__(						\
 	"mov	%%g4, %%g7\n\t"						\
 	"wrpr	%%g0, 0x95, %%pstate\n\t"				\
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index 2784f80094c..f557db4faf8 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -109,14 +109,14 @@
 	nop;nop;nop;
 	
 #define TRAP_UTRAP(handler,lvl)				\
-	ldx	[%g6 + TI_UTRAPS], %g1;			\
-	sethi	%hi(109f), %g7;				\
-	brz,pn	%g1, utrap;				\
-	 or	%g7, %lo(109f), %g7;			\
-	ba,pt	%xcc, utrap;				\
-109:	 ldx	[%g1 + handler*8], %g1;			\
-	ba,pt	%xcc, utrap_ill;			\
-	 mov	lvl, %o1;
+	mov	handler, %g3;				\
+	ba,pt	%xcc, utrap_trap;			\
+	 mov	lvl, %g4;				\
+	nop;						\
+	nop;						\
+	nop;						\
+	nop;						\
+	nop;
 
 #ifdef CONFIG_SUNOS_EMUL
 #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table)
@@ -136,8 +136,6 @@
 #else
 #define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall)
 #endif
-/* FIXME: Write these actually */	
-#define NETBSD_SYSCALL_TRAP TRAP(netbsd_syscall)
 #define BREAKPOINT_TRAP TRAP(breakpoint_trap)
 
 #define TRAP_IRQ(routine, level)			\
-- 
cgit v1.2.3-70-g09d2
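
A note on the trap_block addressing used by the new macros above:
TRAP_LOAD_PGD_PHYS and TRAP_LOAD_THREAD_REG index trap_block[] by
shifting the cpuid left by TRAP_BLOCK_SZ_SHIFT, which only works
because struct trap_per_cpu is exactly one 64-byte D-cache line.
A minimal C sketch of the same arithmetic (names suffixed _sketch
are illustrative stand-ins, not kernel identifiers):

	struct trap_per_cpu_sketch {
		void		*thread;	/* stands in for struct thread_info * */
		unsigned long	pgd_paddr;
		unsigned long	__pad1[2];
		unsigned long	__pad2[4];
	} __attribute__((aligned(64)));	/* 64 bytes, so cpuid << 6 indexes it */

	static unsigned long pgd_paddr_of(struct trap_per_cpu_sketch *block,
					  unsigned int cpu)
	{
		/* Same arithmetic as the assembler: base + (cpu << 6). */
		unsigned long p = (unsigned long) block;

		p += (unsigned long) cpu << 6;	/* TRAP_BLOCK_SZ_SHIFT */
		return ((struct trap_per_cpu_sketch *) p)->pgd_paddr;
	}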


From 09f94287f7260e03bbeab497e743691fafcc22c3 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:31:06 -0800
Subject: [SPARC64]: TSB refinements.

Move {init_new,destroy}_context() out of line.

Do not put huge pages into the TSB, only base page size translations.
There are some clever things we could do here, but for now let's be
correct instead of fancy.
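
In C terms, the check the tsb_reload path now performs amounts to
the following sketch (the _PAGE_* macros are the sparc64 pgtable.h
definitions; see the pgtable.h hunk below for _PAGE_ALL_SZ_BITS):

	#include <asm/pgtable.h>	/* kernel build context assumed */

	/* A TTE may enter the TSB only if its size field decodes to
	 * the base 8K page size, i.e. exactly _PAGE_SZBITS and nothing
	 * more.
	 */
	static inline int tte_is_base_page_size(unsigned long tte)
	{
		return (tte & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS;
	}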

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/tsb.S         | 11 +++++++++++
 arch/sparc64/mm/tsb.c             | 28 ++++++++++++++++++++++++++++
 include/asm-sparc64/mmu_context.h | 32 ++------------------------------
 include/asm-sparc64/pgtable.h     |  4 ++++
 4 files changed, 45 insertions(+), 30 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 50752c51877..76f2c0b01f3 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -55,6 +55,17 @@ tsb_reload:
 	brgez,a,pn	%g5, tsb_do_fault
 	 stx		%g0, [%g1]
 
+	/* If it is larger than the base page size, don't
+	 * bother putting it into the TSB.
+	 */
+	srlx		%g5, 32, %g2
+	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g4
+	sethi		%hi(_PAGE_SZBITS >> 32), %g7
+	and		%g2, %g4, %g2
+	cmp		%g2, %g7
+	bne,a,pn	%xcc, tsb_tlb_reload
+	 stx		%g0, [%g1]
+
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 15e8af58b1d..2f84cef6c1b 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -8,6 +8,7 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/mmu_context.h>
 
 #define TSB_ENTRY_ALIGNMENT	16
 
@@ -82,3 +83,30 @@ void flush_tsb_user(struct mmu_gather *mp)
 		}
 	}
 }
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+	mm->context.sparc64_ctx_val = 0UL;
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	mm->context.sparc64_tsb = (unsigned long *) page;
+
+	return 0;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+	free_page((unsigned long) mm->context.sparc64_tsb);
+
+	spin_lock(&ctx_alloc_lock);
+
+	if (CTX_VALID(mm->context)) {
+		unsigned long nr = CTX_NRBITS(mm->context);
+		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
+	}
+
+	spin_unlock(&ctx_alloc_lock);
+}
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 34640a370ab..0dffb4ce8a1 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,36 +19,8 @@ extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
 extern void get_new_mmu_context(struct mm_struct *mm);
-
-/* Initialize a new mmu context.  This is invoked when a new
- * address space instance (unique or shared) is instantiated.
- * This just needs to set mm->context to an invalid context.
- */
-#define init_new_context(__tsk, __mm)	\
-({	unsigned long __pg = get_zeroed_page(GFP_KERNEL); \
-	(__mm)->context.sparc64_ctx_val = 0UL; \
-	(__mm)->context.sparc64_tsb = \
-	  (unsigned long *) __pg; \
-	(__pg ? 0 : -ENOMEM); \
-})
-
-
-/* Destroy a dead context.  This occurs when mmput drops the
- * mm_users count to zero, the mmaps have been released, and
- * all the page tables have been flushed.  Our job is to destroy
- * any remaining processor-specific state, and in the sparc64
- * case this just means freeing up the mmu context ID held by
- * this task if valid.
- */
-#define destroy_context(__mm)					\
-do {	free_page((unsigned long)(__mm)->context.sparc64_tsb);	\
-	spin_lock(&ctx_alloc_lock);				\
-	if (CTX_VALID((__mm)->context)) {			\
-		unsigned long nr = CTX_NRBITS((__mm)->context);	\
-		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));	\
-	}							\
-	spin_unlock(&ctx_alloc_lock);				\
-} while(0)
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
 
 extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
 
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 77ba0b6cc1c..2b2ecd6104d 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -116,6 +116,10 @@
 #define _PAGE_W		_AC(0x0000000000000002,UL) /* Writable               */
 #define _PAGE_G		_AC(0x0000000000000001,UL) /* Global                 */
 
+#define _PAGE_ALL_SZ_BITS	\
+	(_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \
+	 _PAGE_SZ8K  | _PAGE_SZ32MB | _PAGE_SZ256MB)
+
 /* Here are the SpitFire software bits we use in the TTE's.
  *
  * WARNING: If you are going to try and start using some
-- 
cgit v1.2.3-70-g09d2


From 98c5584cfc47932c4f3ccf5eee2e0bae1447b85e Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:31:20 -0800
Subject: [SPARC64]: Add infrastructure for dynamic TSB sizing.

This also cleans up tsb_context_switch().  The assembler
routine is now __tsb_context_switch() and the former is
an inline function that picks out the bits from the mm_struct
and passes them into the assembler code as arguments.

setup_tsb_params() computes the locked TLB entry to map the
TSB.  Later, when we support using the physical address quad
load instructions of Cheetah+ and later, we'll simply use
the physical address for the TSB register value and set
the map virtual address and PTE both to zero.
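
For reference, the TSB register value packs the size field into the
low bits and the (virtual) TSB base into the high bits.  A hedged C
sketch of what setup_tsb_params() computes for the locked-mapping
case (make_tsb_reg() is an illustrative name, not a kernel function):

	static unsigned long make_tsb_reg(unsigned long base_vaddr,
					  unsigned long tsb_paddr,
					  unsigned long page_sz,
					  unsigned long size_field)
	{
		unsigned long tsb_reg = size_field;	/* 0 => 8K ... 7 => 1MB */

		tsb_reg |= base_vaddr;			/* where the TSB is mapped */
		tsb_reg |= tsb_paddr & (page_sz - 1UL);	/* offset in the map page */
		return tsb_reg;
	}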

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/binfmt_aout32.c |   3 +-
 arch/sparc64/kernel/process.c       |   3 +-
 arch/sparc64/kernel/tsb.S           |  55 +++++++-----------
 arch/sparc64/mm/tsb.c               | 109 +++++++++++++++++++++++++++++++-----
 include/asm-sparc64/mmu.h           |  13 ++++-
 include/asm-sparc64/mmu_context.h   |  15 +++--
 include/asm-sparc64/tsb.h           |   2 +-
 7 files changed, 142 insertions(+), 58 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index a57d7f2b6f1..181c8cdf954 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -330,8 +330,7 @@ beyond_if:
 
 	current->mm->start_stack =
 		(unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
-	tsb_context_switch(__pa(current->mm->pgd),
-	                   current->mm->context.sparc64_tsb);
+	tsb_context_switch(mm);
 
 	start_thread32(regs, ex.a_entry, current->mm->start_stack);
 	if (current->ptrace & PT_PTRACED)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 2784aab0d3e..26548fc604b 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -441,8 +441,7 @@ void flush_thread(void)
 
 	mm = t->task->mm;
 	if (mm)
-		tsb_context_switch(__pa(mm->pgd),
-				   mm->context.sparc64_tsb);
+		tsb_context_switch(mm);
 
 	set_thread_wsaved(0);
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 76f2c0b01f3..fe266bad0a2 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -130,48 +130,36 @@ winfix_trampoline:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1:	TSB address
+	 * %o1:	TSB register value
+	 * %o2:	TSB virtual address
+	 * %o3:	TSB mapping locked PTE
+	 *
+	 * We have to run this whole thing with interrupts
+	 * disabled so that the current cpu doesn't change
+	 * due to preemption.
 	 */
 	.align	32
-	.globl	tsb_context_switch
-tsb_context_switch:
+	.globl	__tsb_context_switch
+__tsb_context_switch:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 
-	ldub	[%g6 + TI_CPU], %o3
-	sethi	%hi(trap_block), %o4
-	sllx	%o3, TRAP_BLOCK_SZ_SHIFT, %o3
-	or	%o4, %lo(trap_block), %o4
-	add	%o4, %o3, %o4
-	stx	%o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
-
-	brgez	%o1, 9f
-	 nop
-
-	/* Lock TSB into D-TLB.  */
-	sethi		%hi(PAGE_SIZE), %o3
-	and		%o3, %o1, %o3
-	sethi		%hi(TSBMAP_BASE), %o2
-	add		%o2, %o3, %o2
+	ldub	[%g6 + TI_CPU], %g1
+	sethi	%hi(trap_block), %g2
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
+	or	%g2, %lo(trap_block), %g2
+	add	%g2, %g1, %g2
+	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	/* XXX handle PAGE_SIZE != 8K correctly...  */
 	mov	TSB_REG, %g1
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o1, [%g1] ASI_DMMU
 	membar	#Sync
 
-	stxa	%o2, [%g1] ASI_IMMU
+	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
 
-#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
-#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
-	sethi		%uhi(KERN_HIGHBITS), %g2
-	or		%g2, %ulo(KERN_HIGHBITS), %g2
-	sllx		%g2, 32, %g2
-	or		%g2, KERN_LOWBITS, %g2
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
-
-	xor		%o1, %g2, %o1	
+	brz	%o2, 9f
+	 nop
 
 	/* We use entry 61 for this locked entry.  This is the spitfire
 	 * TLB entry number, and luckily cheetah masks the value with
@@ -184,11 +172,10 @@ tsb_context_switch:
 	stxa		%o2, [%g1] ASI_DMMU
 	membar		#Sync
 	mov		(61 << 3), %g1
-	stxa		%o1, [%g1] ASI_DTLB_DATA_ACCESS
+	stxa		%o3, [%g1] ASI_DTLB_DATA_ACCESS
 	membar		#Sync
-
 9:
 	wrpr	%o5, %pstate
 
 	retl
-	 mov	%o2, %o0
+	 nop
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 2f84cef6c1b..dfe7144fcdf 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -9,13 +9,7 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
-
-#define TSB_ENTRY_ALIGNMENT	16
-
-struct tsb {
-	unsigned long tag;
-	unsigned long pte;
-} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+#include <asm/pgtable.h>
 
 /* We use an 8K TSB for the whole kernel, this allows to
  * handle about 4MB of modules and vmalloc mappings without
@@ -27,10 +21,10 @@ struct tsb {
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
 {
 	vaddr >>= PAGE_SHIFT;
-	return vaddr & (KERNEL_TSB_NENTRIES - 1);
+	return vaddr & (nentries - 1);
 }
 
 static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
@@ -51,7 +45,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		struct tsb *ent = &swapper_tsb[tsb_hash(v)];
+		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent, v, 0)) {
 			ent->tag = 0UL;
@@ -63,8 +58,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 void flush_tsb_user(struct mmu_gather *mp)
 {
 	struct mm_struct *mm = mp->mm;
-	struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb;
+	struct tsb *tsb = mm->context.tsb;
 	unsigned long ctx = ~0UL;
+	unsigned long nentries = mm->context.tsb_nentries;
 	int i;
 
 	if (CTX_VALID(mm->context))
@@ -76,7 +72,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 
 		v &= ~0x1UL;
 
-		ent = &tsb[tsb_hash(v)];
+		ent = &tsb[tsb_hash(v, nentries)];
 		if (tag_compare(ent, v, ctx)) {
 			ent->tag = 0UL;
 			membar_storeload_storestore();
@@ -84,6 +80,83 @@ void flush_tsb_user(struct mmu_gather *mp)
 	}
 }
 
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+{
+	unsigned long tsb_reg, base, tsb_paddr;
+	unsigned long page_sz, tte;
+
+	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+
+	base = TSBMAP_BASE;
+	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
+	       _PAGE_CV    | _PAGE_P | _PAGE_W);
+	tsb_paddr = __pa(mm->context.tsb);
+
+	/* Use the smallest page size that can map the whole TSB
+	 * in one TLB entry.
+	 */
+	switch (tsb_bytes) {
+	case 8192 << 0:
+		tsb_reg = 0x0UL;
+#ifdef DCACHE_ALIASING_POSSIBLE
+		base += (tsb_paddr & 8192);
+#endif
+		tte |= _PAGE_SZ8K;
+		page_sz = 8192;
+		break;
+
+	case 8192 << 1:
+		tsb_reg = 0x1UL;
+		tte |= _PAGE_SZ64K;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 2:
+		tsb_reg = 0x2UL;
+		tte |= _PAGE_SZ64K;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 3:
+		tsb_reg = 0x3UL;
+		tte |= _PAGE_SZ64K;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 4:
+		tsb_reg = 0x4UL;
+		tte |= _PAGE_SZ512K;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 5:
+		tsb_reg = 0x5UL;
+		tte |= _PAGE_SZ512K;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 6:
+		tsb_reg = 0x6UL;
+		tte |= _PAGE_SZ512K;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 7:
+		tsb_reg = 0x7UL;
+		tte |= _PAGE_SZ4MB;
+		page_sz = 4 * 1024 * 1024;
+		break;
+	};
+
+	tsb_reg |= base;
+	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
+	tte |= (tsb_paddr & ~(page_sz - 1UL));
+
+	mm->context.tsb_reg_val = tsb_reg;
+	mm->context.tsb_map_vaddr = base;
+	mm->context.tsb_map_pte = tte;
+}
+
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long page = get_zeroed_page(GFP_KERNEL);
@@ -92,14 +165,22 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	mm->context.sparc64_tsb = (unsigned long *) page;
+	mm->context.tsb = (struct tsb *) page;
+	setup_tsb_params(mm, PAGE_SIZE);
 
 	return 0;
 }
 
 void destroy_context(struct mm_struct *mm)
 {
-	free_page((unsigned long) mm->context.sparc64_tsb);
+	free_page((unsigned long) mm->context.tsb);
+
+	/* We can remove these later, but for now it's useful
+	 * to catch any bogus post-destroy_context() references
+	 * to the TSB.
+	 */
+	mm->context.tsb = NULL;
+	mm->context.tsb_reg_val = 0UL;
 
 	spin_lock(&ctx_alloc_lock);
 
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 36384cf7faa..2effeba2476 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,9 +90,20 @@
 
 #ifndef __ASSEMBLY__
 
+#define TSB_ENTRY_ALIGNMENT	16
+
+struct tsb {
+	unsigned long tag;
+	unsigned long pte;
+} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+
 typedef struct {
 	unsigned long	sparc64_ctx_val;
-	unsigned long	*sparc64_tsb;
+	struct tsb	*tsb;
+	unsigned long	tsb_nentries;
+	unsigned long	tsb_reg_val;
+	unsigned long	tsb_map_vaddr;
+	unsigned long	tsb_map_pte;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 0dffb4ce8a1..0a950f151d2 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -22,7 +22,15 @@ extern void get_new_mmu_context(struct mm_struct *mm);
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
-extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
+extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
+				 unsigned long tsb_vaddr, unsigned long tsb_pte);
+
+static inline void tsb_context_switch(struct mm_struct *mm)
+{
+	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
+			     mm->context.tsb_map_vaddr,
+			     mm->context.tsb_map_pte);
+}
 
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
@@ -52,8 +60,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
-		tsb_context_switch(__pa(mm->pgd),
-				   mm->context.sparc64_tsb);
+		tsb_context_switch(mm);
 	}
 
 	/* Even if (mm == old_mm) we _must_ check
@@ -91,7 +98,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
+	tsb_context_switch(mm);
 }
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 03d272e0e47..1f93b7d8cdb 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -19,7 +19,7 @@
  * 	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
  * 	retry
  *
-
+ *
  * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
  * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
  * register which is:
-- 
cgit v1.2.3-70-g09d2


From bd40791e1d289d807b8580abe1f117e9c62894e4 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:31:38 -0800
Subject: [SPARC64]: Dynamically grow TSB in response to RSS growth.

As the RSS grows, grow the TSB in order to reduce the likelihood
of hash collisions and thus poor hit rates in the TSB.

This definitely needs some serious tuning.
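
The sizing rule itself is simple; a sketch in C (pick_tsb_bytes() is
an illustrative name, and 16 bytes per struct tsb is assumed):

	/* Pick the smallest power-of-two TSB whose 3/4-full watermark
	 * still exceeds the current RSS.
	 */
	static unsigned long pick_tsb_bytes(unsigned long rss,
					    unsigned long max_bytes)
	{
		unsigned long size;

		for (size = 8192; size < max_bytes; size <<= 1) {
			unsigned long n_entries = (size / 16) * 3 / 4;

			if (n_entries > rss)
				break;
		}
		return size;	/* clamped at max_bytes */
	}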

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/smp.c         |  28 ++++++-
 arch/sparc64/mm/init.c            |   7 ++
 arch/sparc64/mm/tsb.c             | 151 ++++++++++++++++++++++++++++++++++++--
 include/asm-sparc64/mmu.h         |   1 +
 include/asm-sparc64/mmu_context.h |   7 ++
 5 files changed, 184 insertions(+), 10 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8c245859d21..3c14b549cf9 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -581,11 +581,11 @@ extern unsigned long xcall_call_function;
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+static int smp_call_function_mask(void (*func)(void *info), void *info,
+				  int nonatomic, int wait, cpumask_t mask)
 {
 	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
+	int cpus = cpus_weight(mask) - 1;
 	long timeout;
 
 	if (!cpus)
@@ -603,7 +603,7 @@ int smp_call_function(void (*func)(void *info), void *info,
 
 	call_data = &data;
 
-	smp_cross_call(&xcall_call_function, 0, 0, 0);
+	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
 	/* 
 	 * Wait for other cpus to complete function or at
@@ -629,6 +629,13 @@ out_timeout:
 	return 0;
 }
 
+int smp_call_function(void (*func)(void *info), void *info,
+		      int nonatomic, int wait)
+{
+	return smp_call_function_mask(func, info, nonatomic, wait,
+				      cpu_online_map);
+}
+
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	void (*func) (void *info) = call_data->func;
@@ -646,6 +653,19 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 	}
 }
 
+static void tsb_sync(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (current->active_mm == mm)
+		tsb_context_switch(mm);
+}
+
+void smp_tsb_sync(struct mm_struct *mm)
+{
+	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+}
+
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 7c456afaa9a..a8119cb4fa3 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -246,9 +246,11 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
+	struct mm_struct *mm;
 	struct page *page;
 	unsigned long pfn;
 	unsigned long pg_flags;
+	unsigned long mm_rss;
 
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn) &&
@@ -270,6 +272,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 		put_cpu();
 	}
+
+	mm = vma->vm_mm;
+	mm_rss = get_mm_rss(mm);
+	if (mm_rss >= mm->context.tsb_rss_limit)
+		tsb_grow(mm, mm_rss, GFP_ATOMIC);
 }
 
 void flush_dcache_page(struct page *page)
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index dfe7144fcdf..707af4b84a0 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -10,6 +10,7 @@
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/tsb.h>
 
 /* We use an 8K TSB for the whole kernel, this allows to
  * handle about 4MB of modules and vmalloc mappings without
@@ -146,6 +147,9 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		tte |= _PAGE_SZ4MB;
 		page_sz = 4 * 1024 * 1024;
 		break;
+
+	default:
+		BUG();
 	};
 
 	tsb_reg |= base;
@@ -157,23 +161,158 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	mm->context.tsb_map_pte = tte;
 }
 
+/* The page tables are locked against modifications while this
+ * runs.
+ *
+ * XXX do some prefetching...
+ */
+static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
+		     struct tsb *new_tsb, unsigned long new_size)
+{
+	unsigned long old_nentries = old_size / sizeof(struct tsb);
+	unsigned long new_nentries = new_size / sizeof(struct tsb);
+	unsigned long i;
+
+	for (i = 0; i < old_nentries; i++) {
+		register unsigned long tag asm("o4");
+		register unsigned long pte asm("o5");
+		unsigned long v;
+		unsigned int hash;
+
+		__asm__ __volatile__(
+			"ldda [%2] %3, %0"
+			: "=r" (tag), "=r" (pte)
+			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
+
+		if (!tag || (tag & TSB_TAG_LOCK))
+			continue;
+
+		/* We only put base page size PTEs into the TSB,
+		 * but that might change in the future.  This code
+		 * would need to be changed if we start putting larger
+		 * page size PTEs into there.
+		 */
+		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);
+
+		/* The tag holds bits 22 to 63 of the virtual address
+		 * and the context.  Clear out the context, and shift
+		 * up to make a virtual address.
+		 */
+		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;
+
+		/* The implied bits of the tag (bits 13 to 21) are
+		 * determined by the TSB entry index, so fill that in.
+		 */
+		v |= (i & (512UL - 1UL)) << 13UL;
+
+		hash = tsb_hash(v, new_nentries);
+		new_tsb[hash].tag = tag;
+		new_tsb[hash].pte = pte;
+	}
+}
+
+/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
+ * update_mmu_cache() invokes this routine to try and grow the TSB.
+ * When we reach the maximum TSB size supported, we stick ~0UL into
+ * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * will not trigger any longer.
+ *
+ * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
+ * of two.  The TSB must be aligned to its size, e.g. a 512K TSB
+ * must be 512K aligned.
+ *
+ * The idea here is to grow the TSB when the RSS of the process approaches
+ * the number of entries that the current TSB can hold at once.  Currently,
+ * we trigger when the RSS hits 3/4 of the TSB capacity.
+ */
+void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
+{
+	unsigned long max_tsb_size = 1 * 1024 * 1024;
+	unsigned long size, old_size;
+	struct page *page;
+	struct tsb *old_tsb;
+
+	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
+		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
+
+	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
+		unsigned long n_entries = size / sizeof(struct tsb);
+
+		n_entries = (n_entries * 3) / 4;
+		if (n_entries > rss)
+			break;
+	}
+
+	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
+	if (unlikely(!page))
+		return;
+
+	if (size == max_tsb_size)
+		mm->context.tsb_rss_limit = ~0UL;
+	else
+		mm->context.tsb_rss_limit =
+			((size / sizeof(struct tsb)) * 3) / 4;
+
+	old_tsb = mm->context.tsb;
+	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+
+	if (old_tsb)
+		copy_tsb(old_tsb, old_size, page_address(page), size);
+
+	mm->context.tsb = page_address(page);
+	setup_tsb_params(mm, size);
+
+	/* If old_tsb is NULL, we're being invoked for the first time
+	 * from init_new_context().
+	 */
+	if (old_tsb) {
+		/* Now force all other processors to reload the new
+		 * TSB state.
+		 */
+		smp_tsb_sync(mm);
+
+		/* Finally reload it on the local cpu.  No further
+		 * references will remain to the old TSB and we can
+		 * thus free it up.
+		 */
+		tsb_context_switch(mm);
+
+		free_pages((unsigned long) old_tsb, get_order(old_size));
+	}
+}
+
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long page = get_zeroed_page(GFP_KERNEL);
+	unsigned long initial_rss;
 
 	mm->context.sparc64_ctx_val = 0UL;
-	if (unlikely(!page))
-		return -ENOMEM;
 
-	mm->context.tsb = (struct tsb *) page;
-	setup_tsb_params(mm, PAGE_SIZE);
+	/* copy_mm() copies over the parent's mm_struct before calling
+	 * us, so we need to zero out the TSB pointer or else tsb_grow()
+	 * will be confused and think there is an older TSB to free up.
+	 */
+	mm->context.tsb = NULL;
+
+	/* If this is fork, inherit the parent's TSB size.  We would
+	 * grow it to that size on the first page fault anyway.
+	 */
+	initial_rss = mm->context.tsb_nentries;
+	if (initial_rss)
+		initial_rss -= 1;
+
+	tsb_grow(mm, initial_rss, GFP_KERNEL);
+
+	if (unlikely(!mm->context.tsb))
+		return -ENOMEM;
 
 	return 0;
 }
 
 void destroy_context(struct mm_struct *mm)
 {
-	free_page((unsigned long) mm->context.tsb);
+	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
+
+	free_pages((unsigned long) mm->context.tsb, get_order(size));
 
 	/* We can remove these later, but for now it's useful
 	 * to catch any bogus post-destroy_context() references
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 2effeba2476..76008ff6a90 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -100,6 +100,7 @@ struct tsb {
 typedef struct {
 	unsigned long	sparc64_ctx_val;
 	struct tsb	*tsb;
+	unsigned long	tsb_rss_limit;
 	unsigned long	tsb_nentries;
 	unsigned long	tsb_reg_val;
 	unsigned long	tsb_map_vaddr;
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 0a950f151d2..1d232678821 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -32,6 +32,13 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 			     mm->context.tsb_map_pte);
 }
 
+extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
+#ifdef CONFIG_SMP
+extern void smp_tsb_sync(struct mm_struct *mm);
+#else
+#define smp_tsb_sync(__mm) do { } while (0)
+#endif
+
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
 	__asm__ __volatile__("stxa	%0, [%1] %2\n\t" \
-- 
cgit v1.2.3-70-g09d2


From b70c0fa1613c4f69b4a340a0e2bee387560ebbb1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:32:04 -0800
Subject: [SPARC64]: Preload TSB entries from update_mmu_cache().
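
The preload computes the direct-mapped TSB slot and the tag for the
freshly installed PTE, just as the update_mmu_cache() hunk below
does.  Schematically (a sketch; 8K base pages, so PAGE_SHIFT == 13):

	/* Slot index: VA bits above PAGE_SHIFT, modulo the TSB size. */
	static unsigned long tsb_slot(unsigned long vaddr, unsigned long nentries)
	{
		return (vaddr >> 13) & (nentries - 1);
	}

	/* Tag: VA bits 22 and up, with the hw context in bits 48-63. */
	static unsigned long tsb_tag(unsigned long vaddr, unsigned long ctx)
	{
		return (vaddr >> 22) | (ctx << 48);
	}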

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/tsb.S | 17 +++++++++++++++++
 arch/sparc64/mm/init.c    | 10 ++++++++++
 include/asm-sparc64/mmu.h |  2 ++
 3 files changed, 29 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index fe266bad0a2..08405ed6928 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -126,6 +126,23 @@ winfix_trampoline:
 	wrpr	%g3, %tnpc			! Write it into TNPC
 	done					! Trap return
 
+	/* Insert an entry into the TSB.
+	 *
+	 * %o0: TSB entry pointer
+	 * %o1: tag
+	 * %o2:	pte
+	 */
+	.align	32
+	.globl	tsb_insert
+tsb_insert:
+	rdpr	%pstate, %o5
+	wrpr	%o5, PSTATE_IE, %pstate
+	TSB_LOCK_TAG(%o0, %g2, %g3)
+	TSB_WRITE(%o0, %o2, %o1)
+	wrpr	%o5, %pstate
+	retl
+	 nop
+
 	/* Reload MMU related context switch state at
 	 * schedule() time.
 	 *
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a8119cb4fa3..1e8a5a33639 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -277,6 +277,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	mm_rss = get_mm_rss(mm);
 	if (mm_rss >= mm->context.tsb_rss_limit)
 		tsb_grow(mm, mm_rss, GFP_ATOMIC);
+
+	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
+		struct tsb *tsb;
+		unsigned long tag;
+
+		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
+				       (mm->context.tsb_nentries - 1UL)];
+		tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL;
+		tsb_insert(tsb, tag, pte_val(pte));
+	}
 }
 
 void flush_dcache_page(struct page *page)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 76008ff6a90..18f98edfbcd 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -97,6 +97,8 @@ struct tsb {
 	unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
 
+extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte);
+
 typedef struct {
 	unsigned long	sparc64_ctx_val;
 	struct tsb	*tsb;
-- 
cgit v1.2.3-70-g09d2


From 4753eb2ac7022b999e5e484f1a5dc001dba22bd3 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:32:44 -0800
Subject: [SPARC64]: Fix incorrect TSB lock bit handling.

The TSB_TAG_LOCK define is actually a special
value shifted down by 32 bits for the assembler
code macros.

In C code, this isn't what we want.
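
To make the distinction concrete (a sketch using the defines this
patch introduces):

	#define TSB_TAG_LOCK_BIT	47
	#define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))

	/* The assembler macros test only the upper 32-bit word of the
	 * tag (via lduwa/sethi), so they want the pre-shifted
	 * TSB_TAG_LOCK_HIGH.  C code tests the full 64-bit tag, so it
	 * must use the real bit position.
	 */
	static inline int tsb_tag_locked(unsigned long tag)
	{
		return (tag & (1UL << TSB_TAG_LOCK_BIT)) != 0;
	}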

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/mm/tsb.c     | 2 +-
 include/asm-sparc64/tsb.h | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 707af4b84a0..e605478217c 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -184,7 +184,7 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 			: "=r" (tag), "=r" (pte)
 			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
 
-		if (!tag || (tag & TSB_TAG_LOCK))
+		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
 			continue;
 
 		/* We only put base page size PTEs into the TSB,
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 1f93b7d8cdb..09ab3aaa8d2 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -47,13 +47,14 @@
  * possible solution is to use RCU for the freeing of the TSB.
  */
 
-#define TSB_TAG_LOCK	(1 << (47 - 32))
+#define TSB_TAG_LOCK_BIT	47
+#define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))
 
 #define TSB_MEMBAR	membar	#StoreStore
 
 #define TSB_LOCK_TAG(TSB, REG1, REG2)	\
 99:	lduwa	[TSB] ASI_N, REG1;	\
-	sethi	%hi(TSB_TAG_LOCK), REG2;\
+	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
 	andcc	REG1, REG2, %g0;	\
 	bne,pn	%icc, 99b;		\
 	 nop;				\
-- 
cgit v1.2.3-70-g09d2


From a8b900d801697609d1b56cc9c110148c64678068 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:33:37 -0800
Subject: [SPARC64]: Kill sole argument passed to setup_tba().

The argument is no longer used, so kill it, and move the extern
declaration into a header file.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/head.S    |  2 +-
 arch/sparc64/mm/init.c        | 11 ++---------
 include/asm-sparc64/cpudata.h |  1 +
 3 files changed, 4 insertions(+), 10 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 82ce5bced9c..2988be85147 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -454,7 +454,7 @@ setup_trap_table:
 	 restore
 
 	.globl	setup_tba
-setup_tba:	/* i0 = is_starfire */
+setup_tba:
 	save	%sp, -192, %sp
 
 	/* The boot processor is the only cpu which invokes this
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index f4d22ccb4cf..20e7af552ce 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1092,15 +1092,8 @@ void __init paging_init(void)
 	
 	inherit_prom_mappings();
 	
-	/* Ok, we can use our TLB miss and window trap handlers safely.
-	 * We need to do a quick peek here to see if we are on StarFire
-	 * or not, so setup_tba can setup the IRQ globals correctly (it
-	 * needs to get the hard smp processor id correctly).
-	 */
-	{
-		extern void setup_tba(int);
-		setup_tba(this_is_starfire);
-	}
+	/* Ok, we can use our TLB miss and window trap handlers safely.  */
+	setup_tba();
 
 	__flush_tlb_all();
 
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 6c57cbb9a7d..16d62891383 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -61,6 +61,7 @@ struct trap_per_cpu {
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(void);
 extern void per_cpu_patch(void);
+extern void setup_tba(void);
 
 #endif /* !(__ASSEMBLY__) */
 
-- 
cgit v1.2.3-70-g09d2


From 2f7ee7c63f08b7f883b710a29d91c1891b81b8e1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:33:49 -0800
Subject: [SPARC64]: Increase swapper_tsb size to 32K.
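
The arithmetic behind the "about 16MB" claim in the new comments,
as a sketch (16 bytes per TSB entry and 8K base pages assumed):

	enum {
		KERNEL_TSB_BYTES	= 32 * 1024,
		KERNEL_TSB_ENTRIES	= KERNEL_TSB_BYTES / 16,	/* 2048 */
		TSB_COVERAGE_BYTES	= KERNEL_TSB_ENTRIES * 8192,	/* 16MB */
	};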

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/head.S        | 12 ++++++------
 arch/sparc64/kernel/vmlinux.lds.S |  3 ---
 arch/sparc64/mm/tsb.c             |  8 --------
 include/asm-sparc64/tsb.h         | 13 ++++++++++---
 4 files changed, 16 insertions(+), 20 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 2988be85147..7840271d7aa 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -484,16 +484,16 @@ sparc64_boot_end:
 /*
  * The following skip makes sure the trap table in ttable.S is aligned
  * on a 32K boundary as required by the v9 specs for TBA register.
+ *
+ * We align to a 32K boundary, then we have the 32K kernel TSB,
+ * then the 32K aligned trap table.
  */
 1:
 	.skip	0x4000 + _start - 1b
 
-#ifdef CONFIG_SBUS
-/* This is just a hack to fool make depend config.h discovering
-   strategy: As the .S files below need config.h, but
-   make depend does not find it for them, we include config.h
-   in head.S */
-#endif
+	.globl	swapper_tsb
+swapper_tsb:
+	.skip	(32 * 1024)
 
 ! 0x0000000000408000
 
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index f018aaf4548..467d13a0d5c 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -43,9 +43,6 @@ SECTIONS
   __ex_table : { *(__ex_table) }
   __stop___ex_table = .;
 
-  . = ALIGN(8192);
-  swapper_tsb = .;
-  . += 8192;
   . = ALIGN(8192);
   __init_begin = .;
   .init.text : { 
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index e605478217c..1c4e5c2dfc5 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -12,14 +12,6 @@
 #include <asm/pgtable.h>
 #include <asm/tsb.h>
 
-/* We use an 8K TSB for the whole kernel, this allows to
- * handle about 4MB of modules and vmalloc mappings without
- * incurring many hash conflicts.
- */
-#define KERNEL_TSB_SIZE_BYTES	8192
-#define KERNEL_TSB_NENTRIES \
-	(KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))
-
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
 static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 09ab3aaa8d2..1addd91d720 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -143,6 +143,14 @@
 	 add		REG1, (3 * 8), REG1; \
 99:
 
+	/* We use a 32K TSB for the whole kernel, which allows us to
+	 * handle about 16MB of modules and vmalloc mappings without
+	 * incurring many hash conflicts.
+	 */
+#define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
+#define KERNEL_TSB_NENTRIES	\
+	(KERNEL_TSB_SIZE_BYTES / 16)
+
 	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
 	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
 	 * and the found TTE will be left in REG1.  REG3 and REG4 must
@@ -150,12 +158,11 @@
 	 *
 	 * VADDR and TAG will be preserved and not clobbered by this macro.
 	 */
-	/* XXX non-8K base page size support... */
 #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
 	sethi		%hi(swapper_tsb), REG1; \
 	or		REG1, %lo(swapper_tsb), REG1; \
-	srlx		VADDR, 13, REG2; \
-	and		REG2, (512 - 1), REG2; \
+	srlx		VADDR, PAGE_SHIFT, REG2; \
+	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
 	sllx		REG2, 4, REG2; \
 	add		REG1, REG2, REG2; \
 	ldda		[REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \
-- 
cgit v1.2.3-70-g09d2


From 86b818687d4894063ecd1190e54717a0cce8c009 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 31 Jan 2006 18:34:51 -0800
Subject: [SPARC64]: Fix race in LOAD_PER_CPU_BASE()

Since we use %g5 itself as a temporary, it can get clobbered
if we take an interrupt mid-stream, and we can thus end up with
the final %g5 value too early as a result of rtrap processing.

Set %g5 at the very end, atomically, to avoid this problem.
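
The structure of the fix, sketched in C (illustrative only; the real
code is the assembler macro below): do every intermediate step in
scratch storage, and publish the result with one final store, so an
asynchronous reader never observes a half-computed value:

	static volatile unsigned long percpu_base;	/* plays the role of %g5 */

	static void load_per_cpu_base(unsigned long cpu, unsigned long shift,
				      unsigned long base)
	{
		unsigned long tmp = cpu << shift;	/* scratch only (REG3) */

		percpu_base = tmp + base;		/* single, final write */
	}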

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/etrap.S    |  4 ++--
 arch/sparc64/kernel/rtrap.S    |  2 +-
 arch/sparc64/kernel/winfixup.S |  6 +++---
 include/asm-sparc64/cpudata.h  | 19 ++++++++++++-------
 4 files changed, 18 insertions(+), 13 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index db768101729..d974d18b15b 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -100,7 +100,7 @@ etrap_irq:
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
 		wrpr	%g0, ETRAP_PSTATE2, %pstate
 		mov	%l6, %g6
-		LOAD_PER_CPU_BASE(%g4, %g3)
+		LOAD_PER_CPU_BASE(%g4, %g3, %l1)
 		jmpl	%l2 + 0x4, %g0
 		 ldx	[%g6 + TI_TASK], %g4
 
@@ -250,7 +250,7 @@ scetrap:
 		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
 		mov	%l6, %g6
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
-		LOAD_PER_CPU_BASE(%g4, %g3)
+		LOAD_PER_CPU_BASE(%g4, %g3, %l1)
 		ldx	[%g6 + TI_TASK], %g4
 		done
 
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 89794ebdcbc..64bc03610bc 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -226,7 +226,7 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		brz,pt			%l3, 1f
 		 nop
 		/* Must do this before thread reg is clobbered below.  */
-		LOAD_PER_CPU_BASE(%g6, %g7)
+		LOAD_PER_CPU_BASE(%i0, %i1, %i2)
 1:
 		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
 		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index c0545d089c9..ade991b7d07 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -86,7 +86,7 @@ fill_fixup:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6
 	ldx		[%g6 + TI_TASK], %g4
-	LOAD_PER_CPU_BASE(%g1, %g2)
+	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
 
 	/* This is the same as below, except we handle this a bit special
 	 * since we must preserve %l5 and %l6, see comment above.
@@ -209,7 +209,7 @@ fill_fixup_mna:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6			! Get current back.
 	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-	LOAD_PER_CPU_BASE(%g1, %g2)
+	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
 	call		mem_address_unaligned
 	 add		%sp, PTREGS_OFF, %o0
 
@@ -312,7 +312,7 @@ fill_fixup_dax:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6			! Get current back.
 	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-	LOAD_PER_CPU_BASE(%g1, %g2)
+	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
 	call		spitfire_data_access_exception
 	 add		%sp, PTREGS_OFF, %o0
 
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 16d62891383..f83768883e9 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -101,20 +101,25 @@ extern void setup_tba(void);
 	ldx	[%g1 + %g6], %g6;
 
 /* Given the current thread info pointer in %g6, load the per-cpu
- * area base of the current processor into %g5.  REG1 and REG2 are
+ * area base of the current processor into %g5.  REG1, REG2, and REG3 are
  * clobbered.
+ *
+ * You absolutely cannot use %g5 as a temporary in this code.  The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved %g5 per-cpu base.  This can corrupt
+ * the calculations done by the macro mid-stream.
  */
 #ifdef CONFIG_SMP
-#define LOAD_PER_CPU_BASE(REG1, REG2)			\
+#define LOAD_PER_CPU_BASE(REG1, REG2, REG3)		\
 	ldub	[%g6 + TI_CPU], REG1;			\
-	sethi	%hi(__per_cpu_shift), %g5;		\
+	sethi	%hi(__per_cpu_shift), REG3;		\
 	sethi	%hi(__per_cpu_base), REG2;		\
-	ldx	[%g5 + %lo(__per_cpu_shift)], %g5;	\
+	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
 	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
-	sllx	REG1, %g5, %g5;				\
-	add	%g5, REG2, %g5;
+	sllx	REG1, REG3, REG3;			\
+	add	REG3, REG2, %g5;
 #else
-#define LOAD_PER_CPU_BASE(REG1, REG2)
+#define LOAD_PER_CPU_BASE(REG1, REG2, REG3)
 #endif
 
 #endif /* _SPARC64_CPUDATA_H */
-- 
cgit v1.2.3-70-g09d2


From b0fd4e49aea8a460afab7bc67cd618e2d19291d4 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 31 Jan 2006 23:13:29 -0800
Subject: [SPARC64]: Kill out-of-date commentary in asm-sparc64/tsb.h

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/tsb.h | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 1addd91d720..f384565212f 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -37,14 +37,6 @@
  * choose to use bit 47 in the tag.  Also, since we never map anything
  * at page zero in context zero, we use zero as an invalid tag entry.
  * When the lock bit is set, this forces a tag comparison failure.
- *
- * Currently, we allocate an 8K TSB per-process and we use it for both
- * I-TLB and D-TLB misses.  Perhaps at some point we'll add code that
- * monitors the number of active pages in the process as we get
- * major/minor faults, and grow the TSB in response.  The only trick
- * in implementing that is synchronizing the freeing of the old TSB
- * wrt.  parallel TSB updates occuring on other processors.  On
- * possible solution is to use RCU for the freeing of the TSB.
  */
 
 #define TSB_TAG_LOCK_BIT	47
-- 
cgit v1.2.3-70-g09d2


From 517af33237ecfc3c8a93b335365fa61e741ceca4 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 1 Feb 2006 15:55:21 -0800
Subject: [SPARC64]: Access TSB with physical addresses when possible.

This way we don't need to lock the TSB into the TLB.
The trick is that every TSB load/store is registered into
a special instruction patch section.  The default uses
virtual addresses, and the patch instructions use physical
address load/stores.

We can't do this on all chips because only cheetah+ and later
have the physical variant of the atomic quad load.
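
Schematically, each patched load/store records its address and the
physical-ASI replacement instruction in a table which boot code walks
once.  A C sketch mirroring the tsb_phys_patch() routine added below
(the struct layout matches the two 32-bit words emitted per entry):

	struct tsb_phys_patch_entry {
		unsigned int	addr;	/* low 32 bits of the insn address */
		unsigned int	insn;	/* replacement instruction */
	};

	static void apply_patches(struct tsb_phys_patch_entry *p,
				  struct tsb_phys_patch_entry *end)
	{
		while (p < end) {
			unsigned long addr = p->addr;

			*(unsigned int *) addr = p->insn;	/* patch in place */
			__asm__ __volatile__("flush %0"		/* I-cache sync */
					     : /* no outputs */
					     : "r" (addr));
			p++;
		}
	}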

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/dtlb_miss.S   |  2 +-
 arch/sparc64/kernel/itlb_miss.S   |  2 +-
 arch/sparc64/kernel/ktlb.S        | 20 ++++-----
 arch/sparc64/kernel/tsb.S         | 35 ++++++++++++---
 arch/sparc64/kernel/vmlinux.lds.S |  4 ++
 arch/sparc64/mm/init.c            | 32 +++++++++++++
 arch/sparc64/mm/tsb.c             | 95 ++++++++++++++++++++++++++-------------
 include/asm-sparc64/mmu.h         |  3 +-
 include/asm-sparc64/tsb.h         | 94 +++++++++++++++++++++++++++++++++++---
 9 files changed, 234 insertions(+), 53 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
index d0f1565cb56..2ef6f6e6e72 100644
--- a/arch/sparc64/kernel/dtlb_miss.S
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -4,7 +4,7 @@
 	srlx	%g6, 48, %g5			! Get context
 	brz,pn	%g5, kvmap_dtlb			! Context 0 processing
 	 nop					! Delay slot (fill me)
-	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB entry
+	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	nop					! Push branch to next I$ line
 	cmp	%g4, %g6			! Compare TAG
 
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 6b6c8fee04b..97facce27aa 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -4,7 +4,7 @@
 	srlx	%g6, 48, %g5			! Get context
 	brz,pn	%g5, kvmap_itlb			! Context 0 processing
 	 nop					! Delay slot (fill me)
-	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB entry
+	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	cmp	%g4, %g6			! Compare TAG
 	sethi	%hi(_PAGE_EXEC), %g4		! Setup exec check
 
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2b5e71b6888..9b415ab6db6 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -44,14 +44,14 @@ kvmap_itlb_tsb_miss:
 kvmap_itlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
@@ -69,9 +69,9 @@ kvmap_itlb_longpath:
 kvmap_itlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_itlb_load
 	 nop
@@ -79,9 +79,9 @@ kvmap_itlb_obp:
 kvmap_dtlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_dtlb_load
 	 nop
@@ -114,14 +114,14 @@ kvmap_linear_patch:
 kvmap_dtlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index e1dd37f5e53..ff6a79beb98 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -53,7 +53,7 @@ tsb_reload:
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, tsb_do_fault
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
@@ -64,7 +64,7 @@ tsb_reload:
 	and		%g2, %g4, %g2
 	cmp		%g2, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	TSB_WRITE(%g1, %g5, %g6)
 
@@ -131,13 +131,13 @@ winfix_trampoline:
 
 	/* Insert an entry into the TSB.
 	 *
-	 * %o0: TSB entry pointer
+	 * %o0: TSB entry pointer (virt or phys address)
 	 * %o1: tag
 	 * %o2:	pte
 	 */
 	.align	32
-	.globl	tsb_insert
-tsb_insert:
+	.globl	__tsb_insert
+__tsb_insert:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 	TSB_LOCK_TAG(%o0, %g2, %g3)
@@ -146,6 +146,31 @@ tsb_insert:
 	retl
 	 nop
 
+	/* Flush the given TSB entry if it has the matching
+	 * tag.
+	 *
+	 * %o0: TSB entry pointer (virt or phys address)
+	 * %o1:	tag
+	 */
+	.align	32
+	.globl	tsb_flush
+tsb_flush:
+	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
+1:	TSB_LOAD_TAG(%o0, %g1)
+	srlx	%g1, 32, %o3
+	andcc	%o3, %g2, %g0
+	bne,pn	%icc, 1b
+	 membar	#LoadLoad
+	cmp	%g1, %o1
+	bne,pt	%xcc, 2f
+	 clr	%o3
+	TSB_CAS_TAG(%o0, %g1, %o3)
+	cmp	%g1, %o3
+	bne,pn	%xcc, 1b
+	 nop
+2:	retl
+	 TSB_MEMBAR
+
 	/* Reload MMU related context switch state at
 	 * schedule() time.
 	 *
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 467d13a0d5c..71b943f1c9b 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -70,6 +70,10 @@ SECTIONS
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
   SECURITY_INIT
+  . = ALIGN(4);
+  __tsb_phys_patch = .;
+  .tsb_phys_patch : { *(.tsb_phys_patch) }
+  __tsb_phys_patch_end = .;
   . = ALIGN(8192); 
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 2c21d85de78..4893f3e2c33 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -39,6 +39,7 @@
 #include <asm/tlb.h>
 #include <asm/spitfire.h>
 #include <asm/sections.h>
+#include <asm/tsb.h>
 
 extern void device_scan(void);
 
@@ -244,6 +245,16 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 			     : "g1", "g7");
 }
 
+static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
+{
+	unsigned long tsb_addr = (unsigned long) ent;
+
+	if (tlb_type == cheetah_plus)
+		tsb_addr = __pa(tsb_addr);
+
+	__tsb_insert(tsb_addr, tag, pte);
+}
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
@@ -1040,6 +1051,24 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
 	return ~0UL;
 }
 
+static void __init tsb_phys_patch(void)
+{
+	struct tsb_phys_patch_entry *p;
+
+	p = &__tsb_phys_patch;
+	while (p < &__tsb_phys_patch_end) {
+		unsigned long addr = p->addr;
+
+		*(unsigned int *) addr = p->insn;
+		wmb();
+		__asm__ __volatile__("flush	%0"
+				     : /* no outputs */
+				     : "r" (addr));
+
+		p++;
+	}
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
@@ -1052,6 +1081,9 @@ void __init paging_init(void)
 	unsigned long end_pfn, pages_avail, shift;
 	unsigned long real_end, i;
 
+	if (tlb_type == cheetah_plus)
+		tsb_phys_patch();
+
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
 
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 1c4e5c2dfc5..787533f0104 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -20,12 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries
 	return vaddr & (nentries - 1);
 }
 
-static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
+static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
 {
-	if (context == ~0UL)
-		return 1;
-
-	return (entry->tag == ((vaddr >> 22) | (context << 48)));
+	return (tag == ((vaddr >> 22) | (context << 48)));
 }
 
 /* TSB flushes need only occur on the processor initiating the address
@@ -41,7 +38,7 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
-		if (tag_compare(ent, v, 0)) {
+		if (tag_compare(ent->tag, v, 0)) {
 			ent->tag = 0UL;
 			membar_storeload_storestore();
 		}
@@ -52,24 +49,31 @@ void flush_tsb_user(struct mmu_gather *mp)
 {
 	struct mm_struct *mm = mp->mm;
 	struct tsb *tsb = mm->context.tsb;
-	unsigned long ctx = ~0UL;
 	unsigned long nentries = mm->context.tsb_nentries;
+	unsigned long ctx, base;
 	int i;
 
-	if (CTX_VALID(mm->context))
-		ctx = CTX_HWBITS(mm->context);
+	if (unlikely(!CTX_VALID(mm->context)))
+		return;
+
+	ctx = CTX_HWBITS(mm->context);
 
+	if (tlb_type == cheetah_plus)
+		base = __pa(tsb);
+	else
+		base = (unsigned long) tsb;
+	
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
-		struct tsb *ent;
+		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		ent = &tsb[tsb_hash(v, nentries)];
-		if (tag_compare(ent, v, ctx)) {
-			ent->tag = 0UL;
-			membar_storeload_storestore();
-		}
+		hash = tsb_hash(v, nentries);
+		ent = base + (hash * sizeof(struct tsb));
+		tag = (v >> 22UL) | (ctx << 48UL);
+
+		tsb_flush(ent, tag);
 	}
 }
 
@@ -84,6 +88,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
 	       _PAGE_CV    | _PAGE_P | _PAGE_W);
 	tsb_paddr = __pa(mm->context.tsb);
+	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
 	 * in one TLB entry.
@@ -144,13 +149,23 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		BUG();
 	};
 
-	tsb_reg |= base;
-	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
-	tte |= (tsb_paddr & ~(page_sz - 1UL));
+	if (tlb_type == cheetah_plus) {
+		/* Physical mapping, no locked TLB entry for TSB.  */
+		tsb_reg |= tsb_paddr;
+
+		mm->context.tsb_reg_val = tsb_reg;
+		mm->context.tsb_map_vaddr = 0;
+		mm->context.tsb_map_pte = 0;
+	} else {
+		tsb_reg |= base;
+		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
+		tte |= (tsb_paddr & ~(page_sz - 1UL));
+
+		mm->context.tsb_reg_val = tsb_reg;
+		mm->context.tsb_map_vaddr = base;
+		mm->context.tsb_map_pte = tte;
+	}
 
-	mm->context.tsb_reg_val = tsb_reg;
-	mm->context.tsb_map_vaddr = base;
-	mm->context.tsb_map_pte = tte;
 }
 
 /* The page tables are locked against modifications while this
@@ -168,13 +183,21 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 	for (i = 0; i < old_nentries; i++) {
 		register unsigned long tag asm("o4");
 		register unsigned long pte asm("o5");
-		unsigned long v;
-		unsigned int hash;
-
-		__asm__ __volatile__(
-			"ldda [%2] %3, %0"
-			: "=r" (tag), "=r" (pte)
-			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
+		unsigned long v, hash;
+
+		if (tlb_type == cheetah_plus) {
+			__asm__ __volatile__(
+				"ldda [%2] %3, %0"
+				: "=r" (tag), "=r" (pte)
+				: "r" (__pa(&old_tsb[i])),
+				  "i" (ASI_QUAD_LDD_PHYS));
+		} else {
+			__asm__ __volatile__(
+				"ldda [%2] %3, %0"
+				: "=r" (tag), "=r" (pte)
+				: "r" (&old_tsb[i]),
+				  "i" (ASI_NUCLEUS_QUAD_LDD));
+		}
 
 		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
 			continue;
@@ -198,8 +221,20 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 		v |= (i & (512UL - 1UL)) << 13UL;
 
 		hash = tsb_hash(v, new_nentries);
-		new_tsb[hash].tag = tag;
-		new_tsb[hash].pte = pte;
+		if (tlb_type == cheetah_plus) {
+			__asm__ __volatile__(
+				"stxa	%0, [%1] %2\n\t"
+				"stxa	%3, [%4] %2"
+				: /* no outputs */
+				: "r" (tag),
+				  "r" (__pa(&new_tsb[hash].tag)),
+				  "i" (ASI_PHYS_USE_EC),
+				  "r" (pte),
+				  "r" (__pa(&new_tsb[hash].pte)));
+		} else {
+			new_tsb[hash].tag = tag;
+			new_tsb[hash].pte = pte;
+		}
 	}
 }
 
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 18f98edfbcd..55e622711b9 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -97,7 +97,8 @@ struct tsb {
 	unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
 
-extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte);
+extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+extern void tsb_flush(unsigned long ent, unsigned long tag);
 
 typedef struct {
 	unsigned long	sparc64_ctx_val;
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index f384565212f..44709cde561 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -44,7 +44,89 @@
 
 #define TSB_MEMBAR	membar	#StoreStore
 
+/* Some cpus support physical address quad loads.  We want to use
+ * those if possible so we don't need to hard-lock the TSB mapping
+ * into the TLB.  We encode some instruction patching in order to
+ * support this.
+ *
+ * The kernel TSB is locked into the TLB by virtue of being in the
+ * kernel image, so we don't play these games for swapper_tsb access.
+ */
+#ifndef __ASSEMBLY__
+struct tsb_phys_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+#endif
+#define TSB_LOAD_QUAD(TSB, REG)	\
+661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
+	.previous
+
+#define TSB_LOAD_TAG_HIGH(TSB, REG) \
+661:	lduwa		[TSB] ASI_N, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
+	.previous
+
+#define TSB_LOAD_TAG(TSB, REG) \
+661:	ldxa		[TSB] ASI_N, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
+	.previous
+
+#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
+661:	casa		[TSB] ASI_N, REG1, REG2; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+	.previous
+
+#define TSB_CAS_TAG(TSB, REG1, REG2) \
+661:	casxa		[TSB] ASI_N, REG1, REG2; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+	.previous
+
+#define TSB_STORE(ADDR, VAL) \
+661:	stxa		VAL, [ADDR] ASI_N; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
+	.previous
+
 #define TSB_LOCK_TAG(TSB, REG1, REG2)	\
+99:	TSB_LOAD_TAG_HIGH(TSB, REG1);	\
+	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
+	andcc	REG1, REG2, %g0;	\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	TSB_CAS_TAG_HIGH(TSB, REG1, REG2);	\
+	cmp	REG1, REG2;		\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	TSB_MEMBAR
+
+#define TSB_WRITE(TSB, TTE, TAG) \
+	add	TSB, 0x8, TSB;   \
+	TSB_STORE(TSB, TTE);     \
+	sub	TSB, 0x8, TSB;   \
+	TSB_MEMBAR;              \
+	TSB_STORE(TSB, TAG);
+
+#define KTSB_LOAD_QUAD(TSB, REG) \
+	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;
+
+#define KTSB_STORE(ADDR, VAL) \
+	stxa		VAL, [ADDR] ASI_N;
+
+#define KTSB_LOCK_TAG(TSB, REG1, REG2)	\
 99:	lduwa	[TSB] ASI_N, REG1;	\
 	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
 	andcc	REG1, REG2, %g0;	\
@@ -56,10 +138,12 @@
 	 nop;				\
 	TSB_MEMBAR
 
-#define TSB_WRITE(TSB, TTE, TAG)	   \
-	stx		TTE, [TSB + 0x08]; \
-	TSB_MEMBAR;			   \
-	stx		TAG, [TSB + 0x00];
+#define KTSB_WRITE(TSB, TTE, TAG) \
+	add	TSB, 0x8, TSB;   \
+	stxa	TTE, [TSB] ASI_N;     \
+	sub	TSB, 0x8, TSB;   \
+	TSB_MEMBAR;              \
+	stxa	TAG, [TSB] ASI_N;
 
 	/* Do a kernel page table walk.  Leaves physical PTE pointer in
 	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
@@ -157,7 +241,7 @@
 	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
 	sllx		REG2, 4, REG2; \
 	add		REG1, REG2, REG2; \
-	ldda		[REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \
+	KTSB_LOAD_QUAD(REG2, REG3); \
 	cmp		REG3, TAG; \
 	be,a,pt		%xcc, OK_LABEL; \
 	 mov		REG4, REG1;
-- 
cgit v1.2.3-70-g09d2
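
Note: the .tsb_phys_patch machinery above reduces to a boot-time walk
over a linker-collected table of {instruction address, replacement
opcode} pairs.  A minimal C sketch of that idea follows, mirroring
tsb_phys_patch() in the diff; flush_insn() is a hypothetical stand-in
for the inline "flush" asm the real code uses:

	struct insn_patch_entry {
		unsigned int	addr;	/* address of the 661: instruction site */
		unsigned int	insn;	/* replacement opcode assembled into the section */
	};

	/* Section bounds emitted by the linker script, as in vmlinux.lds.S. */
	extern struct insn_patch_entry __patch, __patch_end;
	extern void flush_insn(unsigned long addr);	/* hypothetical */

	static void __init apply_insn_patches(void)
	{
		struct insn_patch_entry *p;

		for (p = &__patch; p < &__patch_end; p++) {
			*(unsigned int *) (unsigned long) p->addr = p->insn;
			wmb();			/* order the store before the flush */
			flush_insn(p->addr);	/* make the I-cache see the new insn */
		}
	}

Each site is recorded by the ".word 661b" directive, and the
replacement instruction assembled immediately after it in the section
becomes the insn field.
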


From 7bec08e38a7d0f088994f6eec9b6374652ea71fb Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Thu, 2 Feb 2006 01:20:18 -0800
Subject: [SPARC64]: Correctable ECC errors cannot occur at trap level > 0.

They are disrupting, which by the sparc v9 definition means they
can only occur when interrupts are enabled in the %pstate register.
Interrupts are never enabled in any of the trap handling code
running at trap levels > 0.

So just mark it as an unexpected trap.

This allows us to kill off the cee_stuff member of struct thread_info.
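
Since cee_stuff was a __u64, every later thread_info member slides down
by 8 bytes, which is why the hand-maintained TI_* constants shift from
0x4a0/0x4c8/0x4d0 to 0x498/0x4c0/0x4c8 in the diff below.  trap_init()
cross-checks such constants against the real struct layout at boot, as
the traps.c hunk shows; a simplified sketch of that style of check (the
panic helper name is hypothetical):

	if (TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS    != offsetof(struct thread_info, kern_una_regs))
		thread_info_layout_is_bogus();	/* hypothetical; must not boot */
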

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/traps.c       |  1 -
 arch/sparc64/kernel/ttable.S      | 18 +-----------------
 include/asm-sparc64/thread_info.h |  9 +++------
 3 files changed, 4 insertions(+), 24 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index f47f4874253..7e52e897266 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2169,7 +2169,6 @@ void __init trap_init(void)
 	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
 	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
 	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
-	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
 	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
 	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
 	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 2fb7a33993c..99531424c59 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -222,23 +222,7 @@ tl1_resv05c:	BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
 tl1_ivec:	TRAP_IVEC
 tl1_paw:	TRAPTL1(do_paw_tl1)
 tl1_vaw:	TRAPTL1(do_vaw_tl1)
-
-		/* The grotty trick to save %g1 into current->thread.cee_stuff
-		 * is because when we take this trap we could be interrupting
-		 * trap code already using the trap alternate global registers.
-		 *
-		 * We cross our fingers and pray that this store/load does
-		 * not cause yet another CEE trap.
-		 */
-tl1_cee:	membar	#Sync
-		stx	%g1, [%g6 + TI_CEE_STUFF]
-		ldxa	[%g0] ASI_AFSR, %g1
-		membar	#Sync
-		stxa	%g1, [%g0] ASI_AFSR
-		membar	#Sync
-		ldx	[%g6 + TI_CEE_STUFF], %g1
-		retry
-
+tl1_cee:	BTRAPTL1(0x63)
 tl1_iamiss:	BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
 tl1_damiss:
 #include	"dtlb_miss.S"
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index ac9d068aab4..2ebf7f27bf9 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -64,8 +64,6 @@ struct thread_info {
 	__u64			kernel_cntd0, kernel_cntd1;
 	__u64			pcr_reg;
 
-	__u64			cee_stuff;
-
 	struct restart_block	restart_block;
 
 	struct pt_regs		*kern_una_regs;
@@ -104,10 +102,9 @@ struct thread_info {
 #define TI_KERN_CNTD0	0x00000480
 #define TI_KERN_CNTD1	0x00000488
 #define TI_PCR		0x00000490
-#define TI_CEE_STUFF	0x00000498
-#define TI_RESTART_BLOCK 0x000004a0
-#define TI_KUNA_REGS	0x000004c8
-#define TI_KUNA_INSN	0x000004d0
+#define TI_RESTART_BLOCK 0x00000498
+#define TI_KUNA_REGS	0x000004c0
+#define TI_KUNA_INSN	0x000004c8
 #define TI_FPREGS	0x00000500
 
 /* We embed this in the uppermost byte of thread_info->flags */
-- 
cgit v1.2.3-70-g09d2


From 92704a1c63c3b481870d02636d0b5a70c7e21cd1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sun, 26 Feb 2006 23:27:19 -0800
Subject: [SPARC64]: Refine code sequences to get the cpu id.

On uniprocessor, it's always zero, so optimize for that.

On SMP, the jmpl to the stub kills the return address stack in the cpu
branch prediction logic, so expand the code sequence inline and use a
code patching section to fix things up.  This also allows better and
explicit register selection, which will be taken advantage of in a
future changeset.

The hard_smp_processor_id() function is big, so do not inline it.

Fix up tests for Jalapeno so that they also match Serrano chips.  These
tests want "jbus Ultra-IIIi" cases to match, so that is what we should
test for.
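
To make the patching concrete: each __GET_CPUID expansion records one
entry in the .cpuid_patch section, holding the instruction address plus
one four-instruction replacement sequence per cpu family.  A hedged C
view of the boot-time application, mirroring per_cpu_patch() in the
diff below (pick_insns() and flush_insn() are hypothetical helpers):

	struct cpuid_patch_entry {
		unsigned int	addr;			/* the 661: site */
		unsigned int	cheetah_safari[4];
		unsigned int	cheetah_jbus[4];
		unsigned int	starfire[4];
	};

	extern unsigned int *pick_insns(struct cpuid_patch_entry *p);
	extern void flush_insn(unsigned long addr);	/* hypothetical */

	static void __init patch_one_cpuid_site(struct cpuid_patch_entry *p)
	{
		unsigned int *insns = pick_insns(p);	/* safari, jbus, or starfire */
		int i;

		for (i = 0; i < 4; i++) {
			*(unsigned int *) (unsigned long) (p->addr + i * 4) = insns[i];
			flush_insn(p->addr + i * 4);
		}
	}
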

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S       | 84 +++---------------------------------
 arch/sparc64/kernel/irq.c         |  4 +-
 arch/sparc64/kernel/setup.c       | 56 +++++++++++++++++++++++-
 arch/sparc64/kernel/smp.c         |  9 ++--
 arch/sparc64/kernel/traps.c       |  4 +-
 arch/sparc64/kernel/vmlinux.lds.S |  3 ++
 include/asm-sparc64/cpudata.h     | 89 ++++++++++++++++++++++++++++++---------
 include/asm-sparc64/head.h        |  1 +
 include/asm-sparc64/smp.h         | 28 +-----------
 9 files changed, 144 insertions(+), 134 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 563fa4ec33f..b3511ff5d04 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1628,84 +1628,10 @@ __flushw_user:
 2:	retl
 	 nop
 
-	/* Read cpu ID from hardware, return in %g6.
-	 * (callers_pc - 4) is in %g1.  Patched at boot time.
-	 *
-	 * Default is spitfire implementation.
-	 *
-	 * The instruction sequence needs to be 5 instructions
-	 * in order to fit the longest implementation, which is
-	 * currently starfire.
-	 */
-	.align		32
-	.globl		__get_cpu_id
-__get_cpu_id:
-	ldxa		[%g0] ASI_UPA_CONFIG, %g6
-	srlx		%g6, 17, %g6
-	jmpl		%g1 + 0x4, %g0
-	 and		%g6, 0x1f, %g6
-	nop
-
-__get_cpu_id_cheetah_safari:
-	ldxa		[%g0] ASI_SAFARI_CONFIG, %g6
-	srlx		%g6, 17, %g6
-	jmpl		%g1 + 0x4, %g0
-	 and		%g6, 0x3ff, %g6
-	nop
-
-__get_cpu_id_cheetah_jbus:
-	ldxa		[%g0] ASI_JBUS_CONFIG, %g6
-	srlx		%g6, 17, %g6
-	jmpl		%g1 + 0x4, %g0
-	 and		%g6, 0x1f, %g6
-	nop
-
-__get_cpu_id_starfire:
-	sethi		%hi(0x1fff40000d0 >> 9), %g6
-	sllx		%g6, 9, %g6
-	or		%g6, 0xd0, %g6
-	jmpl		%g1 + 0x4, %g0
-	 lduwa		[%g6] ASI_PHYS_BYPASS_EC_E, %g6
-
-	.globl		per_cpu_patch
-per_cpu_patch:
-	sethi		%hi(this_is_starfire), %o0
-	lduw		[%o0 + %lo(this_is_starfire)], %o1
-	sethi		%hi(__get_cpu_id_starfire), %o0
-	brnz,pn		%o1, 10f
-	 or		%o0, %lo(__get_cpu_id_starfire), %o0
-	sethi		%hi(tlb_type), %o0
-	lduw		[%o0 + %lo(tlb_type)], %o1
-	brz,pt		%o1, 11f
-	 nop
-	rdpr		%ver, %o0
-	srlx		%o0, 32, %o0
-	sethi		%hi(0x003e0016), %o1
-	or		%o1, %lo(0x003e0016), %o1
-	cmp		%o0, %o1
-	sethi		%hi(__get_cpu_id_cheetah_jbus), %o0
-	be,pn		%icc, 10f
-	 or		%o0, %lo(__get_cpu_id_cheetah_jbus), %o0
-	sethi		%hi(__get_cpu_id_cheetah_safari), %o0
-	or		%o0, %lo(__get_cpu_id_cheetah_safari), %o0
-10:
-	sethi		%hi(__get_cpu_id), %o1
-	or		%o1, %lo(__get_cpu_id), %o1
-	lduw		[%o0 + 0x00], %o2
-	stw		%o2, [%o1 + 0x00]
-	flush		%o1 + 0x00
-	lduw		[%o0 + 0x04], %o2
-	stw		%o2, [%o1 + 0x04]
-	flush		%o1 + 0x04
-	lduw		[%o0 + 0x08], %o2
-	stw		%o2, [%o1 + 0x08]
-	flush		%o1 + 0x08
-	lduw		[%o0 + 0x0c], %o2
-	stw		%o2, [%o1 + 0x0c]
-	flush		%o1 + 0x0c
-	lduw		[%o0 + 0x10], %o2
-	stw		%o2, [%o1 + 0x10]
-	flush		%o1 + 0x10
-11:
+#ifdef CONFIG_SMP
+	.globl		hard_smp_processor_id
+hard_smp_processor_id:
+	__GET_CPUID(%o0)
 	retl
 	 nop
+#endif
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3e48af2769d..d069a6feb53 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -39,6 +39,7 @@
 #include <asm/cache.h>
 #include <asm/cpudata.h>
 #include <asm/auxio.h>
+#include <asm/head.h>
 
 #ifdef CONFIG_SMP
 static void distribute_irqs(void);
@@ -153,7 +154,8 @@ void enable_irq(unsigned int irq)
 		unsigned long ver;
 
 		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-		if ((ver >> 32) == 0x003e0016) {
+		if ((ver >> 32) == __JALAPENO_ID ||
+		    (ver >> 32) == __SERRANO_ID) {
 			/* We set it to our JBUS ID. */
 			__asm__ __volatile__("ldxa [%%g0] %1, %0"
 					     : "=r" (tid)
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 59a70301a6c..f751d11926b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -490,6 +490,58 @@ void register_prom_callbacks(void)
 		   "' linux-.soft2 to .soft2");
 }
 
+static void __init per_cpu_patch(void)
+{
+#ifdef CONFIG_SMP
+	struct cpuid_patch_entry *p;
+	unsigned long ver;
+	int is_jbus;
+
+	if (tlb_type == spitfire && !this_is_starfire)
+		return;
+
+	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
+		   (ver >> 32) == __SERRANO_ID);
+
+	p = &__cpuid_patch;
+	while (p < &__cpuid_patch_end) {
+		unsigned long addr = p->addr;
+		unsigned int *insns;
+
+		switch (tlb_type) {
+		case spitfire:
+			insns = &p->starfire[0];
+			break;
+		case cheetah:
+		case cheetah_plus:
+			if (is_jbus)
+				insns = &p->cheetah_jbus[0];
+			else
+				insns = &p->cheetah_safari[0];
+			break;
+		default:
+			prom_printf("Unknown cpu type, halting.\n");
+			prom_halt();
+		};
+
+		*(unsigned int *) (addr +  0) = insns[0];
+		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
+
+		*(unsigned int *) (addr +  4) = insns[1];
+		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));
+
+		*(unsigned int *) (addr +  8) = insns[2];
+		__asm__ __volatile__("flush	%0" : : "r" (addr +  8));
+
+		*(unsigned int *) (addr + 12) = insns[3];
+		__asm__ __volatile__("flush	%0" : : "r" (addr + 12));
+
+		p++;
+	}
+#endif
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@@ -507,8 +559,8 @@ void __init setup_arch(char **cmdline_p)
 	/* Work out if we are starfire early on */
 	check_if_starfire();
 
-	/* Now we know enough to patch the __get_cpu_id()
-	 * trampoline used by trap code.
+	/* Now we know enough to patch the get_cpuid sequences
+	 * used by trap code.
 	 */
 	per_cpu_patch();
 
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 0e7552546d3..16b8eca9754 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -424,7 +424,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
 	u64 pstate, ver;
-	int nack_busy_id, is_jalapeno;
+	int nack_busy_id, is_jbus;
 
 	if (cpus_empty(mask))
 		return;
@@ -434,7 +434,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
 	 * derivative processor.
 	 */
 	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-	is_jalapeno = ((ver >> 32) == 0x003e0016);
+	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
+		   (ver >> 32) == __SERRANO_ID);
 
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 
@@ -459,7 +460,7 @@ retry:
 		for_each_cpu_mask(i, mask) {
 			u64 target = (i << 14) | 0x70;
 
-			if (!is_jalapeno)
+			if (!is_jbus)
 				target |= (nack_busy_id << 24);
 			__asm__ __volatile__(
 				"stxa	%%g0, [%0] %1\n\t"
@@ -512,7 +513,7 @@ retry:
 			for_each_cpu_mask(i, mask) {
 				u64 check_mask;
 
-				if (is_jalapeno)
+				if (is_jbus)
 					check_mask = (0x2UL << (2*i));
 				else
 					check_mask = (0x2UL <<
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7e52e897266..1c4744c047a 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <asm/processor.h>
 #include <asm/timer.h>
 #include <asm/kdebug.h>
+#include <asm/head.h>
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
@@ -788,7 +789,8 @@ void __init cheetah_ecache_flush_init(void)
 		cheetah_error_log[i].afsr = CHAFSR_INVALID;
 
 	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-	if ((ver >> 32) == 0x003e0016) {
+	if ((ver >> 32) == __JALAPENO_ID ||
+	    (ver >> 32) == __SERRANO_ID) {
 		cheetah_error_table = &__jalapeno_error_table[0];
 		cheetah_afsr_errors = JPAFSR_ERRORS;
 	} else if ((ver >> 32) == 0x003e0015) {
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 71b943f1c9b..1639d9c935c 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -74,6 +74,9 @@ SECTIONS
   __tsb_phys_patch = .;
   .tsb_phys_patch : { *(.tsb_phys_patch) }
   __tsb_phys_patch_end = .;
+  __cpuid_patch = .;
+  .cpuid_patch : { *(.cpuid_patch) }
+  __cpuid_patch_end = .;
   . = ALIGN(8192); 
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index f83768883e9..da54b4f3540 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -60,9 +60,18 @@ struct trap_per_cpu {
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(void);
-extern void per_cpu_patch(void);
 extern void setup_tba(void);
 
+#ifdef CONFIG_SMP
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	cheetah_safari[4];
+	unsigned int	cheetah_jbus[4];
+	unsigned int	starfire[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+#endif
+
 #endif /* !(__ASSEMBLY__) */
 
 #define TRAP_PER_CPU_THREAD	0x00
@@ -70,35 +79,58 @@ extern void setup_tba(void);
 
 #define TRAP_BLOCK_SZ_SHIFT	6
 
-/* Clobbers %g1, loads %g6 with local processor's cpuid */
-#define __GET_CPUID			\
-	ba,pt	%xcc, __get_cpu_id;	\
-	 rd	%pc, %g1;
+#ifdef CONFIG_SMP
+
+#define __GET_CPUID(REG)				\
+	/* Spitfire implementation (default). */	\
+661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	 and		REG, 0x1f, REG;			\
+	nop;						\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word		661b;				\
+	/* Cheetah Safari implementation. */		\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x3ff, REG;		\
+	nop;						\
+	/* Cheetah JBUS implementation. */		\
+	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	/* Starfire implementation. */			\
+	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
+	sllx		REG, 9, REG;			\
+	or		REG, 0xd0, REG;			\
+	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
+	.previous;
 
 /* Clobbers %g1, current address space PGD phys address into %g7.  */
 #define TRAP_LOAD_PGD_PHYS			\
-	__GET_CPUID				\
-	sllx	%g6, TRAP_BLOCK_SZ_SHIFT, %g6;	\
+	__GET_CPUID(%g1)			\
 	sethi	%hi(trap_block), %g7;		\
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1;	\
 	or	%g7, %lo(trap_block), %g7;	\
-	add	%g7, %g6, %g7;			\
+	add	%g7, %g1, %g7;			\
 	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7;
 
 /* Clobbers %g1, loads local processor's IRQ work area into %g6.  */
 #define TRAP_LOAD_IRQ_WORK			\
-	__GET_CPUID				\
-	sethi	%hi(__irq_work), %g1;		\
-	sllx	%g6, 6, %g6;			\
-	or	%g1, %lo(__irq_work), %g1;	\
-	add	%g1, %g6, %g6;
+	__GET_CPUID(%g1)			\
+	sethi	%hi(__irq_work), %g6;		\
+	sllx	%g1, 6, %g1;			\
+	or	%g6, %lo(__irq_work), %g6;	\
+	add	%g6, %g1, %g6;
 
 /* Clobbers %g1, loads %g6 with current thread info pointer.  */
 #define TRAP_LOAD_THREAD_REG			\
-	__GET_CPUID				\
-	sllx	%g6, TRAP_BLOCK_SZ_SHIFT, %g6;	\
-	sethi	%hi(trap_block), %g1;		\
-	or	%g1, %lo(trap_block), %g1;	\
-	ldx	[%g1 + %g6], %g6;
+	__GET_CPUID(%g1)			\
+	sethi	%hi(trap_block), %g6;		\
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1;	\
+	or	%g6, %lo(trap_block), %g6;	\
+	ldx	[%g6 + %g1], %g6;
 
 /* Given the current thread info pointer in %g6, load the per-cpu
  * area base of the current processor into %g5.  REG1, REG2, and REG3 are
@@ -109,7 +141,6 @@ extern void setup_tba(void);
  * trap will load the fully resolved %g5 per-cpu base.  This can corrupt
  * the calculations done by the macro mid-stream.
  */
-#ifdef CONFIG_SMP
 #define LOAD_PER_CPU_BASE(REG1, REG2, REG3)		\
 	ldub	[%g6 + TI_CPU], REG1;			\
 	sethi	%hi(__per_cpu_shift), REG3;		\
@@ -118,8 +149,26 @@ extern void setup_tba(void);
 	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
 	sllx	REG1, REG3, REG3;			\
 	add	REG3, REG2, %g5;
+
 #else
+
+/* Uniprocessor versions, we know the cpuid is zero.  */
+#define TRAP_LOAD_PGD_PHYS			\
+	sethi	%hi(trap_block), %g7;		\
+	or	%g7, %lo(trap_block), %g7;	\
+	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7;
+
+#define TRAP_LOAD_IRQ_WORK			\
+	sethi	%hi(__irq_work), %g6;		\
+	or	%g6, %lo(__irq_work), %g6;
+
+#define TRAP_LOAD_THREAD_REG			\
+	sethi	%hi(trap_block), %g6;		\
+	ldx	[%g6 + %lo(trap_block)], %g6;
+
+/* No per-cpu areas on uniprocessor, so no need to load %g5.  */
 #define LOAD_PER_CPU_BASE(REG1, REG2, REG3)
-#endif
+
+#endif /* !(CONFIG_SMP) */
 
 #endif /* _SPARC64_CPUDATA_H */
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 0abd3a674e8..731c842f3d1 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -10,6 +10,7 @@
 
 #define __CHEETAH_ID	0x003e0014
 #define __JALAPENO_ID	0x003e0016
+#define __SERRANO_ID	0x003e0022
 
 #define CHEETAH_MANUF		0x003e
 #define CHEETAH_IMPL		0x0014 /* Ultra-III   */
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 473edb2603e..ad1d35a7d13 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -37,33 +37,7 @@ extern cpumask_t phys_cpu_present_map;
  *	General functions that each host system must provide.
  */
 
-static __inline__ int hard_smp_processor_id(void)
-{
-	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		unsigned long cfg, ver;
-		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
-		if ((ver >> 32) == 0x003e0016) {
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (cfg)
-					     : "i" (ASI_JBUS_CONFIG));
-			return ((cfg >> 17) & 0x1f);
-		} else {
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (cfg)
-					     : "i" (ASI_SAFARI_CONFIG));
-			return ((cfg >> 17) & 0x3ff);
-		}
-	} else if (this_is_starfire != 0) {
-		return starfire_hard_smp_processor_id();
-	} else {
-		unsigned long upaconfig;
-		__asm__ __volatile__("ldxa	[%%g0] %1, %0"
-				     : "=r" (upaconfig)
-				     : "i" (ASI_UPA_CONFIG));
-		return ((upaconfig >> 17) & 0x1f);
-	}
-}
-
+extern int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern void smp_setup_cpu_possible_map(void);
-- 
cgit v1.2.3-70-g09d2


From ffe483d55229fadbaf4cc7316d47024a24ecd1a2 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 2 Feb 2006 21:55:10 -0800
Subject: [SPARC64]: Add explicit register args to trap state loading macros.

This, as well as making the code cleaner, allows a simplification in
the TSB miss handling path.
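
The simplification is visible in tsb.S below: with implicit register
usage, tsb_miss_page_table_walk had to shuffle %g1 and %g6 into scratch
registers around TRAP_LOAD_PGD_PHYS and back again; with explicit
DEST/TMP arguments the caller simply names two dead registers.  The
same trade-off can be illustrated with ordinary C macros (a toy sketch,
all names hypothetical):

	/* Implicit form: always writes 'ctx' and clobbers 'tmp', so a caller
	 * with live values in either must save and restore around the call. */
	#define LOAD_TRAP_BLOCK()		\
		do { tmp = cpu_id(); ctx = trap_blocks[tmp]; } while (0)

	/* Explicit form: the caller chooses the output and scratch slots. */
	#define LOAD_TRAP_BLOCK_IN(DEST, TMP)	\
		do { (TMP) = cpu_id(); (DEST) = trap_blocks[(TMP)]; } while (0)
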

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S    |  8 ++--
 arch/sparc64/kernel/etrap.S    | 10 ++---
 arch/sparc64/kernel/rtrap.S    |  2 +-
 arch/sparc64/kernel/tsb.S      |  9 +----
 arch/sparc64/kernel/winfixup.S | 18 ++++-----
 include/asm-sparc64/cpudata.h  | 88 +++++++++++++++++++++---------------------
 6 files changed, 64 insertions(+), 71 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index b3511ff5d04..4ca3ea0beaf 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -50,7 +50,7 @@ do_fpdis:
 	add		%g0, %g0, %g0
 	ba,a,pt		%xcc, rtrap_clr_l6
 
-1:	TRAP_LOAD_THREAD_REG
+1:	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	ldub		[%g6 + TI_FPSAVED], %g5
 	wr		%g0, FPRS_FEF, %fprs
 	andcc		%g5, FPRS_FEF, %g0
@@ -190,7 +190,7 @@ fp_other_bounce:
 	.globl		do_fpother_check_fitos
 	.align		32
 do_fpother_check_fitos:
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	sethi		%hi(fp_other_bounce - 4), %g7
 	or		%g7, %lo(fp_other_bounce - 4), %g7
 
@@ -378,7 +378,7 @@ do_ivec:
 	sllx		%g2, %g4, %g2
 	sllx		%g4, 2, %g4
 
-	TRAP_LOAD_IRQ_WORK
+	TRAP_LOAD_IRQ_WORK(%g6, %g1)
 
 	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
 	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
@@ -422,7 +422,7 @@ setcc:
 
 	.globl		utrap_trap
 utrap_trap:		/* %g3=handler,%g4=level */
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	ldx		[%g6 + TI_UTRAPS], %g1
 	brnz,pt		%g1, invoke_utrap
 	 nop
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index d974d18b15b..b5f6bc52d91 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -31,7 +31,7 @@
 		.globl	etrap, etrap_irq, etraptl1
 etrap:		rdpr	%pil, %g2
 etrap_irq:
-		TRAP_LOAD_THREAD_REG
+		TRAP_LOAD_THREAD_REG(%g6, %g1)
 		rdpr	%tstate, %g1
 		sllx	%g2, 20, %g3
 		andcc	%g1, TSTATE_PRIV, %g0
@@ -100,7 +100,7 @@ etrap_irq:
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
 		wrpr	%g0, ETRAP_PSTATE2, %pstate
 		mov	%l6, %g6
-		LOAD_PER_CPU_BASE(%g4, %g3, %l1)
+		LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1)
 		jmpl	%l2 + 0x4, %g0
 		 ldx	[%g6 + TI_TASK], %g4
 
@@ -124,7 +124,7 @@ etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 		 *	0x58	TL4's TT
 		 *	0x60	TL
 		 */
-		TRAP_LOAD_THREAD_REG
+		TRAP_LOAD_THREAD_REG(%g6, %g1)
 		sub	%sp, ((4 * 8) * 4) + 8, %g2
 		rdpr	%tl, %g1
 
@@ -179,7 +179,7 @@ etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 		.align	64
 		.globl	scetrap
 scetrap:
-		TRAP_LOAD_THREAD_REG
+		TRAP_LOAD_THREAD_REG(%g6, %g1)
 		rdpr	%pil, %g2
 		rdpr	%tstate, %g1
 		sllx	%g2, 20, %g3
@@ -250,7 +250,7 @@ scetrap:
 		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
 		mov	%l6, %g6
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
-		LOAD_PER_CPU_BASE(%g4, %g3, %l1)
+		LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1)
 		ldx	[%g6 + TI_TASK], %g4
 		done
 
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 64bc03610bc..61bd45e7697 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -226,7 +226,7 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		brz,pt			%l3, 1f
 		 nop
 		/* Must do this before thread reg is clobbered below.  */
-		LOAD_PER_CPU_BASE(%i0, %i1, %i2)
+		LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
 1:
 		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
 		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index ff6a79beb98..28e38b168dd 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -36,14 +36,7 @@ tsb_miss_itlb:
 	 nop
 
 tsb_miss_page_table_walk:
-	/* This clobbers %g1 and %g6, preserve them... */
-	mov		%g1, %g5
-	mov		%g6, %g2
-
-	TRAP_LOAD_PGD_PHYS
-
-	mov		%g2, %g6
-	mov		%g5, %g1
+	TRAP_LOAD_PGD_PHYS(%g7, %g5)
 
 	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 320a762d051..211021ae6e8 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -40,7 +40,7 @@ set_pcontext:
 	 */
 	.globl	fill_fixup, spill_fixup
 fill_fixup:
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	rdpr		%tstate, %g1
 	andcc		%g1, TSTATE_PRIV, %g0
 	or		%g4, FAULT_CODE_WINFIXUP, %g4
@@ -86,7 +86,7 @@ fill_fixup:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6
 	ldx		[%g6 + TI_TASK], %g4
-	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
+	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
 
 	/* This is the same as below, except we handle this a bit special
 	 * since we must preserve %l5 and %l6, see comment above.
@@ -105,7 +105,7 @@ fill_fixup:
 	 * do not touch %g7 or %g2 so we handle the two cases fine.
 	 */
 spill_fixup:
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	ldx		[%g6 + TI_FLAGS], %g1
 	andcc		%g1, _TIF_32BIT, %g0
 	ldub		[%g6 + TI_WSAVED], %g1
@@ -181,7 +181,7 @@ winfix_mna:
 	wrpr		%g3, %tnpc
 	done
 fill_fixup_mna:
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	rdpr		%tstate, %g1
 	andcc		%g1, TSTATE_PRIV, %g0
 	be,pt		%xcc, window_mna_from_user_common
@@ -209,14 +209,14 @@ fill_fixup_mna:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6			! Get current back.
 	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
+	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
 	call		mem_address_unaligned
 	 add		%sp, PTREGS_OFF, %o0
 
 	b,pt		%xcc, rtrap
 	 nop						! yes, the nop is correct
 spill_fixup_mna:
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	ldx		[%g6 + TI_FLAGS], %g1
 	andcc		%g1, _TIF_32BIT, %g0
 	ldub		[%g6 + TI_WSAVED], %g1
@@ -284,7 +284,7 @@ winfix_dax:
 	wrpr		%g3, %tnpc
 	done
 fill_fixup_dax:
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	rdpr		%tstate, %g1
 	andcc		%g1, TSTATE_PRIV, %g0
 	be,pt		%xcc, window_dax_from_user_common
@@ -312,14 +312,14 @@ fill_fixup_dax:
 	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov		%o7, %g6			! Get current back.
 	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
+	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
 	call		spitfire_data_access_exception
 	 add		%sp, PTREGS_OFF, %o0
 
 	b,pt		%xcc, rtrap
 	 nop						! yes, the nop is correct
 spill_fixup_dax:
-	TRAP_LOAD_THREAD_REG
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	ldx		[%g6 + TI_FLAGS], %g1
 	andcc		%g1, _TIF_32BIT, %g0
 	ldub		[%g6 + TI_WSAVED], %g1
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index da54b4f3540..c15514f82c3 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -107,67 +107,67 @@ extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
 	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
 	.previous;
 
-/* Clobbers %g1, current address space PGD phys address into %g7.  */
-#define TRAP_LOAD_PGD_PHYS			\
-	__GET_CPUID(%g1)			\
-	sethi	%hi(trap_block), %g7;		\
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1;	\
-	or	%g7, %lo(trap_block), %g7;	\
-	add	%g7, %g1, %g7;			\
-	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7;
-
-/* Clobbers %g1, loads local processor's IRQ work area into %g6.  */
-#define TRAP_LOAD_IRQ_WORK			\
-	__GET_CPUID(%g1)			\
-	sethi	%hi(__irq_work), %g6;		\
-	sllx	%g1, 6, %g1;			\
-	or	%g6, %lo(__irq_work), %g6;	\
-	add	%g6, %g1, %g6;
-
-/* Clobbers %g1, loads %g6 with current thread info pointer.  */
-#define TRAP_LOAD_THREAD_REG			\
-	__GET_CPUID(%g1)			\
-	sethi	%hi(trap_block), %g6;		\
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1;	\
-	or	%g6, %lo(trap_block), %g6;	\
-	ldx	[%g6 + %g1], %g6;
-
-/* Given the current thread info pointer in %g6, load the per-cpu
- * area base of the current processor into %g5.  REG1, REG2, and REG3 are
+/* Clobbers TMP, current address space PGD phys address into DEST.  */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(trap_block), DEST;		\
+	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
+	or	DEST, %lo(trap_block), DEST;	\
+	add	DEST, TMP, DEST;		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
+#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(__irq_work), DEST;		\
+	sllx	TMP, 6, TMP;			\
+	or	DEST, %lo(__irq_work), DEST;	\
+	add	DEST, TMP, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer.  */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(trap_block), DEST;		\
+	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
+	or	DEST, %lo(trap_block), DEST;	\
+	ldx	[DEST + TMP], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST.  REG1, REG2, and REG3 are
  * clobbered.
  *
- * You absolutely cannot use %g5 as a temporary in this code.  The
+ * You absolutely cannot use DEST as a temporary in this code.  The
  * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved %g5 per-cpu base.  This can corrupt
+ * trap will load the fully resolved DEST per-cpu base.  This can corrupt
  * the calculations done by the macro mid-stream.
  */
-#define LOAD_PER_CPU_BASE(REG1, REG2, REG3)		\
-	ldub	[%g6 + TI_CPU], REG1;			\
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
+	ldub	[THR + TI_CPU], REG1;			\
 	sethi	%hi(__per_cpu_shift), REG3;		\
 	sethi	%hi(__per_cpu_base), REG2;		\
 	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
 	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
 	sllx	REG1, REG3, REG3;			\
-	add	REG3, REG2, %g5;
+	add	REG3, REG2, DEST;
 
 #else
 
 /* Uniprocessor versions, we know the cpuid is zero.  */
-#define TRAP_LOAD_PGD_PHYS			\
-	sethi	%hi(trap_block), %g7;		\
-	or	%g7, %lo(trap_block), %g7;	\
-	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7;
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	or	DEST, %lo(trap_block), DEST;	\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
-#define TRAP_LOAD_IRQ_WORK			\
-	sethi	%hi(__irq_work), %g6;		\
-	or	%g6, %lo(__irq_work), %g6;
+#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+	sethi	%hi(__irq_work), DEST;		\
+	or	DEST, %lo(__irq_work), DEST;
 
-#define TRAP_LOAD_THREAD_REG			\
-	sethi	%hi(trap_block), %g6;		\
-	ldx	[%g6 + %lo(trap_block)], %g6;
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	ldx	[DEST + %lo(trap_block)], DEST;
 
-/* No per-cpu areas on uniprocessor, so no need to load %g5.  */
-#define LOAD_PER_CPU_BASE(REG1, REG2, REG3)
+/* No per-cpu areas on uniprocessor, so no need to load DEST.  */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
 
 #endif /* !(CONFIG_SMP) */
 
-- 
cgit v1.2.3-70-g09d2


From 314ef6859750b6539eac48d78059bb7986f29cb1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 4 Feb 2006 00:10:01 -0800
Subject: [SPARC64]: Refine register window trap handling.

When saving and restoring trap state, do the window spill/fill
handling inline so that we never trap deeper than 2 trap levels.
This is important for chips like Niagara.

The window fixup code is massively simplified, and many more
improvements are now possible.
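
Concretely, etrap now inspects %cansave itself rather than letting its
save instruction take a nested spill trap.  The new entry logic in
etrap.S, paraphrased as C pseudocode (a rough sketch; the real code
also selects the ASI and the 32-bit versus 64-bit spill flavor, and the
helper names here are hypothetical):

	if (cansave != 0)
		do_normal_save();	/* etrap_save */
	else if (trapped_from_user)
		spill_user_window();	/* etrap_user_spill, ASI_AIUP */
	else if (otherwin == 0)
		spill_kernel_window();	/* etrap_kernel_spill */
	else
		spill_user_window();	/* user windows pending, ASI_AIUS */
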

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/etrap.S    | 104 +++-------
 arch/sparc64/kernel/process.c  |  20 +-
 arch/sparc64/kernel/rtrap.S    |  58 +++++-
 arch/sparc64/kernel/tsb.S      |   1 -
 arch/sparc64/kernel/ttable.S   |  16 +-
 arch/sparc64/kernel/winfixup.S | 454 ++++++++++-------------------------------
 include/asm-sparc64/ttable.h   | 234 ++++++++++++++++++++-
 7 files changed, 447 insertions(+), 440 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index b5f6bc52d91..4a0e01b1404 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -55,7 +55,31 @@ etrap_irq:
 		rd	%y, %g3
 		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
 		st	%g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
-		save	%g2, -STACK_BIAS, %sp	! Ordering here is critical
+
+		rdpr	%cansave, %g1
+		brnz,pt %g1, etrap_save
+		 nop
+
+		rdpr	%cwp, %g1
+		add	%g1, 2, %g1
+		wrpr	%g1, %cwp
+		be,pt	%xcc, etrap_user_spill
+		 mov	ASI_AIUP, %g3
+
+		rdpr	%otherwin, %g3
+		brz	%g3, etrap_kernel_spill
+		 mov	ASI_AIUS, %g3
+
+etrap_user_spill:
+
+		wr	%g3, 0x0, %asi
+		ldx	[%g6 + TI_FLAGS], %g3
+		and	%g3, _TIF_32BIT, %g3
+		brnz,pt	%g3, etrap_user_spill_32bit
+		 nop
+		ba,a,pt	%xcc, etrap_user_spill_64bit
+
+etrap_save:	save	%g2, -STACK_BIAS, %sp
 		mov	%g6, %l6
 
 		bne,pn	%xcc, 3f
@@ -176,83 +200,5 @@ etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 		ba,pt	%xcc, 1b
 		 andcc	%g1, TSTATE_PRIV, %g0
 
-		.align	64
-		.globl	scetrap
-scetrap:
-		TRAP_LOAD_THREAD_REG(%g6, %g1)
-		rdpr	%pil, %g2
-		rdpr	%tstate, %g1
-		sllx	%g2, 20, %g3
-		andcc	%g1, TSTATE_PRIV, %g0
-		or	%g1, %g3, %g1
-		bne,pn	%xcc, 1f
-		 sub	%sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2
-		wrpr	%g0, 7, %cleanwin
-
-		sllx	%g1, 51, %g3
-		sethi	%hi(TASK_REGOFF), %g2
-		or	%g2, %lo(TASK_REGOFF), %g2
-		brlz,pn	%g3, 1f
-		 add	%g6, %g2, %g2
-		wr	%g0, 0, %fprs
-1:		rdpr	%tpc, %g3
-		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
-
-		rdpr	%tnpc, %g1
-		stx	%g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
-		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
-		save	%g2, -STACK_BIAS, %sp	! Ordering here is critical
-		mov	%g6, %l6
-		bne,pn	%xcc, 2f
-		 mov	ASI_P, %l7
-		rdpr	%canrestore, %g3
-
-		rdpr	%wstate, %g2
-		wrpr	%g0, 0, %canrestore
-		sll	%g2, 3, %g2
-		mov	PRIMARY_CONTEXT, %l4
-		wrpr	%g3, 0, %otherwin
-		wrpr	%g2, 0, %wstate
-		sethi	%hi(sparc64_kern_pri_context), %g2
-		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
-		stxa	%g3, [%l4] ASI_DMMU
-		sethi	%hi(KERNBASE), %l4
-		flush	%l4
-
-		mov	ASI_AIUS, %l7
-2:		mov	%g4, %l4
-		mov	%g5, %l5
-		add	%g7, 0x4, %l2
-		wrpr	%g0, ETRAP_PSTATE1, %pstate
-		stx	%g1, [%sp + PTREGS_OFF + PT_V9_G1]
-		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
-		sllx	%l7, 24, %l7
-
-		stx	%g3, [%sp + PTREGS_OFF + PT_V9_G3]
-		rdpr	%cwp, %l0
-		stx	%g4, [%sp + PTREGS_OFF + PT_V9_G4]
-		stx	%g5, [%sp + PTREGS_OFF + PT_V9_G5]
-		stx	%g6, [%sp + PTREGS_OFF + PT_V9_G6]
-		stx	%g7, [%sp + PTREGS_OFF + PT_V9_G7]
-		or	%l7, %l0, %l7
-		sethi	%hi(TSTATE_RMO | TSTATE_PEF), %l0
-
-		or	%l7, %l0, %l7
-		wrpr	%l2, %tnpc
-		wrpr	%l7, (TSTATE_PRIV | TSTATE_IE), %tstate
-		stx	%i0, [%sp + PTREGS_OFF + PT_V9_I0]
-		stx	%i1, [%sp + PTREGS_OFF + PT_V9_I1]
-		stx	%i2, [%sp + PTREGS_OFF + PT_V9_I2]
-		stx	%i3, [%sp + PTREGS_OFF + PT_V9_I3]
-		stx	%i4, [%sp + PTREGS_OFF + PT_V9_I4]
-
-		stx	%i5, [%sp + PTREGS_OFF + PT_V9_I5]
-		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
-		mov	%l6, %g6
-		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
-		LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1)
-		ldx	[%g6 + TI_TASK], %g4
-		done
-
 #undef TASK_REGOFF
 #undef ETRAP_PSTATE1
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 26548fc604b..803eea4dc4f 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -541,6 +541,18 @@ void synchronize_user_stack(void)
 	}
 }
 
+static void stack_unaligned(unsigned long sp)
+{
+	siginfo_t info;
+
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRALN;
+	info.si_addr = (void __user *) sp;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
 void fault_in_user_windows(void)
 {
 	struct thread_info *t = current_thread_info();
@@ -556,13 +568,17 @@ void fault_in_user_windows(void)
 	flush_user_windows();
 	window = get_thread_wsaved();
 
-	if (window != 0) {
+	if (likely(window != 0)) {
 		window -= 1;
 		do {
 			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
 			struct reg_window *rwin = &t->reg_window[window];
 
-			if (copy_to_user((char __user *)sp, rwin, winsize))
+			if (unlikely(sp & 0x7UL))
+				stack_unaligned(sp);
+
+			if (unlikely(copy_to_user((char __user *)sp,
+						  rwin, winsize)))
 				goto barf;
 		} while (window--);
 	}
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 61bd45e7697..ecfbbdc5612 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -267,15 +267,69 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 
 		wrpr			%l2, %g0, %canrestore
 		wrpr			%l1, %g0, %wstate
-		wrpr			%g0, %g0, %otherwin
+		brnz,pt			%l2, user_rtt_restore
+		 wrpr			%g0, %g0, %otherwin
+
+		ldx			[%g6 + TI_FLAGS], %g3
+		wr			%g0, ASI_AIUP, %asi
+		rdpr			%cwp, %g1
+		andcc			%g3, _TIF_32BIT, %g0
+		sub			%g1, 1, %g1
+		bne,pt			%xcc, user_rtt_fill_32bit
+		 wrpr			%g1, %cwp
+		ba,a,pt			%xcc, user_rtt_fill_64bit
+
+user_rtt_fill_fixup:
+		rdpr	%cwp, %g1
+		add	%g1, 1, %g1
+		wrpr	%g1, 0x0, %cwp
+
+		rdpr	%wstate, %g2
+		sll	%g2, 3, %g2
+		wrpr	%g2, 0x0, %wstate
+
+		/* We know %canrestore and %otherwin are both zero.  */
+
+		sethi	%hi(sparc64_kern_pri_context), %g2
+		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
+		mov	PRIMARY_CONTEXT, %g1
+		stxa	%g2, [%g1] ASI_DMMU
+		sethi	%hi(KERNBASE), %g1
+		flush	%g1
+
+		or	%g4, FAULT_CODE_WINFIXUP, %g4
+		stb	%g4, [%g6 + TI_FAULT_CODE]
+		stx	%g5, [%g6 + TI_FAULT_ADDR]
+
+		mov	%g6, %l1
+		wrpr	%g0, 0x0, %tl
+		wrpr	%g0, RTRAP_PSTATE, %pstate
+		mov	%l1, %g6
+		ldx	[%g6 + TI_TASK], %g4
+		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+		call	do_sparc64_fault
+		 add	%sp, PTREGS_OFF, %o0
+		ba,pt	%xcc, rtrap
+		 nop
+
+user_rtt_pre_restore:
+		add			%g1, 1, %g1
+		wrpr			%g1, 0x0, %cwp
+
+user_rtt_restore:
 		restore
 		rdpr			%canrestore, %g1
 		wrpr			%g1, 0x0, %cleanwin
 		retry
 		nop
 
-kern_rtt:	restore
+kern_rtt:	rdpr			%canrestore, %g1
+		brz,pn			%g1, kern_rtt_fill
+		 nop
+kern_rtt_restore:
+		restore
 		retry
+
 to_kernel:
 #ifdef CONFIG_PREEMPT
 		ldsw			[%g6 + TI_PRE_COUNT], %l5
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 28e38b168dd..3b45db98005 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -115,7 +115,6 @@ sparc64_realfault_common:
 	ba,pt	%xcc, rtrap_clr_l6		! Restore cpu state
 	 nop					! Delay slot (fill me)
 
-	.globl	winfix_trampoline
 winfix_trampoline:
 	rdpr	%tpc, %g3			! Prepare winfixup TNPC
 	or	%g3, 0x7c, %g3			! Compute branch offset
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 99531424c59..2679b6e253a 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -92,11 +92,11 @@ tl0_resv07c:	BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
 tl0_s0n:	SPILL_0_NORMAL
 tl0_s1n:	SPILL_1_NORMAL
 tl0_s2n:	SPILL_2_NORMAL
-tl0_s3n:	SPILL_3_NORMAL
-tl0_s4n:	SPILL_4_NORMAL
-tl0_s5n:	SPILL_5_NORMAL
-tl0_s6n:	SPILL_6_NORMAL
-tl0_s7n:	SPILL_7_NORMAL
+tl0_s3n:	SPILL_0_NORMAL_ETRAP
+tl0_s4n:	SPILL_1_GENERIC_ETRAP
+tl0_s5n:	SPILL_1_GENERIC_ETRAP_FIXUP
+tl0_s6n:	SPILL_2_GENERIC_ETRAP
+tl0_s7n:	SPILL_2_GENERIC_ETRAP_FIXUP
 tl0_s0o:	SPILL_0_OTHER
 tl0_s1o:	SPILL_1_OTHER
 tl0_s2o:	SPILL_2_OTHER
@@ -110,9 +110,9 @@ tl0_f1n:	FILL_1_NORMAL
 tl0_f2n:	FILL_2_NORMAL
 tl0_f3n:	FILL_3_NORMAL
 tl0_f4n:	FILL_4_NORMAL
-tl0_f5n:	FILL_5_NORMAL
-tl0_f6n:	FILL_6_NORMAL
-tl0_f7n:	FILL_7_NORMAL
+tl0_f5n:	FILL_0_NORMAL_RTRAP
+tl0_f6n:	FILL_1_GENERIC_RTRAP
+tl0_f7n:	FILL_2_GENERIC_RTRAP
 tl0_f0o:	FILL_0_OTHER
 tl0_f1o:	FILL_1_OTHER
 tl0_f2o:	FILL_2_OTHER
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 211021ae6e8..efe2770e8f5 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -1,8 +1,6 @@
-/* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $
+/* winfixup.S: Handle cases where user stack pointer is found to be bogus.
  *
- * winfixup.S: Handle cases where user stack pointer is found to be bogus.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #include <asm/asi.h>
@@ -15,367 +13,129 @@
 
 	.text
 
-set_pcontext:
-	sethi	%hi(sparc64_kern_pri_context), %l1
-	ldx	[%l1 + %lo(sparc64_kern_pri_context)], %l1
-	mov	PRIMARY_CONTEXT, %g1
-	stxa	%l1, [%g1] ASI_DMMU
-	sethi	%hi(KERNBASE), %l1
-	flush	%l1
-	retl
-	 nop
+	/* It used to be the case that these register window fault
+	 * handlers could run via the save and restore instructions
+	 * done by the trap entry and exit code.  They now do the
+	 * window spill/fill by hand, so that case no longer can occur.
+	 */
 
 	.align	32
-
-	/* Here are the rules, pay attention.
-	 *
-	 * The kernel is disallowed from touching user space while
-	 * the trap level is greater than zero, except for from within
-	 * the window spill/fill handlers.  This must be followed
-	 * so that we can easily detect the case where we tried to
-	 * spill/fill with a bogus (or unmapped) user stack pointer.
-	 *
-	 * These are layed out in a special way for cache reasons,
-	 * don't touch...
-	 */
-	.globl	fill_fixup, spill_fixup
 fill_fixup:
 	TRAP_LOAD_THREAD_REG(%g6, %g1)
-	rdpr		%tstate, %g1
-	andcc		%g1, TSTATE_PRIV, %g0
-	or		%g4, FAULT_CODE_WINFIXUP, %g4
-	be,pt		%xcc, window_scheisse_from_user_common
-	 and		%g1, TSTATE_CWP, %g1
-
-	/* This is the extremely complex case, but it does happen from
-	 * time to time if things are just right.  Essentially the restore
-	 * done in rtrap right before going back to user mode, with tl=1
-	 * and that levels trap stack registers all setup, took a fill trap,
-	 * the user stack was not mapped in the tlb, and tlb miss occurred,
-	 * the pte found was not valid, and a simple ref bit watch update
-	 * could not satisfy the miss, so we got here.
-	 *
-	 * We must carefully unwind the state so we get back to tl=0, preserve
-	 * all the register values we were going to give to the user.  Luckily
-	 * most things are where they need to be, we also have the address
-	 * which triggered the fault handy as well.
-	 *
-	 * Also note that we must preserve %l5 and %l6.  If the user was
-	 * returning from a system call, we must make it look this way
-	 * after we process the fill fault on the users stack.
-	 *
-	 * First, get into the window where the original restore was executed.
-	 */
-
-	rdpr		%wstate, %g2			! Grab user mode wstate.
-	wrpr		%g1, %cwp			! Get into the right window.
-	sll		%g2, 3, %g2			! NORMAL-->OTHER
-
-	wrpr		%g0, 0x0, %canrestore		! Standard etrap stuff.
-	wrpr		%g2, 0x0, %wstate		! This must be consistent.
-	wrpr		%g0, 0x0, %otherwin		! We know this.
-	call		set_pcontext			! Change contexts...
+	rdpr	%tstate, %g1
+	and	%g1, TSTATE_CWP, %g1
+	or	%g4, FAULT_CODE_WINFIXUP, %g4
+	stb	%g4, [%g6 + TI_FAULT_CODE]
+	stx	%g5, [%g6 + TI_FAULT_ADDR]
+	wrpr	%g1, %cwp
+	ba,pt	%xcc, etrap
+	 rd	%pc, %g7
+	call	do_sparc64_fault
+	 add	%sp, PTREGS_OFF, %o0
+	ba,pt	%xcc, rtrap_clr_l6
 	 nop
-	rdpr		%pstate, %l1			! Prepare to change globals.
-	mov		%g6, %o7			! Get current.
-
-	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO
-	stb		%g4, [%g6 + TI_FAULT_CODE]
-	stx		%g5, [%g6 + TI_FAULT_ADDR]
-	wrpr		%g0, 0x0, %tl			! Out of trap levels.
-	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
-	mov		%o7, %g6
-	ldx		[%g6 + TI_TASK], %g4
-	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
 
-	/* This is the same as below, except we handle this a bit special
-	 * since we must preserve %l5 and %l6, see comment above.
-	 */
-	call		do_sparc64_fault
-	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop						! yes, nop is correct
-
-	/* Be very careful about usage of the alternate globals here.
-	 * You cannot touch %g4/%g5 as that has the fault information
-	 * should this be from usermode.  Also be careful for the case
-	 * where we get here from the save instruction in etrap.S when
-	 * coming from either user or kernel (does not matter which, it
-	 * is the same problem in both cases).  Essentially this means
-	 * do not touch %g7 or %g2 so we handle the two cases fine.
+	/* Be very careful about usage of the trap globals here.
+	 * You cannot touch %g5 as that has the fault information.
 	 */
 spill_fixup:
+spill_fixup_mna:
+spill_fixup_dax:
 	TRAP_LOAD_THREAD_REG(%g6, %g1)
-	ldx		[%g6 + TI_FLAGS], %g1
-	andcc		%g1, _TIF_32BIT, %g0
-	ldub		[%g6 + TI_WSAVED], %g1
-
-	sll		%g1, 3, %g3
-	add		%g6, %g3, %g3
-	stx		%sp, [%g3 + TI_RWIN_SPTRS]
-	sll		%g1, 7, %g3
-	bne,pt		%xcc, 1f
-	 add		%g6, %g3, %g3
-	stx		%l0, [%g3 + TI_REG_WINDOW + 0x00]
-	stx		%l1, [%g3 + TI_REG_WINDOW + 0x08]
-
-	stx		%l2, [%g3 + TI_REG_WINDOW + 0x10]
-	stx		%l3, [%g3 + TI_REG_WINDOW + 0x18]
-	stx		%l4, [%g3 + TI_REG_WINDOW + 0x20]
-	stx		%l5, [%g3 + TI_REG_WINDOW + 0x28]
-	stx		%l6, [%g3 + TI_REG_WINDOW + 0x30]
-	stx		%l7, [%g3 + TI_REG_WINDOW + 0x38]
-	stx		%i0, [%g3 + TI_REG_WINDOW + 0x40]
-	stx		%i1, [%g3 + TI_REG_WINDOW + 0x48]
-
-	stx		%i2, [%g3 + TI_REG_WINDOW + 0x50]
-	stx		%i3, [%g3 + TI_REG_WINDOW + 0x58]
-	stx		%i4, [%g3 + TI_REG_WINDOW + 0x60]
-	stx		%i5, [%g3 + TI_REG_WINDOW + 0x68]
-	stx		%i6, [%g3 + TI_REG_WINDOW + 0x70]
-	b,pt		%xcc, 2f
-	 stx		%i7, [%g3 + TI_REG_WINDOW + 0x78]
-1:	stw		%l0, [%g3 + TI_REG_WINDOW + 0x00]
-
-	stw		%l1, [%g3 + TI_REG_WINDOW + 0x04]
-	stw		%l2, [%g3 + TI_REG_WINDOW + 0x08]
-	stw		%l3, [%g3 + TI_REG_WINDOW + 0x0c]
-	stw		%l4, [%g3 + TI_REG_WINDOW + 0x10]
-	stw		%l5, [%g3 + TI_REG_WINDOW + 0x14]
-	stw		%l6, [%g3 + TI_REG_WINDOW + 0x18]
-	stw		%l7, [%g3 + TI_REG_WINDOW + 0x1c]
-	stw		%i0, [%g3 + TI_REG_WINDOW + 0x20]
-
-	stw		%i1, [%g3 + TI_REG_WINDOW + 0x24]
-	stw		%i2, [%g3 + TI_REG_WINDOW + 0x28]
-	stw		%i3, [%g3 + TI_REG_WINDOW + 0x2c]
-	stw		%i4, [%g3 + TI_REG_WINDOW + 0x30]
-	stw		%i5, [%g3 + TI_REG_WINDOW + 0x34]
-	stw		%i6, [%g3 + TI_REG_WINDOW + 0x38]
-	stw		%i7, [%g3 + TI_REG_WINDOW + 0x3c]
-2:	add		%g1, 1, %g1
-
-	stb		%g1, [%g6 + TI_WSAVED]
-	rdpr		%tstate, %g1
-	andcc		%g1, TSTATE_PRIV, %g0
+	ldx	[%g6 + TI_FLAGS], %g1
+	andcc	%g1, _TIF_32BIT, %g0
+	ldub	[%g6 + TI_WSAVED], %g1
+	sll	%g1, 3, %g3
+	add	%g6, %g3, %g3
+	stx	%sp, [%g3 + TI_RWIN_SPTRS]
+	sll	%g1, 7, %g3
+	bne,pt	%xcc, 1f
+	 add	%g6, %g3, %g3
+	stx	%l0, [%g3 + TI_REG_WINDOW + 0x00]
+	stx	%l1, [%g3 + TI_REG_WINDOW + 0x08]
+	stx	%l2, [%g3 + TI_REG_WINDOW + 0x10]
+	stx	%l3, [%g3 + TI_REG_WINDOW + 0x18]
+	stx	%l4, [%g3 + TI_REG_WINDOW + 0x20]
+	stx	%l5, [%g3 + TI_REG_WINDOW + 0x28]
+	stx	%l6, [%g3 + TI_REG_WINDOW + 0x30]
+	stx	%l7, [%g3 + TI_REG_WINDOW + 0x38]
+	stx	%i0, [%g3 + TI_REG_WINDOW + 0x40]
+	stx	%i1, [%g3 + TI_REG_WINDOW + 0x48]
+	stx	%i2, [%g3 + TI_REG_WINDOW + 0x50]
+	stx	%i3, [%g3 + TI_REG_WINDOW + 0x58]
+	stx	%i4, [%g3 + TI_REG_WINDOW + 0x60]
+	stx	%i5, [%g3 + TI_REG_WINDOW + 0x68]
+	stx	%i6, [%g3 + TI_REG_WINDOW + 0x70]
+	ba,pt	%xcc, 2f
+	 stx	%i7, [%g3 + TI_REG_WINDOW + 0x78]
+1:	stw	%l0, [%g3 + TI_REG_WINDOW + 0x00]
+	stw	%l1, [%g3 + TI_REG_WINDOW + 0x04]
+	stw	%l2, [%g3 + TI_REG_WINDOW + 0x08]
+	stw	%l3, [%g3 + TI_REG_WINDOW + 0x0c]
+	stw	%l4, [%g3 + TI_REG_WINDOW + 0x10]
+	stw	%l5, [%g3 + TI_REG_WINDOW + 0x14]
+	stw	%l6, [%g3 + TI_REG_WINDOW + 0x18]
+	stw	%l7, [%g3 + TI_REG_WINDOW + 0x1c]
+	stw	%i0, [%g3 + TI_REG_WINDOW + 0x20]
+	stw	%i1, [%g3 + TI_REG_WINDOW + 0x24]
+	stw	%i2, [%g3 + TI_REG_WINDOW + 0x28]
+	stw	%i3, [%g3 + TI_REG_WINDOW + 0x2c]
+	stw	%i4, [%g3 + TI_REG_WINDOW + 0x30]
+	stw	%i5, [%g3 + TI_REG_WINDOW + 0x34]
+	stw	%i6, [%g3 + TI_REG_WINDOW + 0x38]
+	stw	%i7, [%g3 + TI_REG_WINDOW + 0x3c]
+2:	add	%g1, 1, %g1
+	stb	%g1, [%g6 + TI_WSAVED]
+	rdpr	%tstate, %g1
+	andcc	%g1, TSTATE_PRIV, %g0
 	saved
-	and		%g1, TSTATE_CWP, %g1
-	be,a,pn		%xcc, window_scheisse_from_user_common
-	 mov		FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
+	be,pn	%xcc, 1f
+	 and	%g1, TSTATE_CWP, %g1
 	retry
+1:	mov	FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
+	stb	%g4, [%g6 + TI_FAULT_CODE]
+	stx	%g5, [%g6 + TI_FAULT_ADDR]
+	wrpr	%g1, %cwp
+	ba,pt	%xcc, etrap
+	 rd	%pc, %g7
+	call	do_sparc64_fault
+	 add	%sp, PTREGS_OFF, %o0
+	ba,a,pt	%xcc, rtrap_clr_l6
 
-window_scheisse_from_user_common:
-	stb		%g4, [%g6 + TI_FAULT_CODE]
-	stx		%g5, [%g6 + TI_FAULT_ADDR]
-	wrpr		%g1, %cwp
-	ba,pt		%xcc, etrap
-	 rd		%pc, %g7
-	call		do_sparc64_fault
-	 add		%sp, PTREGS_OFF, %o0
-	ba,a,pt		%xcc, rtrap_clr_l6
-
-	.globl		winfix_mna, fill_fixup_mna, spill_fixup_mna
 winfix_mna:
-	andn		%g3, 0x7f, %g3
-	add		%g3, 0x78, %g3
-	wrpr		%g3, %tnpc
+	andn	%g3, 0x7f, %g3
+	add	%g3, 0x78, %g3
+	wrpr	%g3, %tnpc
 	done
-fill_fixup_mna:
-	TRAP_LOAD_THREAD_REG(%g6, %g1)
-	rdpr		%tstate, %g1
-	andcc		%g1, TSTATE_PRIV, %g0
-	be,pt		%xcc, window_mna_from_user_common
-	 and		%g1, TSTATE_CWP, %g1
-
-	/* Please, see fill_fixup commentary about why we must preserve
-	 * %l5 and %l6 to preserve absolute correct semantics.
-	 */
-	rdpr		%wstate, %g2			! Grab user mode wstate.
-	wrpr		%g1, %cwp			! Get into the right window.
-	sll		%g2, 3, %g2			! NORMAL-->OTHER
-	wrpr		%g0, 0x0, %canrestore		! Standard etrap stuff.
-
-	wrpr		%g2, 0x0, %wstate		! This must be consistent.
-	wrpr		%g0, 0x0, %otherwin		! We know this.
-	call		set_pcontext			! Change contexts...
-	 nop
-	rdpr		%pstate, %l1			! Prepare to change globals.
-	mov		%g4, %o2			! Setup args for
-	mov		%g5, %o1			! final call to mem_address_unaligned.
-	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO
-
-	mov		%g6, %o7			! Stash away current.
-	wrpr		%g0, 0x0, %tl			! Out of trap levels.
-	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
-	mov		%o7, %g6			! Get current back.
-	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
-	call		mem_address_unaligned
-	 add		%sp, PTREGS_OFF, %o0
 
-	b,pt		%xcc, rtrap
-	 nop						! yes, the nop is correct
-spill_fixup_mna:
+fill_fixup_mna:
 	TRAP_LOAD_THREAD_REG(%g6, %g1)
-	ldx		[%g6 + TI_FLAGS], %g1
-	andcc		%g1, _TIF_32BIT, %g0
-	ldub		[%g6 + TI_WSAVED], %g1
-	sll		%g1, 3, %g3
-	add		%g6, %g3, %g3
-	stx		%sp, [%g3 + TI_RWIN_SPTRS]
-
-	sll		%g1, 7, %g3
-	bne,pt		%xcc, 1f
-	 add		%g6, %g3, %g3
-	stx		%l0, [%g3 + TI_REG_WINDOW + 0x00]
-	stx		%l1, [%g3 + TI_REG_WINDOW + 0x08]
-	stx		%l2, [%g3 + TI_REG_WINDOW + 0x10]
-	stx		%l3, [%g3 + TI_REG_WINDOW + 0x18]
-	stx		%l4, [%g3 + TI_REG_WINDOW + 0x20]
-
-	stx		%l5, [%g3 + TI_REG_WINDOW + 0x28]
-	stx		%l6, [%g3 + TI_REG_WINDOW + 0x30]
-	stx		%l7, [%g3 + TI_REG_WINDOW + 0x38]
-	stx		%i0, [%g3 + TI_REG_WINDOW + 0x40]
-	stx		%i1, [%g3 + TI_REG_WINDOW + 0x48]
-	stx		%i2, [%g3 + TI_REG_WINDOW + 0x50]
-	stx		%i3, [%g3 + TI_REG_WINDOW + 0x58]
-	stx		%i4, [%g3 + TI_REG_WINDOW + 0x60]
-
-	stx		%i5, [%g3 + TI_REG_WINDOW + 0x68]
-	stx		%i6, [%g3 + TI_REG_WINDOW + 0x70]
-	stx		%i7, [%g3 + TI_REG_WINDOW + 0x78]
-	b,pt		%xcc, 2f
-	 add		%g1, 1, %g1
-1:	std		%l0, [%g3 + TI_REG_WINDOW + 0x00]
-	std		%l2, [%g3 + TI_REG_WINDOW + 0x08]
-	std		%l4, [%g3 + TI_REG_WINDOW + 0x10]
+	rdpr	%tstate, %g1
+	and	%g1, TSTATE_CWP, %g1
+	wrpr	%g1, %cwp
+	ba,pt	%xcc, etrap
+	 rd	%pc, %g7
+	mov	%l4, %o2
+	mov	%l5, %o1
+	call	mem_address_unaligned
+	 add	%sp, PTREGS_OFF, %o0
+	ba,a,pt	%xcc, rtrap_clr_l6
 
-	std		%l6, [%g3 + TI_REG_WINDOW + 0x18]
-	std		%i0, [%g3 + TI_REG_WINDOW + 0x20]
-	std		%i2, [%g3 + TI_REG_WINDOW + 0x28]
-	std		%i4, [%g3 + TI_REG_WINDOW + 0x30]
-	std		%i6, [%g3 + TI_REG_WINDOW + 0x38]
-	add		%g1, 1, %g1
-2:	stb		%g1, [%g6 + TI_WSAVED]
-	rdpr		%tstate, %g1
-
-	andcc		%g1, TSTATE_PRIV, %g0
-	saved
-	be,pn		%xcc, window_mna_from_user_common
-	 and		%g1, TSTATE_CWP, %g1
-	retry
-window_mna_from_user_common:
-	wrpr		%g1, %cwp
-	sethi		%hi(109f), %g7
-	ba,pt		%xcc, etrap
-109:	 or		%g7, %lo(109b), %g7
-	mov		%l4, %o2
-	mov		%l5, %o1
-	call		mem_address_unaligned
-	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 clr		%l6
-	
-	.globl		winfix_dax, fill_fixup_dax, spill_fixup_dax
 winfix_dax:
-	andn		%g3, 0x7f, %g3
-	add		%g3, 0x74, %g3
-	wrpr		%g3, %tnpc
+	andn	%g3, 0x7f, %g3
+	add	%g3, 0x74, %g3
+	wrpr	%g3, %tnpc
 	done
-fill_fixup_dax:
-	TRAP_LOAD_THREAD_REG(%g6, %g1)
-	rdpr		%tstate, %g1
-	andcc		%g1, TSTATE_PRIV, %g0
-	be,pt		%xcc, window_dax_from_user_common
-	 and		%g1, TSTATE_CWP, %g1
-
-	/* Please, see fill_fixup commentary about why we must preserve
-	 * %l5 and %l6 to preserve absolute correct semantics.
-	 */
-	rdpr		%wstate, %g2			! Grab user mode wstate.
-	wrpr		%g1, %cwp			! Get into the right window.
-	sll		%g2, 3, %g2			! NORMAL-->OTHER
-	wrpr		%g0, 0x0, %canrestore		! Standard etrap stuff.
-
-	wrpr		%g2, 0x0, %wstate		! This must be consistent.
-	wrpr		%g0, 0x0, %otherwin		! We know this.
-	call		set_pcontext			! Change contexts...
-	 nop
-	rdpr		%pstate, %l1			! Prepare to change globals.
-	mov		%g4, %o1			! Setup args for
-	mov		%g5, %o2			! final call to spitfire_data_access_exception.
-	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO
-
-	mov		%g6, %o7			! Stash away current.
-	wrpr		%g0, 0x0, %tl			! Out of trap levels.
-	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
-	mov		%o7, %g6			! Get current back.
-	ldx		[%g6 + TI_TASK], %g4		! Finish it.
-	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
-	call		spitfire_data_access_exception
-	 add		%sp, PTREGS_OFF, %o0
 
-	b,pt		%xcc, rtrap
-	 nop						! yes, the nop is correct
-spill_fixup_dax:
+fill_fixup_dax:
 	TRAP_LOAD_THREAD_REG(%g6, %g1)
-	ldx		[%g6 + TI_FLAGS], %g1
-	andcc		%g1, _TIF_32BIT, %g0
-	ldub		[%g6 + TI_WSAVED], %g1
-	sll		%g1, 3, %g3
-	add		%g6, %g3, %g3
-	stx		%sp, [%g3 + TI_RWIN_SPTRS]
-
-	sll		%g1, 7, %g3
-	bne,pt		%xcc, 1f
-	 add		%g6, %g3, %g3
-	stx		%l0, [%g3 + TI_REG_WINDOW + 0x00]
-	stx		%l1, [%g3 + TI_REG_WINDOW + 0x08]
-	stx		%l2, [%g3 + TI_REG_WINDOW + 0x10]
-	stx		%l3, [%g3 + TI_REG_WINDOW + 0x18]
-	stx		%l4, [%g3 + TI_REG_WINDOW + 0x20]
-
-	stx		%l5, [%g3 + TI_REG_WINDOW + 0x28]
-	stx		%l6, [%g3 + TI_REG_WINDOW + 0x30]
-	stx		%l7, [%g3 + TI_REG_WINDOW + 0x38]
-	stx		%i0, [%g3 + TI_REG_WINDOW + 0x40]
-	stx		%i1, [%g3 + TI_REG_WINDOW + 0x48]
-	stx		%i2, [%g3 + TI_REG_WINDOW + 0x50]
-	stx		%i3, [%g3 + TI_REG_WINDOW + 0x58]
-	stx		%i4, [%g3 + TI_REG_WINDOW + 0x60]
-
-	stx		%i5, [%g3 + TI_REG_WINDOW + 0x68]
-	stx		%i6, [%g3 + TI_REG_WINDOW + 0x70]
-	stx		%i7, [%g3 + TI_REG_WINDOW + 0x78]
-	b,pt		%xcc, 2f
-	 add		%g1, 1, %g1
-1:	std		%l0, [%g3 + TI_REG_WINDOW + 0x00]
-	std		%l2, [%g3 + TI_REG_WINDOW + 0x08]
-	std		%l4, [%g3 + TI_REG_WINDOW + 0x10]
-
-	std		%l6, [%g3 + TI_REG_WINDOW + 0x18]
-	std		%i0, [%g3 + TI_REG_WINDOW + 0x20]
-	std		%i2, [%g3 + TI_REG_WINDOW + 0x28]
-	std		%i4, [%g3 + TI_REG_WINDOW + 0x30]
-	std		%i6, [%g3 + TI_REG_WINDOW + 0x38]
-	add		%g1, 1, %g1
-2:	stb		%g1, [%g6 + TI_WSAVED]
-	rdpr		%tstate, %g1
-
-	andcc		%g1, TSTATE_PRIV, %g0
-	saved
-	be,pn		%xcc, window_dax_from_user_common
-	 and		%g1, TSTATE_CWP, %g1
-	retry
-window_dax_from_user_common:
-	wrpr		%g1, %cwp
-	sethi		%hi(109f), %g7
-	ba,pt		%xcc, etrap
-109:	 or		%g7, %lo(109b), %g7
-	mov		%l4, %o1
-	mov		%l5, %o2
-	call		spitfire_data_access_exception
-	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 clr		%l6
+	rdpr	%tstate, %g1
+	and	%g1, TSTATE_CWP, %g1
+	wrpr	%g1, %cwp
+	ba,pt	%xcc, etrap
+	 rd	%pc, %g7
+	mov	%l4, %o1
+	mov	%l5, %o2
+	call	spitfire_data_access_exception
+	 add	%sp, PTREGS_OFF, %o0
+	ba,a,pt	%xcc, rtrap_clr_l6
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index f557db4faf8..f912f52c0c7 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -93,7 +93,7 @@
 	
 #define SYSCALL_TRAP(routine, systbl)			\
 	sethi	%hi(109f), %g7;				\
-	ba,pt	%xcc, scetrap;				\
+	ba,pt	%xcc, etrap;				\
 109:	 or	%g7, %lo(109b), %g7;			\
 	sethi	%hi(systbl), %l7;			\
 	ba,pt	%xcc, routine;				\
@@ -219,6 +219,31 @@
 	saved; retry; nop; nop; nop; nop; nop; nop;	\
 	nop; nop; nop; nop; nop; nop; nop; nop;
 
+#define SPILL_0_NORMAL_ETRAP				\
+etrap_kernel_spill:					\
+	stx	%l0, [%sp + STACK_BIAS + 0x00];		\
+	stx	%l1, [%sp + STACK_BIAS + 0x08];		\
+	stx	%l2, [%sp + STACK_BIAS + 0x10];		\
+	stx	%l3, [%sp + STACK_BIAS + 0x18];		\
+	stx	%l4, [%sp + STACK_BIAS + 0x20];		\
+	stx	%l5, [%sp + STACK_BIAS + 0x28];		\
+	stx	%l6, [%sp + STACK_BIAS + 0x30];		\
+	stx	%l7, [%sp + STACK_BIAS + 0x38];		\
+	stx	%i0, [%sp + STACK_BIAS + 0x40];		\
+	stx	%i1, [%sp + STACK_BIAS + 0x48];		\
+	stx	%i2, [%sp + STACK_BIAS + 0x50];		\
+	stx	%i3, [%sp + STACK_BIAS + 0x58];		\
+	stx	%i4, [%sp + STACK_BIAS + 0x60];		\
+	stx	%i5, [%sp + STACK_BIAS + 0x68];		\
+	stx	%i6, [%sp + STACK_BIAS + 0x70];		\
+	stx	%i7, [%sp + STACK_BIAS + 0x78];		\
+	saved;						\
+	sub	%g1, 2, %g1;				\
+	ba,pt	%xcc, etrap_save;			\
+	wrpr	%g1, %cwp;				\
+	nop; nop; nop; nop; nop; nop; nop; nop;		\
+	nop; nop; nop; nop;
+
 /* Normal 64bit spill */
 #define SPILL_1_GENERIC(ASI)				\
 	add	%sp, STACK_BIAS + 0x00, %g1;		\
@@ -252,6 +277,67 @@
 	b,a,pt	%xcc, spill_fixup_mna;			\
 	b,a,pt	%xcc, spill_fixup;
 
+#define SPILL_1_GENERIC_ETRAP				\
+etrap_user_spill_64bit:					\
+	stxa	%l0, [%sp + STACK_BIAS + 0x00] %asi;	\
+	stxa	%l1, [%sp + STACK_BIAS + 0x08] %asi;	\
+	stxa	%l2, [%sp + STACK_BIAS + 0x10] %asi;	\
+	stxa	%l3, [%sp + STACK_BIAS + 0x18] %asi;	\
+	stxa	%l4, [%sp + STACK_BIAS + 0x20] %asi;	\
+	stxa	%l5, [%sp + STACK_BIAS + 0x28] %asi;	\
+	stxa	%l6, [%sp + STACK_BIAS + 0x30] %asi;	\
+	stxa	%l7, [%sp + STACK_BIAS + 0x38] %asi;	\
+	stxa	%i0, [%sp + STACK_BIAS + 0x40] %asi;	\
+	stxa	%i1, [%sp + STACK_BIAS + 0x48] %asi;	\
+	stxa	%i2, [%sp + STACK_BIAS + 0x50] %asi;	\
+	stxa	%i3, [%sp + STACK_BIAS + 0x58] %asi;	\
+	stxa	%i4, [%sp + STACK_BIAS + 0x60] %asi;	\
+	stxa	%i5, [%sp + STACK_BIAS + 0x68] %asi;	\
+	stxa	%i6, [%sp + STACK_BIAS + 0x70] %asi;	\
+	stxa	%i7, [%sp + STACK_BIAS + 0x78] %asi;	\
+	saved;						\
+	sub	%g1, 2, %g1;				\
+	ba,pt	%xcc, etrap_save;			\
+	 wrpr	%g1, %cwp;				\
+	nop; nop; nop; nop; nop;			\
+	nop; nop; nop; nop;				\
+	ba,a,pt	%xcc, etrap_spill_fixup_64bit;		\
+	ba,a,pt	%xcc, etrap_spill_fixup_64bit;		\
+	ba,a,pt	%xcc, etrap_spill_fixup_64bit;
+
+#define SPILL_1_GENERIC_ETRAP_FIXUP			\
+etrap_spill_fixup_64bit:				\
+	ldub	[%g6 + TI_WSAVED], %g1;			\
+	sll	%g1, 3, %g3;				\
+	add	%g6, %g3, %g3;				\
+	stx	%sp, [%g3 + TI_RWIN_SPTRS];		\
+	sll	%g1, 7, %g3;				\
+	add	%g6, %g3, %g3;				\
+	stx	%l0, [%g3 + TI_REG_WINDOW + 0x00];	\
+	stx	%l1, [%g3 + TI_REG_WINDOW + 0x08];	\
+	stx	%l2, [%g3 + TI_REG_WINDOW + 0x10];	\
+	stx	%l3, [%g3 + TI_REG_WINDOW + 0x18];	\
+	stx	%l4, [%g3 + TI_REG_WINDOW + 0x20];	\
+	stx	%l5, [%g3 + TI_REG_WINDOW + 0x28];	\
+	stx	%l6, [%g3 + TI_REG_WINDOW + 0x30];	\
+	stx	%l7, [%g3 + TI_REG_WINDOW + 0x38];	\
+	stx	%i0, [%g3 + TI_REG_WINDOW + 0x40];	\
+	stx	%i1, [%g3 + TI_REG_WINDOW + 0x48];	\
+	stx	%i2, [%g3 + TI_REG_WINDOW + 0x50];	\
+	stx	%i3, [%g3 + TI_REG_WINDOW + 0x58];	\
+	stx	%i4, [%g3 + TI_REG_WINDOW + 0x60];	\
+	stx	%i5, [%g3 + TI_REG_WINDOW + 0x68];	\
+	stx	%i6, [%g3 + TI_REG_WINDOW + 0x70];	\
+	stx	%i7, [%g3 + TI_REG_WINDOW + 0x78];	\
+	add	%g1, 1, %g1;				\
+	stb	%g1, [%g6 + TI_WSAVED];			\
+	saved;						\
+	rdpr	%cwp, %g1;				\
+	sub	%g1, 2, %g1;				\
+	ba,pt	%xcc, etrap_save;			\
+	 wrpr	%g1, %cwp;				\
+	nop; nop; nop
+
 /* Normal 32bit spill */
 #define SPILL_2_GENERIC(ASI)				\
 	srl	%sp, 0, %sp;				\
@@ -285,6 +371,68 @@
 	b,a,pt	%xcc, spill_fixup_mna;			\
 	b,a,pt	%xcc, spill_fixup;
 
+#define SPILL_2_GENERIC_ETRAP		\
+etrap_user_spill_32bit:			\
+	srl	%sp, 0, %sp;		\
+	stwa	%l0, [%sp + 0x00] %asi;	\
+	stwa	%l1, [%sp + 0x04] %asi;	\
+	stwa	%l2, [%sp + 0x08] %asi;	\
+	stwa	%l3, [%sp + 0x0c] %asi;	\
+	stwa	%l4, [%sp + 0x10] %asi;	\
+	stwa	%l5, [%sp + 0x14] %asi;	\
+	stwa	%l6, [%sp + 0x18] %asi;	\
+	stwa	%l7, [%sp + 0x1c] %asi;	\
+	stwa	%i0, [%sp + 0x20] %asi;	\
+	stwa	%i1, [%sp + 0x24] %asi;	\
+	stwa	%i2, [%sp + 0x28] %asi;	\
+	stwa	%i3, [%sp + 0x2c] %asi;	\
+	stwa	%i4, [%sp + 0x30] %asi;	\
+	stwa	%i5, [%sp + 0x34] %asi;	\
+	stwa	%i6, [%sp + 0x38] %asi;	\
+	stwa	%i7, [%sp + 0x3c] %asi;	\
+	saved;				\
+	sub	%g1, 2, %g1;		\
+	ba,pt	%xcc, etrap_save;	\
+	 wrpr	%g1, %cwp;		\
+	nop; nop; nop; nop;		\
+	nop; nop; nop; nop;		\
+	ba,a,pt	%xcc, etrap_spill_fixup_32bit; \
+	ba,a,pt	%xcc, etrap_spill_fixup_32bit; \
+	ba,a,pt	%xcc, etrap_spill_fixup_32bit;
+
+#define SPILL_2_GENERIC_ETRAP_FIXUP			\
+etrap_spill_fixup_32bit:				\
+	ldub	[%g6 + TI_WSAVED], %g1;			\
+	sll	%g1, 3, %g3;				\
+	add	%g6, %g3, %g3;				\
+	stx	%sp, [%g3 + TI_RWIN_SPTRS];		\
+	sll	%g1, 7, %g3;				\
+	add	%g6, %g3, %g3;				\
+	stw	%l0, [%g3 + TI_REG_WINDOW + 0x00];	\
+	stw	%l1, [%g3 + TI_REG_WINDOW + 0x04];	\
+	stw	%l2, [%g3 + TI_REG_WINDOW + 0x08];	\
+	stw	%l3, [%g3 + TI_REG_WINDOW + 0x0c];	\
+	stw	%l4, [%g3 + TI_REG_WINDOW + 0x10];	\
+	stw	%l5, [%g3 + TI_REG_WINDOW + 0x14];	\
+	stw	%l6, [%g3 + TI_REG_WINDOW + 0x18];	\
+	stw	%l7, [%g3 + TI_REG_WINDOW + 0x1c];	\
+	stw	%i0, [%g3 + TI_REG_WINDOW + 0x20];	\
+	stw	%i1, [%g3 + TI_REG_WINDOW + 0x24];	\
+	stw	%i2, [%g3 + TI_REG_WINDOW + 0x28];	\
+	stw	%i3, [%g3 + TI_REG_WINDOW + 0x2c];	\
+	stw	%i4, [%g3 + TI_REG_WINDOW + 0x30];	\
+	stw	%i5, [%g3 + TI_REG_WINDOW + 0x34];	\
+	stw	%i6, [%g3 + TI_REG_WINDOW + 0x38];	\
+	stw	%i7, [%g3 + TI_REG_WINDOW + 0x3c];	\
+	add	%g1, 1, %g1;				\
+	stb	%g1, [%g6 + TI_WSAVED];			\
+	saved;						\
+	rdpr	%cwp, %g1;				\
+	sub	%g1, 2, %g1;				\
+	ba,pt	%xcc, etrap_save;			\
+	 wrpr	%g1, %cwp;				\
+	nop; nop; nop
+
 #define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP)
 #define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP)
 #define SPILL_3_NORMAL SPILL_0_NORMAL
@@ -323,6 +471,35 @@
 	restored; retry; nop; nop; nop; nop; nop; nop;	\
 	nop; nop; nop; nop; nop; nop; nop; nop;
 
+#define FILL_0_NORMAL_RTRAP				\
+kern_rtt_fill:						\
+	rdpr	%cwp, %g1;				\
+	sub	%g1, 1, %g1;				\
+	wrpr	%g1, %cwp;				\
+	ldx	[%sp + STACK_BIAS + 0x00], %l0;		\
+	ldx	[%sp + STACK_BIAS + 0x08], %l1;		\
+	ldx	[%sp + STACK_BIAS + 0x10], %l2;		\
+	ldx	[%sp + STACK_BIAS + 0x18], %l3;		\
+	ldx	[%sp + STACK_BIAS + 0x20], %l4;		\
+	ldx	[%sp + STACK_BIAS + 0x28], %l5;		\
+	ldx	[%sp + STACK_BIAS + 0x30], %l6;		\
+	ldx	[%sp + STACK_BIAS + 0x38], %l7;		\
+	ldx	[%sp + STACK_BIAS + 0x40], %i0;		\
+	ldx	[%sp + STACK_BIAS + 0x48], %i1;		\
+	ldx	[%sp + STACK_BIAS + 0x50], %i2;		\
+	ldx	[%sp + STACK_BIAS + 0x58], %i3;		\
+	ldx	[%sp + STACK_BIAS + 0x60], %i4;		\
+	ldx	[%sp + STACK_BIAS + 0x68], %i5;		\
+	ldx	[%sp + STACK_BIAS + 0x70], %i6;		\
+	ldx	[%sp + STACK_BIAS + 0x78], %i7;		\
+	restored;					\
+	add	%g1, 1, %g1;				\
+	ba,pt	%xcc, kern_rtt_restore;			\
+	 wrpr	%g1, %cwp;				\
+	nop; nop; nop; nop; nop;			\
+	nop; nop; nop; nop;
+
+
 /* Normal 64bit fill */
 #define FILL_1_GENERIC(ASI)				\
 	add	%sp, STACK_BIAS + 0x00, %g1;		\
@@ -354,6 +531,33 @@
 	b,a,pt	%xcc, fill_fixup_mna;			\
 	b,a,pt	%xcc, fill_fixup;
 
+#define FILL_1_GENERIC_RTRAP				\
+user_rtt_fill_64bit:					\
+	ldxa	[%sp + STACK_BIAS + 0x00] %asi, %l0;	\
+	ldxa	[%sp + STACK_BIAS + 0x08] %asi, %l1;	\
+	ldxa	[%sp + STACK_BIAS + 0x10] %asi, %l2;	\
+	ldxa	[%sp + STACK_BIAS + 0x18] %asi, %l3;	\
+	ldxa	[%sp + STACK_BIAS + 0x20] %asi, %l4;	\
+	ldxa	[%sp + STACK_BIAS + 0x28] %asi, %l5;	\
+	ldxa	[%sp + STACK_BIAS + 0x30] %asi, %l6;	\
+	ldxa	[%sp + STACK_BIAS + 0x38] %asi, %l7;	\
+	ldxa	[%sp + STACK_BIAS + 0x40] %asi, %i0;	\
+	ldxa	[%sp + STACK_BIAS + 0x48] %asi, %i1;	\
+	ldxa	[%sp + STACK_BIAS + 0x50] %asi, %i2;	\
+	ldxa	[%sp + STACK_BIAS + 0x58] %asi, %i3;	\
+	ldxa	[%sp + STACK_BIAS + 0x60] %asi, %i4;	\
+	ldxa	[%sp + STACK_BIAS + 0x68] %asi, %i5;	\
+	ldxa	[%sp + STACK_BIAS + 0x70] %asi, %i6;	\
+	ldxa	[%sp + STACK_BIAS + 0x78] %asi, %i7;	\
+	ba,pt	%xcc, user_rtt_pre_restore;		\
+	 restored;					\
+	nop; nop; nop; nop; nop; nop;			\
+	nop; nop; nop; nop; nop;			\
+	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup;
+
+
 /* Normal 32bit fill */
 #define FILL_2_GENERIC(ASI)				\
 	srl	%sp, 0, %sp;				\
@@ -385,6 +589,34 @@
 	b,a,pt	%xcc, fill_fixup_mna;			\
 	b,a,pt	%xcc, fill_fixup;
 
+#define FILL_2_GENERIC_RTRAP				\
+user_rtt_fill_32bit:					\
+	srl	%sp, 0, %sp;				\
+	lduwa	[%sp + 0x00] %asi, %l0;			\
+	lduwa	[%sp + 0x04] %asi, %l1;			\
+	lduwa	[%sp + 0x08] %asi, %l2;			\
+	lduwa	[%sp + 0x0c] %asi, %l3;			\
+	lduwa	[%sp + 0x10] %asi, %l4;			\
+	lduwa	[%sp + 0x14] %asi, %l5;			\
+	lduwa	[%sp + 0x18] %asi, %l6;			\
+	lduwa	[%sp + 0x1c] %asi, %l7;			\
+	lduwa	[%sp + 0x20] %asi, %i0;			\
+	lduwa	[%sp + 0x24] %asi, %i1;			\
+	lduwa	[%sp + 0x28] %asi, %i2;			\
+	lduwa	[%sp + 0x2c] %asi, %i3;			\
+	lduwa	[%sp + 0x30] %asi, %i4;			\
+	lduwa	[%sp + 0x34] %asi, %i5;			\
+	lduwa	[%sp + 0x38] %asi, %i6;			\
+	lduwa	[%sp + 0x3c] %asi, %i7;			\
+	ba,pt	%xcc, user_rtt_pre_restore;		\
+	 restored;					\
+	nop; nop; nop; nop; nop;			\
+	nop; nop; nop; nop; nop;			\
+	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup;
+
+
 #define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP)
 #define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP)
 #define FILL_3_NORMAL FILL_0_NORMAL
-- 
cgit v1.2.3-70-g09d2


From 766f861fbbd968a1850295ed6dec4504b4500dcc Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 4 Feb 2006 03:01:45 -0800
Subject: [SPARC64]: SUN4V hypervisor interface defines.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/hypervisor.h | 2072 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 2072 insertions(+)
 create mode 100644 include/asm-sparc64/hypervisor.h

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
new file mode 100644
index 00000000000..9c8e453abe9
--- /dev/null
+++ b/include/asm-sparc64/hypervisor.h
@@ -0,0 +1,2072 @@
+#ifndef _SPARC64_HYPERVISOR_H
+#define _SPARC64_HYPERVISOR_H
+
+/* Sun4v hypervisor interfaces and defines.
+ *
+ * Hypervisor calls are made via software traps numbered 0x80 and
+ * above.  Registers %o0 to %o5 serve as argument, status, and
+ * return value registers.
+ *
+ * There are two kinds of these traps.  First there are the normal
+ * "fast traps" which use software trap 0x80 and encode the function
+ * to invoke by number in register %o5.  Argument and return value
+ * handling is as follows:
+ *
+ * -----------------------------------------------
+ * |  %o5  | function number |     undefined     |
+ * |  %o0  |   argument 0    |   return status   |
+ * |  %o1  |   argument 1    |   return value 1  |
+ * |  %o2  |   argument 2    |   return value 2  |
+ * |  %o3  |   argument 3    |   return value 3  |
+ * |  %o4  |   argument 4    |   return value 4  |
+ * -----------------------------------------------
+ *
+ * The second type are "hyper-fast traps" which encode the function
+ * number in the software trap number itself.  So these use trap
+ * numbers > 0x80.  The register usage for hyper-fast traps is as
+ * follows:
+ *
+ * -----------------------------------------------
+ * |  %o0  |   argument 0    |   return status   |
+ * |  %o1  |   argument 1    |   return value 1  |
+ * |  %o2  |   argument 2    |   return value 2  |
+ * |  %o3  |   argument 3    |   return value 3  |
+ * |  %o4  |   argument 4    |   return value 4  |
+ * -----------------------------------------------
+ *
+ * Registers providing explicit arguments to the hypervisor calls
+ * are volatile across the call.  Upon return their values are
+ * undefined unless explicitly specified as containing a particular
+ * return value by the specific call.  The return status is always
+ * returned in register %o0, zero indicates a successful execution of
+ * the hypervisor call and other values indicate an error status as
+ * defined below.  So, for example, if a hyper-fast trap takes
+ * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
+ * the call and %o3, %o4, and %o5 would be preserved.
+ *
+ * If the hypervisor trap is invalid, or the fast trap function number
+ * is invalid, HV_EBADTRAP will be returned in %o0.  Also, all 64-bits
+ * of the argument and return values are significant.
+ */
+
+/* Trap numbers.  */
+#define HV_FAST_TRAP		0x80
+#define HV_MMU_MAP_ADDR_TRAP	0x83
+#define HV_MMU_UNMAP_ADDR_TRAP	0x84
+#define HV_TTRACE_ADDENTRY_TRAP	0x85
+#define HV_CORE_TRAP		0xff
+
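+/* For illustration only -- a minimal sketch, not part of this
+ * interface: from C, privileged code could issue an argument-less
+ * fast trap by loading the function number into %o5 and reading the
+ * status back from %o0, roughly like this (the helper name is
+ * hypothetical):
+ *
+ *	static inline unsigned long hv_fast_trap0(unsigned long func)
+ *	{
+ *		register unsigned long func_reg asm("o5") = func;
+ *		register unsigned long status asm("o0");
+ *
+ *		__asm__ __volatile__("ta %2"
+ *				     : "=r" (status), "+r" (func_reg)
+ *				     : "i" (HV_FAST_TRAP)
+ *				     : "o1", "o2", "o3", "o4", "memory");
+ *		return status;
+ *	}
+ *
+ * The %o1--%o4 clobbers reflect the volatility rules described above.
+ */
+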
+/* Error codes.  */
+#define HV_EOK				0  /* Successful return            */
+#define HV_ENOCPU			1  /* Invalid CPU id               */
+#define HV_ENORADDR			2  /* Invalid real address         */
+#define HV_ENOINTR			3  /* Invalid interrupt id         */
+#define HV_EBADPGSZ			4  /* Invalid pagesize encoding    */
+#define HV_EBADTSB			5  /* Invalid TSB description      */
+#define HV_EINVAL			6  /* Invalid argument             */
+#define HV_EBADTRAP			7  /* Invalid function number      */
+#define HV_EBADALIGN			8  /* Invalid address alignment    */
+#define HV_EWOULDBLOCK			9  /* Cannot complete w/o blocking */
+#define HV_ENOACCESS			10 /* No access to resource        */
+#define HV_EIO				11 /* I/O error                    */
+#define HV_ECPUERROR			12 /* CPU in error state           */
+#define HV_ENOTSUPPORTED		13 /* Function not supported       */
+#define HV_ENOMAP			14 /* No mapping found             */
+#define HV_ETOOMANY			15 /* Too many items specified     */
+
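+/* Sketch only (this mapping is an assumption, not defined by the
+ * hypervisor): callers will typically fold these status codes into
+ * Unix errno values, for example:
+ *
+ *	static int hv_err_to_errno(unsigned long hv_err)
+ *	{
+ *		switch (hv_err) {
+ *		case HV_EOK:		return 0;
+ *		case HV_ENORADDR:
+ *		case HV_EBADALIGN:
+ *		case HV_EINVAL:		return -EINVAL;
+ *		case HV_EWOULDBLOCK:	return -EAGAIN;
+ *		case HV_ENOACCESS:	return -EACCES;
+ *		case HV_ENOTSUPPORTED:	return -ENOSYS;
+ *		default:		return -EIO;
+ *		}
+ *	}
+ */
+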
+/* mach_exit()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_EXIT
+ * ARG0:	exit code
+ * ERRORS:	This service does not return.
+ *
+ * Stop all CPUs in the virtual domain and place them into the stopped
+ * state.  The 64-bit exit code may be passed to a service entity as
+ * the domain's exit status.  On systems without a service entity, the
+ * domain will undergo a reset, and the boot firmware will be
+ * reloaded.
+ *
+ * This function will never return to the guest that invokes it.
+ *
+ * Note: By convention an exit code of zero denotes a successful exit by
+ *       the guest code.  A non-zero exit code denotes a guest specific
+ *       error indication.
+ *
+ */
+#define HV_FAST_MACH_EXIT		0x00
+
+/* Domain services.  */
+
+/* mach_desc()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_DESC
+ * ARG0:	buffer
+ * ARG1:	length
+ * RET0:	status
+ * RET1:	length
+ * ERRORS:	HV_EBADALIGN	Buffer is badly aligned
+ *		HV_ENORADDR	Buffer is at an illegal real address.
+ *		HV_EINVAL	Buffer length is too small for complete
+ *				machine description.
+ *
+ * Copy the most current machine description into the buffer indicated
+ * by the real address in ARG0.  The buffer provided must be 16 byte
+ * aligned.  Upon success or HV_EINVAL, this service returns the
+ * actual size of the machine description in the RET1 return value.
+ *
+ * Note: A method of determining the appropriate buffer size for the
+ *       machine description is to first call this service with a buffer
+ *       length of 0 bytes.
+ */
+#define HV_FAST_MACH_DESC		0x01
+
+/* mach_sir()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_SIR
+ * ERRORS:	This service does not return.
+ *
+ * Perform a software initiated reset of the virtual machine domain.
+ * All CPUs are captured as soon as possible, all hardware devices are
+ * returned to the entry default state, and the domain is restarted at
+ * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
+ * of the CPUs.  The single CPU restarted is selected as determined by
+ * platform specific policy.  Memory is preserved across this
+ * operation.
+ */
+#define HV_FAST_MACH_SIR		0x02
+
+/* mach_set_soft_state()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_SET_SOFT_STATE
+ * ARG0:	software state
+ * ARG1:	software state description pointer
+ * RET0:	status
+ * ERRORS:	EINVAL		software state not valid or software state
+ *				description is not NULL terminated
+ *		ENORADDR	software state description pointer is not a
+ *				valid real address
+ *		EBADALIGN	software state description is not correctly
+ *				aligned
+ *
+ * This allows the guest to report its soft state to the hypervisor.  There
+ * are two primary components to this state.  The first part states whether
+ * the guest software is running or not.  The second contains optional
+ * details specific to the software.
+ *
+ * The software state argument is defined below in HV_SOFT_STATE_*, and
+ * indicates whether the guest is operating normally or in a transitional
+ * state.
+ *
+ * The software state description argument is a real address of a data buffer
+ * of size 32 bytes, aligned on a 32-byte boundary.  It is treated as a NULL
+ * terminated 7-bit ASCII string of up to 31 characters not including the
+ * NULL termination.
+ */
+#define HV_FAST_MACH_SET_SOFT_STATE	0x03
+#define  HV_SOFT_STATE_NORMAL		 0x01
+#define  HV_SOFT_STATE_TRANSITION	 0x02
+
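+/* A minimal sketch, assuming a statically allocated description (the
+ * buffer name and contents below are placeholders, not part of this
+ * API):
+ *
+ *	static char hv_soft_state_desc[32]
+ *		__attribute__((aligned(32))) = "Linux booted";
+ *
+ * ARG0 would then be HV_SOFT_STATE_NORMAL and ARG1 the real address
+ * of hv_soft_state_desc.
+ */
+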
+/* mach_get_soft_state()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_GET_SOFT_STATE
+ * ARG0:	software state description pointer
+ * RET0:	status
+ * RET1:	software state
+ * ERRORS:	ENORADDR	software state description pointer is not a
+ *				valid real address
+ *		EBADALIGN	software state description is not correctly
+ *				aligned
+ *
+ * Retrieve the current value of the guest's software state.  The rules
+ * for the software state pointer are the same as for mach_set_soft_state()
+ * above.
+ */
+#define HV_FAST_MACH_GET_SOFT_STATE	0x04
+
+/* CPU services.
+ *
+ * CPUs represent devices that can execute software threads.  A single
+ * chip that contains multiple cores or strands is represented as
+ * multiple CPUs with unique CPU identifiers.  CPUs are exported to
+ * OBP via the machine description (and to the OS via the OBP device
+ * tree).  CPUs are always in one of three states: stopped, running,
+ * or error.
+ *
+ * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
+ * CPU within a logical domain.  Operations that are to be performed
+ * on multiple CPUs specify them via a CPU list.  A CPU list is an
+ * array in real memory, of which each 16-bit word is a CPU ID.  CPU
+ * lists are passed through the API as two arguments.  The first is
+ * the number of entries (16-bit words) in the CPU list, and the
+ * second is the (real address) pointer to the CPU ID list.
+ */
+
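+/* Illustration only (names below are placeholders): a CPU list is
+ * nothing more than an array of 16-bit IDs in real memory, so a call
+ * targeting CPUs 5 and 7 would be set up as:
+ *
+ *	static unsigned short cpu_list[2];
+ *
+ *	cpu_list[0] = 5;
+ *	cpu_list[1] = 7;
+ *
+ * with 2 passed as the entry count argument and the real address of
+ * cpu_list as the list pointer argument.
+ */
+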
+/* cpu_start()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_START
+ * ARG0:	CPU ID
+ * ARG1:	PC
+ * ARG2:	RTBA
+ * ARG3:	target ARG0
+ * RET0:	status
+ * ERRORS:	ENOCPU		Invalid CPU ID
+ *		EINVAL		Target CPU ID is not in the stopped state
+ *		ENORADDR	Invalid PC or RTBA real address
+ *		EBADALIGN	Unaligned PC or unaligned RTBA
+ *		EWOULDBLOCK	Starting resources are not available
+ *
+ * Start CPU with given CPU ID with PC in %pc and with a real trap
+ * base address value of RTBA.  The indicated CPU must be in the
+ * stopped state.  The supplied RTBA must be aligned on a 256 byte
+ * boundary.  On successful completion, the specified CPU will be in
+ * the running state and will be supplied with "target ARG0" in %o0
+ * and RTBA in %tba.
+ */
+#define HV_FAST_CPU_START		0x10
+
+/* cpu_stop()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_STOP
+ * ARG0:	CPU ID
+ * RET0:	status
+ * ERRORS:	ENOCPU		Invalid CPU ID
+ *		EINVAL		Target CPU ID is the current cpu
+ *		EINVAL		Target CPU ID is not in the running state
+ *		EWOULDBLOCK	Stopping resources are not available
+ *		ENOTSUPPORTED	Not supported on this platform
+ *
+ * The specified CPU is stopped.  The indicated CPU must be in the
+ * running state.  On completion, it will be in the stopped state.  It
+ * is not legal to stop the current CPU.
+ *
+ * Note: As this service cannot be used to stop the current cpu, this service
+ *       may not be used to stop the last running CPU in a domain.  To stop
+ *       and exit a running domain, a guest must use the mach_exit() service.
+ */
+#define HV_FAST_CPU_STOP		0x11
+
+/* cpu_yield()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_YIELD
+ * RET0:	status
+ * ERRORS:	No possible error.
+ *
+ * Suspend execution on the current CPU.  Execution will resume when
+ * an interrupt (device, %stick_compare, or cross-call) is targeted to
+ * the CPU.  On some CPUs, this API may be used by the hypervisor to
+ * save power by disabling hardware strands.
+ */
+#define HV_FAST_CPU_YIELD		0x12
+
+
+/* cpu_qconf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_QCONF
+ * ARG0:	queue
+ * ARG1:	base real address
+ * ARG2:	number of entries
+ * RET0:	status
+ * ERRORS:	ENORADDR	Invalid base real address
+ *		EINVAL		Invalid queue or number of entries is less
+ *				than 2 or too large.
+ *		EBADALIGN	Base real address is not correctly aligned
+ *				for size.
+ *
+ * Configure the given queue to be placed at the given base real
+ * address, with the given number of entries.  The number of entries
+ * must be a power of 2.  The base real address must be aligned
+ * exactly to match the queue size.  Each queue entry is 64 bytes
+ * long, so for example a 32 entry queue must be aligned on a 2048
+ * byte real address boundary.
+ *
+ * The specified queue is unconfigured if the number of entries is given as zero.
+ *
+ * For the current version of this API service, the argument queue is defined
+ * as follows:
+ *	queue		description
+ *	-----		-------------------------
+ *	0x3c		cpu mondo queue
+ *	0x3d		device mondo queue
+ *	0x3e		resumable error queue
+ *	0x3f		non-resumable error queue
+ *
+ * Note: The maximum number of entries for each queue for a specific cpu may
+ *       be determined from the machine description.
+ */
+#define HV_FAST_CPU_QCONF		0x14
+#define  HV_CPU_QUEUE_CPU_MONDO		 0x3c
+#define  HV_CPU_QUEUE_DEVICE_MONDO	 0x3d
+#define  HV_CPU_QUEUE_RES_ERROR		 0x3e
+#define  HV_CPU_QUEUE_NONRES_ERROR	 0x3f
+
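+/* A worked example of the alignment rule above, as a sketch (the
+ * queue name and size are arbitrary): a queue of N 64-byte entries
+ * must sit on an (N * 64)-byte boundary, so a 128-entry cpu mondo
+ * queue occupies 8192 bytes and needs 8192-byte alignment:
+ *
+ *	static char cpu_mondo_queue[128 * 64]
+ *		__attribute__((aligned(128 * 64)));
+ */
+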
+/* cpu_qinfo()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_QINFO
+ * ARG0:	queue
+ * RET0:	status
+ * RET1:	base real address
+ * RET2:	number of entries
+ * ERRORS:	EINVAL		Invalid queue
+ *
+ * Return the configuration info for the given queue.  The base real
+ * address and number of entries of the defined queue are returned.
+ * The queue argument values are the same as for cpu_qconf() above.
+ *
+ * If the specified queue is a valid queue number, but no queue has
+ * been defined, the number of entries will be set to zero and the
+ * base real address returned is undefined.
+ */
+#define HV_FAST_CPU_QINFO		0x15
+
+/* cpu_mondo_send()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_MONDO_SEND
+ * ARG0-1:	CPU list
+ * ARG2:	data real address
+ * RET0:	status
+ * ERRORS:	EBADALIGN	Mondo data is not 64-byte aligned or CPU list
+ *				is not 2-byte aligned.
+ *		ENORADDR	Invalid data mondo address, or invalid cpu list
+ *				address.
+ *		ENOCPU		Invalid cpu in CPU list
+ *		EWOULDBLOCK	Some or all of the listed CPUs did not receive
+ *				the mondo
+ *		EINVAL		CPU list includes caller's CPU ID
+ *
+ * Send a mondo interrupt to the CPUs in the given CPU list with the
+ * 64-bytes at the given data real address.  The data must be 64-byte
+ * aligned.  The mondo data will be delivered to the cpu_mondo queues
+ * of the recipient CPUs.
+ *
+ * In all cases, error or not, the CPUs in the CPU list to which the
+ * mondo has been successfully delivered will be indicated by having
+ * their entry in CPU list updated with the value 0xffff.
+ */
+#define HV_FAST_CPU_MONDO_SEND		0x42
+
+/* cpu_myid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_MYID
+ * RET0:	status
+ * RET1:	CPU ID
+ * ERRORS:	No errors defined.
+ *
+ * Return the hypervisor ID handle for the current CPU.  Used by a
+ * virtual CPU to discover its own identity.
+ */
+#define HV_FAST_CPU_MYID		0x16
+
+/* cpu_state()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_STATE
+ * ARG0:	CPU ID
+ * RET0:	status
+ * RET1:	state
+ * ERRORS:	ENOCPU		Invalid CPU ID
+ *
+ * Retrieve the current state of the CPU with the given CPU ID.
+ */
+#define HV_FAST_CPU_STATE		0x17
+#define  HV_CPU_STATE_STOPPED		 0x01
+#define  HV_CPU_STATE_RUNNING		 0x02
+#define  HV_CPU_STATE_ERROR		 0x03
+
+/* cpu_set_rtba()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_SET_RTBA
+ * ARG0:	RTBA
+ * RET0:	status
+ * RET1:	previous RTBA
+ * ERRORS:	ENORADDR	Invalid RTBA real address
+ *		EBADALIGN	RTBA is incorrectly aligned for a trap table
+ *
+ * Set the real trap base address of the local cpu to the given RTBA.
+ * The supplied RTBA must be aligned on a 256 byte boundary.  Upon
+ * success the previous value of the RTBA is returned in RET1.
+ *
+ * Note: This service does not affect %tba
+ */
+#define HV_FAST_CPU_SET_RTBA		0x18
+
+/* cpu_get_rtba()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_GET_RTBA
+ * RET0:	status
+ * RET1:	current RTBA
+ * ERRORS:	No possible error.
+ *
+ * Returns the current value of RTBA in RET1.
+ */
+#define HV_FAST_CPU_GET_RTBA		0x19
+
+/* MMU services.
+ *
+ * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
+ */
+#ifndef __ASSEMBLY__
+struct hv_tsb_descr {
+	unsigned short		pgsz_idx;
+	unsigned short		assoc;
+	unsigned int		num_ttes;	/* in TTEs */
+	unsigned int		ctx_idx;
+	unsigned int		pgsz_mask;
+	unsigned long		tsb_base;
+	unsigned long		resv;
+};
+#endif
+#define HV_TSB_DESCR_PGSZ_IDX_OFFSET	0x00
+#define HV_TSB_DESCR_ASSOC_OFFSET	0x02
+#define HV_TSB_DESCR_NUM_TTES_OFFSET	0x04
+#define HV_TSB_DESCR_CTX_IDX_OFFSET	0x08
+#define HV_TSB_DESCR_PGSZ_MASK_OFFSET	0x0c
+#define HV_TSB_DESCR_TSB_BASE_OFFSET	0x10
+#define HV_TSB_DESCR_RESV_OFFSET	0x18
+
+/* Page size bitmask.  */
+#define HV_PGSZ_MASK_8K			(1 << 0)
+#define HV_PGSZ_MASK_64K		(1 << 1)
+#define HV_PGSZ_MASK_512K		(1 << 2)
+#define HV_PGSZ_MASK_4MB		(1 << 3)
+#define HV_PGSZ_MASK_32MB		(1 << 4)
+#define HV_PGSZ_MASK_256MB		(1 << 5)
+#define HV_PGSZ_MASK_2GB		(1 << 6)
+#define HV_PGSZ_MASK_16GB		(1 << 7)
+
+/* Page size index.  The value given in the TSB descriptor must correspond
+ * to the smallest page size specified in the pgsz_mask page size bitmask.
+ */
+#define HV_PGSZ_IDX_8K			0
+#define HV_PGSZ_IDX_64K			1
+#define HV_PGSZ_IDX_512K		2
+#define HV_PGSZ_IDX_4MB			3
+#define HV_PGSZ_IDX_32MB		4
+#define HV_PGSZ_IDX_256MB		5
+#define HV_PGSZ_IDX_2GB			6
+#define HV_PGSZ_IDX_16GB		7
+
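+/* Tying the index and bitmask rules together, a sketch of a TSB
+ * descriptor for a direct mapped, 512-entry, 8K page TSB at context
+ * index 0 (tsb_real_address is a placeholder):
+ *
+ *	struct hv_tsb_descr d = {
+ *		.pgsz_idx	= HV_PGSZ_IDX_8K,
+ *		.assoc		= 1,
+ *		.num_ttes	= 512,
+ *		.ctx_idx	= 0,
+ *		.pgsz_mask	= HV_PGSZ_MASK_8K,
+ *		.tsb_base	= tsb_real_address,
+ *		.resv		= 0,
+ *	};
+ *
+ * Note that pgsz_idx names the smallest page size set in pgsz_mask,
+ * as required.
+ */
+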
+/* MMU fault status area.
+ *
+ * MMU related faults have their status and fault address information
+ * placed into a memory region made available by privileged code.  Each
+ * virtual processor must make a mmu_fault_area_conf() call to tell the
+ * hypervisor where that processor's fault status should be stored.
+ *
+ * The fault status block is a multiple of 64-bytes and must be aligned
+ * on a 64-byte boundary.
+ */
+#ifndef __ASSEMBLY__
+struct hv_fault_status {
+	unsigned long		i_fault_type;
+	unsigned long		i_fault_addr;
+	unsigned long		i_fault_ctx;
+	unsigned long		i_reserved[5];
+	unsigned long		d_fault_type;
+	unsigned long		d_fault_addr;
+	unsigned long		d_fault_ctx;
+	unsigned long		d_reserved[5];
+};
+#endif
+#define HV_FAULT_I_TYPE_OFFSET	0x00
+#define HV_FAULT_I_ADDR_OFFSET	0x08
+#define HV_FAULT_I_CTX_OFFSET	0x10
+#define HV_FAULT_D_TYPE_OFFSET	0x40
+#define HV_FAULT_D_ADDR_OFFSET	0x48
+#define HV_FAULT_D_CTX_OFFSET	0x50
+
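+/* Sketch only: one simple arrangement is a statically allocated,
+ * 64-byte aligned status block per virtual processor, whose real
+ * address is then handed to the mmu_fault_area_conf() service
+ * described below:
+ *
+ *	static struct hv_fault_status cpu_fault_area
+ *		__attribute__((aligned(64)));
+ */
+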
+#define HV_FAULT_TYPE_FAST_MISS	1
+#define HV_FAULT_TYPE_FAST_PROT	2
+#define HV_FAULT_TYPE_MMU_MISS	3
+#define HV_FAULT_TYPE_INV_RA	4
+#define HV_FAULT_TYPE_PRIV_VIOL	5
+#define HV_FAULT_TYPE_PROT_VIOL	6
+#define HV_FAULT_TYPE_NFO	7
+#define HV_FAULT_TYPE_NFO_SEFF	8
+#define HV_FAULT_TYPE_INV_VA	9
+#define HV_FAULT_TYPE_INV_ASI	10
+#define HV_FAULT_TYPE_NC_ATOMIC	11
+#define HV_FAULT_TYPE_PRIV_ACT	12
+#define HV_FAULT_TYPE_RESV1	13
+#define HV_FAULT_TYPE_UNALIGNED	14
+#define HV_FAULT_TYPE_INV_PGSZ	15
+/* Values 16 --> -2 are reserved.  */
+#define HV_FAULT_TYPE_MULTIPLE	-1
+
+/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
+ * and mmu_{map,unmap}_perm_addr().
+ */
+#define HV_MMU_DMMU			0x01
+#define HV_MMU_IMMU			0x02
+#define HV_MMU_ALL			(HV_MMU_DMMU | HV_MMU_IMMU)
+
+/* mmu_map_addr()
+ * TRAP:	HV_MMU_MAP_ADDR_TRAP
+ * ARG0:	virtual address
+ * ARG1:	mmu context
+ * ARG2:	TTE
+ * ARG3:	flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
+ *		EBADPGSZ	Invalid page size value
+ *		ENORADDR	Invalid real address in TTE
+ *
+ * Create a non-permanent mapping using the given TTE, virtual
+ * address, and mmu context.  The flags argument determines which
+ * (data, or instruction, or both) TLB the mapping gets loaded into.
+ *
+ * The behavior is undefined if the valid bit is clear in the TTE.
+ *
+ * Note: This API call is for privileged code to specify temporary translation
+ *       mappings without the need to create and manage a TSB.
+ */
+
+/* mmu_unmap_addr()
+ * TRAP:	HV_MMU_UNMAP_ADDR_TRAP
+ * ARG0:	virtual address
+ * ARG1:	mmu context
+ * ARG2:	flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
+ *
+ * Demaps the given virtual address in the given mmu context on this
+ * CPU.  This function is intended to be used to demap pages mapped
+ * with mmu_map_addr.  This service is equivalent to invoking
+ * mmu_demap_page() with only the current CPU in the CPU list. The
+ * flags argument determines which (data, or instruction, or both) TLB
+ * the mapping gets unmapped from.
+ *
+ * Attempting to perform an unmap operation for a previously defined
+ * permanent mapping will have undefined results.
+ */
+
+/* mmu_tsb_ctx0()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_TSB_CTX0
+ * ARG0:	number of TSB descriptions
+ * ARG1:	TSB descriptions pointer
+ * RET0:	status
+ * ERRORS:	ENORADDR		Invalid TSB descriptions pointer or
+ *					TSB base within a descriptor
+ *		EBADALIGN		TSB descriptions pointer is not aligned
+ *					to an 8-byte boundary, or TSB base
+ *					within a descriptor is not aligned for
+ *					the given TSB size
+ *		EBADPGSZ		Invalid page size in a TSB descriptor
+ *		EBADTSB			Invalid associativity or size in a TSB
+ *					descriptor
+ *		EINVAL			Invalid number of TSB descriptions, or
+ *					invalid context index in a TSB
+ *					descriptor, or index page size not
+ *					equal to smallest page size in page
+ *					size bitmask field.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * context zero.  The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: The maximum number of TSBs available to a virtual CPU is given by the
+ *       mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
+ *       machine description.
+ */
+#define HV_FAST_MMU_TSB_CTX0		0x20
+
+/* mmu_tsb_ctxnon0()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0
+ * ARG0:	number of TSB descriptions
+ * ARG1:	TSB descriptions pointer
+ * RET0:	status
+ * ERRORS:	Same as for mmu_tsb_ctx0() above.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * non-zero contexts.  The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: A maximum of 16 TSBs may be specified in the TSB description list.
+ */
+#define HV_FAST_MMU_TSB_CTXNON0		0x21
+
+/* mmu_demap_page()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_DEMAP_PAGE
+ * ARG0:	reserved, must be zero
+ * ARG1:	reserved, must be zero
+ * ARG2:	virtual address
+ * ARG3:	mmu context
+ * ARG4:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid virtual address, context, or
+ *					flags value
+ *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
+ *
+ * Demaps any page mapping of the given virtual address in the given
+ * mmu context for the current virtual CPU.  Any virtually tagged
+ * caches are guaranteed to be kept consistent.  The flags argument
+ * determines which TLB (instruction, or data, or both) participate in
+ * the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_PAGE		0x22
+
+/* mmu_demap_ctx()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_DEMAP_CTX
+ * ARG0:	reserved, must be zero
+ * ARG1:	reserved, must be zero
+ * ARG2:	mmu context
+ * ARG3:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid context or flags value
+ *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the given context for the current virtual CPU.  Any virtual
+ * tagged caches are guaranteed to be kept consistent.  The flags
+ * argument determines which TLB (instruction, or data, or both)
+ * participate in the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_CTX		0x23
+
+/* mmu_demap_all()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_DEMAP_ALL
+ * ARG0:	reserved, must be zero
+ * ARG1:	reserved, must be zero
+ * ARG2:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid flags value
+ *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the current virtual CPU.  Any virtual tagged caches are
+ * guaranteed to be kept consistent.  The flags argument determines
+ * which TLB (instruction, or data, or both) participate in the
+ * operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_ALL		0x24
+
+/* mmu_map_perm_addr()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_MAP_PERM_ADDR
+ * ARG0:	virtual address
+ * ARG1:	reserved, must be zero
+ * ARG2:	TTE
+ * ARG3:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid virtual address or flags value
+ *		EBADPGSZ		Invalid page size value
+ *		ENORADDR		Invalid real address in TTE
+ *		ETOOMANY		Too many mappings (max of 8 reached)
+ *
+ * Create a permanent mapping using the given TTE and virtual address
+ * for context 0 on the calling virtual CPU.  A maximum of 8 such
+ * permanent mappings may be specified by privileged code.  Mappings
+ * may be removed with mmu_unmap_perm_addr().
+ *
+ * The behavior is undefined if a TTE with the valid bit clear is given.
+ *
+ * Note: This call is used to specify address space mappings for which
+ *       privileged code does not expect to receive misses.  For example,
+ *       this mechanism can be used to map kernel nucleus code and data.
+ */
+#define HV_FAST_MMU_MAP_PERM_ADDR	0x25
+
+/* mmu_fault_area_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_FAULT_AREA_CONF
+ * ARG0:	real address
+ * RET0:	status
+ * RET1:	previous mmu fault area real address
+ * ERRORS:	ENORADDR		Invalid real address
+ *		EBADALIGN		Invalid alignment for fault area
+ *
+ * Configure the MMU fault status area for the calling CPU.  A 64-byte
+ * aligned real address specifies where MMU fault status information
+ * is placed.  The return value is the previously specified area, or 0
+ * for the first invocation.  Specifying a fault area at real address
+ * 0 is not allowed.
+ */
+#define HV_FAST_MMU_FAULT_AREA_CONF	0x26
+
+/* mmu_enable()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_ENABLE
+ * ARG0:	enable flag
+ * ARG1:	return target address
+ * RET0:	status
+ * ERRORS:	ENORADDR		Invalid real address when disabling
+ *					translation.
+ *		EBADALIGN		The return target address is not
+ *					aligned to an instruction.
+ *		EINVAL			The enable flag requests the current
+ *					operating mode (e.g. disable if already
+ *					disabled)
+ *
+ * Enable or disable virtual address translation for the calling CPU
+ * within the virtual machine domain.  If the enable flag is zero,
+ * translation is disabled, any non-zero value will enable
+ * translation.
+ *
+ * When this function returns, the newly selected translation mode
+ * will be active.  If the mmu is being enabled, then the return
+ * target address is a virtual address; otherwise it is a real address.
+ *
+ * Upon successful completion, control will be returned to the given
+ * return target address (ie. the cpu will jump to that address).  On
+ * failure, the previous mmu mode remains and the trap simply returns
+ * as normal with the appropriate error code in RET0.
+ */
+#define HV_FAST_MMU_ENABLE		0x27
+
+/* mmu_unmap_perm_addr()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_UNMAP_PERM_ADDR
+ * ARG0:	virtual address
+ * ARG1:	reserved, must be zero
+ * ARG2:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid virtual address or flags value
+ *		ENOMAP			Specified mapping was not found
+ *
+ * Demaps any permanent page mapping (established via
+ * mmu_map_perm_addr()) at the given virtual address for context 0 on
+ * the current virtual CPU.  Any virtual tagged caches are guaranteed
+ * to be kept consistent.
+ */
+#define HV_FAST_MMU_UNMAP_PERM_ADDR	0x28
+
+/* mmu_tsb_ctx0_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_TSB_CTX0_INFO
+ * ARG0:	max TSBs
+ * ARG1:	buffer pointer
+ * RET0:	status
+ * RET1:	number of TSBs
+ * ERRORS:	EINVAL			Supplied buffer is too small
+ *		EBADALIGN		The buffer pointer is badly aligned
+ *		ENORADDR		Invalid real address for buffer pointer
+ *
+ * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
+ * into the provided buffer.  The size of the buffer is given in ARG1
+ * in terms of the number of TSB description entries.
+ *
+ * Upon return, RET1 always contains the number of TSB descriptions
+ * previously configured.  If zero TSBs were configured, EOK is
+ * returned with RET1 containing 0.
+ */
+#define HV_FAST_MMU_TSB_CTX0_INFO	0x29
+
+/* mmu_tsb_ctxnon0_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0_INFO
+ * ARG0:	max TSBs
+ * ARG1:	buffer pointer
+ * RET0:	status
+ * RET1:	number of TSBs
+ * ERRORS:	EINVAL			Supplied buffer is too small
+ *		EBADALIGN		The buffer pointer is badly aligned
+ *		ENORADDR		Invalid real address for buffer pointer
+ *
+ * Return the TSB configuration as previously defined by
+ * mmu_tsb_ctxnon0() into the provided buffer.  The size of the buffer
+ * is given in ARG1 in terms of the number of TSB description entries.
+ *
+ * Upon return, RET1 always contains the number of TSB descriptions
+ * previously configured.  If zero TSBs were configured, EOK is
+ * returned with RET1 containing 0.
+ */
+#define HV_FAST_MMU_TSB_CTXNON0_INFO	0x2a
+
+/* mmu_fault_area_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_FAULT_AREA_INFO
+ * RET0:	status
+ * RET1:	fault area real address
+ * ERRORS:	No errors defined.
+ *
+ * Return the currently defined MMU fault status area for the current
+ * CPU.  The real address of the fault status area is returned in
+ * RET1, or 0 is returned in RET1 if no fault status area is defined.
+ *
+ * Note: mmu_fault_area_conf() may be called with the return value (RET1)
+ *       from this service if there is a need to save and restore the fault
+ *	 area for a cpu.
+ */
+#define HV_FAST_MMU_FAULT_AREA_INFO	0x2b
+
+/* Cache and Memory services. */
+
+/* mem_scrub()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MEM_SCRUB
+ * ARG0:	real address
+ * ARG1:	length
+ * RET0:	status
+ * RET1:	length scrubbed
+ * ERRORS:	ENORADDR	Invalid real address
+ *		EBADALIGN	Start address or length are not correctly
+ *				aligned
+ *		EINVAL		Length is zero
+ *
+ * Zero the memory contents in the range real address to real address
+ * plus length minus 1.  Also, valid ECC will be generated for that
+ * memory address range.  Scrubbing is started at the given real
+ * address, but may not scrub the entire given length.  The actual
+ * length scrubbed will be returned in RET1.
+ *
+ * The real address and length must be aligned on an 8K boundary, or
+ * contain the start address and length from a sun4v error report.
+ *
+ * Note: There are two uses for this function.  The first use is to block clear
+ *       and initialize memory and the second is to scrub an uncorrectable
+ *       error reported via a resumable or non-resumable trap.  The second
+ *       use requires the arguments to be equal to the real address and length
+ *       provided in a sun4v memory error report.
+ */
+#define HV_FAST_MEM_SCRUB		0x31
+
+/* mem_sync()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MEM_SYNC
+ * ARG0:	real address
+ * ARG1:	length
+ * RET0:	status
+ * RET1:	length synced
+ * ERRORS:	ENORADDR	Invalid real address
+ *		EBADALIGN	Start address or length are not correctly
+ *				aligned
+ *		EINVAL		Length is zero
+ *
+ * Force the next access within the range real address to real address
+ * plus length minus 1 to be fetched from main system memory.  Less than
+ * the given length may be synced, the actual amount synced is
+ * returned in RET1.  The real address and length must be aligned on
+ * an 8K boundary.
+ */
+#define HV_FAST_MEM_SYNC		0x32
+
+/* Time of day services.
+ *
+ * The hypervisor maintains the time of day on a per-domain basis.
+ * Changing the time of day in one domain does not affect the time of
+ * day on any other domain.
+ *
+ * Time is described by a single unsigned 64-bit word which is the
+ * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
+ * 1970).
+ */
+
+/* tod_get()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_TOD_GET
+ * RET0:	status
+ * RET1:	TOD
+ * ERRORS:	EWOULDBLOCK	TOD resource is temporarily unavailable
+ *		ENOTSUPPORTED	If TOD not supported on this platform
+ *
+ * Return the current time of day.  May block if TOD access is
+ * temporarily not possible.
+ */
+#define HV_FAST_TOD_GET			0x50
+
+/* tod_set()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_TOD_SET
+ * ARG0:	TOD
+ * RET0:	status
+ * ERRORS:	EWOULDBLOCK	TOD resource is temporarily unavailable
+ *		ENOTSUPPORTED	If TOD not supported on this platform
+ *
+ * The current time of day is set to the value specified in ARG0.  May
+ * block if TOD access is temporarily not possible.
+ */
+#define HV_FAST_TOD_SET			0x51
+
+/* Console services */
+
+/* con_getchar()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CONS_GETCHAR
+ * RET0:	status
+ * RET1:	character
+ * ERRORS:	EWOULDBLOCK	No character available.
+ *
+ * Returns a character from the console device.  If no character is
+ * available then an EWOULDBLOCK error is returned.  If a character is
+ * available, then the returned status is EOK and the character value
+ * is in RET1.
+ *
+ * A virtual BREAK is represented by the 64-bit value -1.
+ *
+ * A virtual HUP signal is represented by the 64-bit value -2.
+ */
+#define HV_FAST_CONS_GETCHAR		0x60
+
+/* con_putchar()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CONS_PUTCHAR
+ * ARG0:	character
+ * RET0:	status
+ * ERRORS:	EINVAL		Illegal character
+ *		EWOULDBLOCK	Output buffer currently full, would block
+ *
+ * Send a character to the console device.  Only character values
+ * between 0 and 255 may be used.  Values outside this range are
+ * invalid except for the 64-bit value -1 which is used to send a
+ * virtual BREAK.
+ */
+#define HV_FAST_CONS_PUTCHAR		0x61
+
+/* Trap trace services.
+ *
+ * The hypervisor provides a trap tracing capability for privileged
+ * code running on each virtual CPU.  Privileged code provides a
+ * round-robin trap trace queue within which the hypervisor writes
+ * 64-byte entries detailing hyperprivileged traps taken on behalf of
+ * privileged code.  This is provided as a debugging capability for
+ * privileged code.
+ *
+ * The trap trace control structure is 64-bytes long and placed at the
+ * start (offset 0) of the trap trace buffer, and is described as
+ * follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_trap_trace_control {
+	unsigned long		head_offset;
+	unsigned long		tail_offset;
+	unsigned long		__reserved[0x30 / sizeof(unsigned long)];
+};
+#endif
+#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET	0x00
+#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET	0x08
+
+/* The head offset is the offset of the most recently completed entry
+ * in the trap-trace buffer.  The tail offset is the offset of the
+ * next entry to be written.  The control structure is owned and
+ * modified by the hypervisor.  A guest may not modify the control
+ * structure contents.  Attempts to do so will result in undefined
+ * behavior for the guest.
+ *
+ * Each trap trace buffer entry is laid out as follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_trap_trace_entry {
+	unsigned char	type;		/* Hypervisor or guest entry?	*/
+	unsigned char	hpstate;	/* Hyper-privileged state	*/
+	unsigned char	tl;		/* Trap level			*/
+	unsigned char	gl;		/* Global register level	*/
+	unsigned short	tt;		/* Trap type			*/
+	unsigned short	tag;		/* Extended trap identifier	*/
+	unsigned long	tstate;		/* Trap state			*/
+	unsigned long	tick;		/* Tick				*/
+	unsigned long	tpc;		/* Trap PC			*/
+	unsigned long	f1;		/* Entry specific		*/
+	unsigned long	f2;		/* Entry specific		*/
+	unsigned long	f3;		/* Entry specific		*/
+	unsigned long	f4;		/* Entry specific		*/
+};
+#endif
+#define HV_TRAP_TRACE_ENTRY_TYPE	0x00
+#define HV_TRAP_TRACE_ENTRY_HPSTATE	0x01
+#define HV_TRAP_TRACE_ENTRY_TL		0x02
+#define HV_TRAP_TRACE_ENTRY_GL		0x03
+#define HV_TRAP_TRACE_ENTRY_TT		0x04
+#define HV_TRAP_TRACE_ENTRY_TAG		0x06
+#define HV_TRAP_TRACE_ENTRY_TSTATE	0x08
+#define HV_TRAP_TRACE_ENTRY_TICK	0x10
+#define HV_TRAP_TRACE_ENTRY_TPC		0x18
+#define HV_TRAP_TRACE_ENTRY_F1		0x20
+#define HV_TRAP_TRACE_ENTRY_F2		0x28
+#define HV_TRAP_TRACE_ENTRY_F3		0x30
+#define HV_TRAP_TRACE_ENTRY_F4		0x38
+
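+/* Locating the most recently completed entry, as an illustrative
+ * sketch (trace_buf is a placeholder for the buffer base; remember
+ * the control structure is read-only to the guest):
+ *
+ *	struct hv_trap_trace_control *ctrl = trace_buf;
+ *	struct hv_trap_trace_entry *last =
+ *		(struct hv_trap_trace_entry *)
+ *			((char *)trace_buf + ctrl->head_offset);
+ */
+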
+/* The type field is encoded as follows.  */
+#define HV_TRAP_TYPE_UNDEF		0x00 /* Entry content undefined     */
+#define HV_TRAP_TYPE_HV			0x01 /* Hypervisor trap entry       */
+#define HV_TRAP_TYPE_GUEST		0xff /* Added via ttrace_addentry() */
+
+/* ttrace_buf_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_TTRACE_BUF_CONF
+ * ARG0:	real address
+ * ARG1:	number of entries
+ * RET0:	status
+ * RET1:	number of entries
+ * ERRORS:	ENORADDR	Invalid real address
+ *		EINVAL		Size is too small
+ *		EBADALIGN	Real address not aligned on 64-byte boundary
+ *
+ * Requests hypervisor trap tracing and declares a virtual CPU's trap
+ * trace buffer to the hypervisor.  The real address supplies the real
+ * base address of the trap trace queue and must be 64-byte aligned.
+ * Specifying a value of 0 for the number of entries disables trap
+ * tracing for the calling virtual CPU.  The buffer allocated must be
+ * sized for a power of two number of 64-byte trap trace entries plus
+ * an initial 64-byte control structure.
+ * 
+ * This may be invoked any number of times so that a virtual CPU may
+ * relocate a trap trace buffer or create "snapshots" of information.
+ *
+ * If the real address is illegal or badly aligned, then trap tracing
+ * is disabled and an error is returned.
+ *
+ * Upon failure with EINVAL, this service call returns in RET1 the
+ * minimum number of buffer entries required.  Upon other failures
+ * RET1 is undefined.
+ */
+#define HV_FAST_TTRACE_BUF_CONF		0x90
+
+/* ttrace_buf_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_TTRACE_BUF_INFO
+ * RET0:	status
+ * RET1:	real address
+ * RET2:	size
+ * ERRORS:	None defined.
+ *
+ * Returns the size and location of the previously declared trap-trace
+ * buffer.  In the event that no buffer was previously defined, or the
+ * buffer is disabled, this call will return a size of zero bytes.
+ */
+#define HV_FAST_TTRACE_BUF_INFO		0x91
+
+/* ttrace_enable()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_TTRACE_ENABLE
+ * ARG0:	enable
+ * RET0:	status
+ * RET1:	previous enable state
+ * ERRORS:	EINVAL		No trap trace buffer currently defined
+ *
+ * Enable or disable trap tracing, and return the previous enabled
+ * state in RET1.  Future systems may define various flags for the
+ * enable argument (ARG0), for the moment a guest should pass
+ * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all
+ * tracing - which will ensure future compatibility.
+ */
+#define HV_FAST_TTRACE_ENABLE		0x92
+
+/* ttrace_freeze()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_TTRACE_FREEZE
+ * ARG0:	freeze
+ * RET0:	status
+ * RET1:	previous freeze state
+ * ERRORS:	EINVAL		No trap trace buffer currently defined
+ *
+ * Freeze or unfreeze trap tracing, returning the previous freeze
+ * state in RET1.  A guest should pass a non-zero value to freeze and
+ * a zero value to unfreeze all tracing.  The returned previous state
+ * is 0 for not frozen and 1 for frozen.
+ */
+#define HV_FAST_TTRACE_FREEZE		0x93
+
+/* ttrace_addentry()
+ * TRAP:	HV_TTRACE_ADDENTRY_TRAP
+ * ARG0:	tag (16-bits)
+ * ARG1:	data word 0
+ * ARG2:	data word 1
+ * ARG3:	data word 2
+ * ARG4:	data word 3
+ * RET0:	status
+ * ERRORS:	EINVAL		No trap trace buffer currently defined
+ *
+ * Add an entry to the trap trace buffer.  Upon return only ARG0/RET0
+ * is modified - none of the other registers holding arguments are
+ * volatile across this hypervisor service.
+ */
+
+/* Core dump services.
+ *
+ * Since the hypervisor virtualizes and thus obscures a lot of the
+ * physical machine layout and state, traditional OS crash dumps can
+ * be difficult to diagnose especially when the problem is a
+ * configuration error of some sort.
+ *
+ * The dump services provide an opaque buffer into which the
+ * hypervisor can place its internal state in order to assist in
+ * debugging such situations.  The contents are opaque and extremely
+ * platform and hypervisor implementation specific.  The guest, during
+ * a core dump, requests that the hypervisor update any information in
+ * the dump buffer in preparation to being dumped as part of the
+ * domain's memory image.
+ */
+
+/* dump_buf_update()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_DUMP_BUF_UPDATE
+ * ARG0:	real address
+ * ARG1:	size
+ * RET0:	status
+ * RET1:	required size of dump buffer
+ * ERRORS:	ENORADDR	Invalid real address
+ *		EBADALIGN	Real address is not aligned on a 64-byte
+ *				boundary
+ *		EINVAL		Size is non-zero but less than minimum size
+ *				required
+ *		ENOTSUPPORTED	Operation not supported on current logical
+ *				domain
+ *
+ * Declare a domain dump buffer to the hypervisor.  The real address
+ * provided for the domain dump buffer must be 64-byte aligned.  The
+ * size specifies the size of the dump buffer and may be larger than
+ * the minimum size specified in the machine description.  The
+ * hypervisor will fill the dump buffer with opaque data.
+ *
+ * Note: A guest may elect to include dump buffer contents as part of a crash
+ *       dump to assist with debugging.  This function may be called any number
+ *       of times so that a guest may relocate a dump buffer, or create
+ *       "snapshots" of any dump-buffer information.  Each call to
+ *       dump_buf_update() atomically declares the new dump buffer to the
+ *       hypervisor.
+ *
+ * A specified size of 0 unconfigures the dump buffer.  If the real
+ * address is illegal or badly aligned, then any currently active dump
+ * buffer is disabled and an error is returned.
+ *
+ * In the event that the call fails with EINVAL, RET1 contains the
+ * minimum size required by the hypervisor for a valid dump buffer.
+ */
+#define HV_FAST_DUMP_BUF_UPDATE		0x94
+
+/* dump_buf_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_DUMP_BUF_INFO
+ * RET0:	status
+ * RET1:	real address of current dump buffer
+ * RET2:	size of current dump buffer
+ * ERRORS:	No errors defined.
+ *
+ * Return the currently configured dump buffer description.  A
+ * returned size of 0 bytes indicates an undefined dump buffer.  In
+ * this case the return address in RET1 is undefined.
+ */
+#define HV_FAST_DUMP_BUF_INFO		0x95
+
+/* Device interrupt services.
+ *
+ * Device interrupts are allocated to system bus bridges by the hypervisor,
+ * and described to OBP in the machine description.  OBP then describes
+ * these interrupts to the OS via properties in the device tree.
+ *
+ * Terminology:
+ *
+ *	cpuid		Unique opaque value which represents a target cpu.
+ *
+ *	devhandle	Device handle.  It uniquely identifies a device, and
+ *			consists of the lower 28-bits of the hi-cell of the
+ *			first entry of the device's "reg" property in the
+ *			OBP device tree.
+ *
+ *	devino		Device interrupt number.  Specifies the relative
+ *			interrupt number within the device.  The unique
+ *			combination of devhandle and devino is used to
+ *			identify a specific device interrupt.
+ *
+ *			Note: The devino value is the same as the values in the
+ *			      "interrupts" property or "interrupt-map" property
+ *			      in the OBP device tree for that device.
+ *
+ *	sysino		System interrupt number.  A 64-bit unsigned integer
+ *			representing a unique interrupt within a virtual
+ *			machine.
+ *
+ *	intr_state	A flag representing the interrupt state for a given
+ *			sysino.  The state values are defined below.
+ *
+ *	intr_enabled	A flag representing the 'enabled' state for a given
+ *			sysino.  The enable values are defined below.
+ */
+
+#define HV_INTR_STATE_IDLE		0 /* Nothing pending */
+#define HV_INTR_STATE_RECEIVED		1 /* Interrupt received by hardware */
+#define HV_INTR_STATE_DELIVERED		2 /* Interrupt delivered to queue */
+
+#define HV_INTR_DISABLED		0 /* sysino not enabled */
+#define HV_INTR_ENABLED			1 /* sysino enabled */
+
+/* intr_devino_to_sysino()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_INTR_DEVINO2SYSINO
+ * ARG0:	devhandle
+ * ARG1:	devino
+ * RET0:	status
+ * RET1:	sysino
+ * ERRORS:	EINVAL		Invalid devhandle/devino
+ *
+ * Converts a device specific interrupt number of the given
+ * devhandle/devino into a system specific ino (sysino).
+ */
+#define HV_FAST_INTR_DEVINO2SYSINO	0xa0
+
+/* intr_getenabled()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_INTR_GETENABLED
+ * ARG0:	sysino
+ * RET0:	status
+ * RET1:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
+ * ERRORS:	EINVAL		Invalid sysino
+ *
+ * Returns interrupt enabled state in RET1 for the interrupt defined
+ * by the given sysino.
+ */
+#define HV_FAST_INTR_GETENABLED		0xa1
+
+/* intr_setenabled()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_INTR_SETENABLED
+ * ARG0:	sysino
+ * ARG1:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid sysino or intr_enabled value
+ *
+ * Set the 'enabled' state of the interrupt sysino.
+ */
+#define HV_FAST_INTR_SETENABLED		0xa2
+
+/* intr_getstate()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_INTR_GETSTATE
+ * ARG0:	sysino
+ * RET0:	status
+ * RET1:	intr_state (HV_INTR_STATE_*)
+ * ERRORS:	EINVAL		Invalid sysino
+ *
+ * Returns current state of the interrupt defined by the given sysino.
+ */
+#define HV_FAST_INTR_GETSTATE		0xa3
+
+/* intr_setstate()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_INTR_SETSTATE
+ * ARG0:	sysino
+ * ARG1:	intr_state (HV_INTR_STATE_*)
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid sysino or intr_state value
+ *
+ * Sets the current state of the interrupt described by the given sysino
+ * value.
+ *
+ * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending
+ *       interrupt for sysino.
+ */
+#define HV_FAST_INTR_SETSTATE		0xa4
+
+/* intr_gettarget()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_INTR_GETTARGET
+ * ARG0:	sysino
+ * RET0:	status
+ * RET1:	cpuid
+ * ERRORS:	EINVAL		Invalid sysino
+ *
+ * Returns CPU that is the current target of the interrupt defined by
+ * the given sysino.  The CPU value returned is undefined if the target
+ * has not been set via intr_settarget().
+ */
+#define HV_FAST_INTR_GETTARGET		0xa5
+
+/* intr_settarget()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_INTR_SETTARGET
+ * ARG0:	sysino
+ * ARG1:	cpuid
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid sysino
+ *		ENOCPU		Invalid cpuid
+ *
+ * Set the target CPU for the interrupt defined by the given sysino.
+ */
+#define HV_FAST_INTR_SETTARGET		0xa6
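+
+/* Illustrative only: a minimal guest-side bring-up sequence for one
+ * device interrupt.  The sketch assumes hypothetical C wrappers named
+ * after the calls above (not defined in this header), each issuing
+ * HV_FAST_TRAP with the matching function number, returning the
+ * status from RET0 and storing RET1 through a trailing pointer
+ * argument:
+ *
+ *	unsigned long sysino;
+ *
+ *	if (intr_devino_to_sysino(devhandle, devino, &sysino) == 0) {
+ *		intr_settarget(sysino, cpuid);
+ *		intr_setstate(sysino, HV_INTR_STATE_IDLE);
+ *		intr_setenabled(sysino, HV_INTR_ENABLED);
+ *	}
+ */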
+
+/* PCI IO services.
+ *
+ * See the terminology descriptions in the device interrupt services
+ * section above as those apply here too.  Here are terminology
+ * definitions specific to these PCI IO services:
+ *
+ *	tsbnum		TSB number.  Identifies which io-tsb is used.
+ *			For this version of the specification, tsbnum
+ *			must be zero.
+ *
+ *	tsbindex	TSB index.  Identifies which entry in the TSB
+ *			is used.  The first entry is zero.
+ *
+ *	tsbid		A 64-bit aligned data structure which contains
+ *			a tsbnum and a tsbindex.  Bits 63:32 contain the
+ *			tsbnum and bits 31:00 contain the tsbindex.
+ *
+ *	io_attributes	IO attributes for IOMMU mappings.  One or more
+ *			of the attribute bits are stored in a 64-bit
+ *			value.  The values are defined below.
+ *
+ *	r_addr		64-bit real address
+ *
+ *	pci_device	PCI device address.  A PCI device address identifies
+ *			a specific device on a specific PCI bus segment.
+ *			A PCI device address is a 32-bit unsigned integer
+ *			with the following format:
+ *
+ *				00000000.bbbbbbbb.dddddfff.00000000
+ *
+ *			Use the HV_PCI_DEVICE_BUILD() macro to construct
+ *			such values.
+ *
+ *	pci_config_offset
+ *			PCI configuration space offset.  For conventional
+ *			PCI a value between 0 and 255.  For extended
+ *			configuration space, a value between 0 and 4095.
+ *
+ *			Note: For PCI configuration space accesses, the offset
+ *			      must be aligned to the access size.
+ *
+ *	error_flag	A return value which specifies if the action succeeded
+ *			or failed.  0 means no error, non-0 means some error
+ *			occurred while performing the service.
+ *
+ *	io_sync_direction
+ *			Direction definition for pci_dma_sync(), defined
+ *			below in HV_PCI_SYNC_*.
+ *
+ *	io_page_list	A list of io_page_addresses; an io_page_address
+ *			is a real address.
+ *
+ *	io_page_list_p	A pointer to an io_page_list.
+ *
+ *	"size based byte swap" - Some functions do size based byte swapping
+ *				 which allows software to access pointers and
+ *				 counters in native form when the processor
+ *				 operates in a different endianness than the
+ *				 IO bus.  Size-based byte swapping converts a
+ *				 multi-byte field between big-endian and
+ *				 little-endian format.
+ */
+
+#define HV_PCI_MAP_ATTR_READ		0x01
+#define HV_PCI_MAP_ATTR_WRITE		0x02
+
+#define HV_PCI_DEVICE_BUILD(b,d,f)	\
+	((((b) & 0xff) << 16) | \
+	 (((d) & 0x1f) << 11) | \
+	 (((f) & 0x07) <<  8))
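+
+/* For example, HV_PCI_DEVICE_BUILD(2, 3, 1) for bus 2, device 3,
+ * function 1 evaluates to (2 << 16) | (3 << 11) | (1 << 8), i.e.
+ * 0x00021900, matching the 00000000.bbbbbbbb.dddddfff.00000000
+ * layout described above.
+ */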
+
+#define HV_PCI_SYNC_FOR_DEVICE		0x01
+#define HV_PCI_SYNC_FOR_CPU		0x02
+
+/* pci_iommu_map()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOMMU_MAP
+ * ARG0:	devhandle
+ * ARG1:	tsbid
+ * ARG2:	#ttes
+ * ARG3:	io_attributes
+ * ARG4:	io_page_list_p
+ * RET0:	status
+ * RET1:	#ttes mapped
+ * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex/io_attributes
+ *		EBADALIGN	Improperly aligned real address
+ *		ENORADDR	Invalid real address
+ *
+ * Create IOMMU mappings in the sun4v device defined by the given
+ * devhandle.  The mappings are created in the TSB defined by the
+ * tsbnum component of the given tsbid.  The first mapping is created
+ * in the TSB index defined by the tsbindex component of the given tsbid.
+ * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
+ * the second at tsbnum, tsbindex + 1, etc.
+ *
+ * All mappings are created with the attributes defined by the io_attributes
+ * argument.  The page mapping addresses are described in the io_page_list
+ * defined by the given io_page_list_p, which is a pointer to the io_page_list.
+ * The first entry in the io_page_list is the address for the first iotte, the
+ * 2nd for the 2nd iotte, and so on.
+ *
+ * Each io_page_address in the io_page_list must be appropriately aligned.
+ * #ttes must be greater than zero.  For this version of the spec, the tsbnum
+ * component of the given tsbid must be zero.
+ *
+ * Returns the actual number of mappings created, which may be less than
+ * or equal to the argument #ttes.  If the function returns a value which
+ * is less than #ttes, the caller may continue to call the function with
+ * updated tsbid, #ttes, and io_page_list_p arguments until all pages are
+ * mapped.
+ *
+ * Note: This function does not imply an iotte cache flush.  The guest must
+ *       demap an entry before re-mapping it.
+ */
+#define HV_FAST_PCI_IOMMU_MAP		0xb0
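+
+/* Illustrative only: pci_iommu_map() may map fewer entries than
+ * requested, so callers loop until everything is mapped.  A sketch
+ * using the same hypothetical wrapper convention as the interrupt
+ * services example above; it builds the tsbid per the terminology
+ * section (tsbnum in bits 63:32, tsbindex in bits 31:00) and assumes
+ * each io_page_address in the list occupies 8 bytes:
+ *
+ *	unsigned long tsbindex = start_index, mapped;
+ *
+ *	while (num_ttes) {
+ *		unsigned long tsbid = (0UL << 32) | tsbindex;
+ *
+ *		if (pci_iommu_map(devhandle, tsbid, num_ttes,
+ *				  io_attrs, page_list_ra, &mapped))
+ *			break;			/* non-zero status */
+ *		tsbindex += mapped;
+ *		page_list_ra += mapped * 8;
+ *		num_ttes -= mapped;
+ *	}
+ */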
+
+/* pci_iommu_demap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOMMU_DEMAP
+ * ARG0:	devhandle
+ * ARG1:	tsbid
+ * ARG2:	#ttes
+ * RET0:	status
+ * RET1:	#ttes demapped
+ * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex
+ *
+ * Demap and flush IOMMU mappings in the device defined by the given
+ * devhandle.  Demaps up to #ttes entries in the TSB defined by the tsbnum
+ * component of the given tsbid, starting at the TSB index defined by the
+ * tsbindex component of the given tsbid.
+ *
+ * For this version of the spec, the tsbnum of the given tsbid must be zero.
+ * #ttes must be greater than zero.
+ *
+ * Returns the actual number of ttes demapped, which may be less than or equal
+ * to the argument #ttes.  If #ttes demapped is less than #ttes, the caller
+ * may continue to call this function with updated tsbid and #ttes arguments
+ * until all pages are demapped.
+ *
+ * Note: Entries do not have to be mapped to be demapped.  A demap of an
+ *       unmapped page will flush the entry from the tte cache.
+ */
+#define HV_FAST_PCI_IOMMU_DEMAP		0xb1
+
+/* pci_iommu_getmap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOMMU_GETMAP
+ * ARG0:	devhandle
+ * ARG1:	tsbid
+ * RET0:	status
+ * RET1:	io_attributes
+ * RET2:	real address
+ * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex
+ *		ENOMAP		Mapping is not valid, no translation exists
+ *
+ * Read and return the mapping in the device described by the given devhandle
+ * and tsbid.  If successful, the io_attributes shall be returned in RET1
+ * and the page address of the mapping shall be returned in RET2.
+ *
+ * For this version of the spec, the tsbnum component of the given tsbid
+ * must be zero.
+ */
+#define HV_FAST_PCI_IOMMU_GETMAP	0xb2
+
+/* pci_iommu_getbypass()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOMMU_GETBYPASS
+ * ARG0:	devhandle
+ * ARG1:	real address
+ * ARG2:	io_attributes
+ * RET0:	status
+ * RET1:	io_addr
+ * ERRORS:	EINVAL		Invalid devhandle/io_attributes
+ *		ENORADDR	Invalid real address
+ *		ENOTSUPPORTED	Function not supported in this implementation.
+ *
+ * Create a "special" mapping in the device described by the given devhandle,
+ * for the given real address and attributes.  Return the IO address in RET1
+ * if successful.
+ */
+#define HV_FAST_PCI_IOMMU_GETBYPASS	0xb3
+
+/* pci_config_get()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_CONFIG_GET
+ * ARG0:	devhandle
+ * ARG1:	pci_device
+ * ARG2:	pci_config_offset
+ * ARG3:	size
+ * RET0:	status
+ * RET1:	error_flag
+ * RET2:	data
+ * ERRORS:	EINVAL		Invalid devhandle/pci_device/offset/size
+ *		EBADALIGN	pci_config_offset not size aligned
+ *		ENOACCESS	Access to this offset is not permitted
+ *
+ * Read PCI configuration space for the adapter described by the given
+ * devhandle.  Read size (1, 2, or 4) bytes of data from the given
+ * pci_device, at pci_config_offset from the beginning of the device's
+ * configuration space.  If there was no error, RET1 is set to zero and
+ * RET2 is set to the data read.  Insignificant bits in RET2 are not
+ * guaranteed to have any specific value and therefore must be ignored.
+ *
+ * The data returned in RET2 is size based byte swapped.
+ *
+ * If an error occurs during the read, RET1 is set to a non-zero value.  The
+ * given pci_config_offset must be 'size' aligned.
+ */
+#define HV_FAST_PCI_CONFIG_GET		0xb4
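+
+/* Illustrative only: reading the 32-bit word at offset 0 of
+ * conventional config space (the vendor/device ID), again with a
+ * hypothetical wrapper that returns the status and stores RET1
+ * (error_flag) and RET2 (data) through pointers.  Both the status
+ * and error_flag must be checked:
+ *
+ *	unsigned long eflag, data;
+ *	unsigned long dev = HV_PCI_DEVICE_BUILD(bus, device, fn);
+ *
+ *	if (pci_config_get(devhandle, dev, 0, 4, &eflag, &data) == 0 &&
+ *	    eflag == 0)
+ *		id = (u32) data;
+ */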
+
+/* pci_config_put()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_CONFIG_PUT
+ * ARG0:	devhandle
+ * ARG1:	pci_device
+ * ARG2:	pci_config_offset
+ * ARG3:	size
+ * ARG4:	data
+ * RET0:	status
+ * RET1:	error_flag
+ * ERRORS:	EINVAL		Invalid devhandle/pci_device/offset/size
+ *		EBADALIGN	pci_config_offset not size aligned
+ *		ENOACCESS	Access to this offset is not permitted
+ *
+ * Write PCI configuration space for the adapter described by the given
+ * devhandle.  Write size (1, 2, or 4) bytes of data in a single operation,
+ * at pci_config_offset from the beginning of the device's configuration
+ * space.  The data argument contains the data to be written to configuration
+ * space.  Prior to writing, the data is size based byte swapped.
+ *
+ * If an error occurs during the write access, do not generate an error
+ * report, but set RET1 to a non-zero value.  Otherwise RET1 is zero.
+ * The given pci_config_offset must be 'size' aligned.
+ *
+ * This function is permitted to read from offset zero in the configuration
+ * space described by the given pci_device if necessary to ensure that the
+ * write access to config space completes.
+ */
+#define HV_FAST_PCI_CONFIG_PUT		0xb5
+
+/* pci_peek()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_PEEK
+ * ARG0:	devhandle
+ * ARG1:	real address
+ * ARG2:	size
+ * RET0:	status
+ * RET1:	error_flag
+ * RET2:	data
+ * ERRORS:	EINVAL		Invalid devhandle or size
+ *		EBADALIGN	Improperly aligned real address
+ *		ENORADDR	Bad real address
+ *		ENOACCESS	Guest access prohibited
+ *
+ * Attempt to read the IO address given by the given devhandle, real address,
+ * and size.  Size must be 1, 2, 4, or 8.  The read is performed as a single
+ * access operation using the given size.  If an error occurs when reading
+ * from the given location, do not generate an error report, but return a
+ * non-zero value in RET1.  If the read was successful, return zero in RET1
+ * and return the actual data read in RET2.  The data returned is size based
+ * byte swapped.
+ *
+ * Non-significant bits in RET2 are not guaranteed to have any specific value
+ * and therefore must be ignored.  If RET1 is returned as non-zero, the data
+ * value is not guaranteed to have any specific value and should be ignored.
+ *
+ * The caller must have permission to read from the given devhandle, real
+ * address, which must be an IO address.  The argument real address must be a
+ * size aligned address.
+ *
+ * The hypervisor implementation of this function must block access to any
+ * IO address that the guest does not have explicit permission to access.
+ */
+#define HV_FAST_PCI_PEEK		0xb6
+
+/* pci_poke()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_POKE
+ * ARG0:	devhandle
+ * ARG1:	real address
+ * ARG2:	size
+ * ARG3:	data
+ * ARG4:	pci_device
+ * RET0:	status
+ * RET1:	error_flag
+ * ERRORS:	EINVAL		Invalid devhandle, size, or pci_device
+ *		EBADALIGN	Improperly aligned real address
+ *		ENORADDR	Bad real address
+ *		ENOACCESS	Guest access prohibited
+ *		ENOTSUPPORTED	Function is not supported by implementation
+ *
+ * Attempt to write data to the IO address given by the given devhandle,
+ * real address, and size.  Size must be 1, 2, 4, or 8.  The write is
+ * performed as a single access operation using the given size.  Prior to
+ * writing, the data is size based byte swapped.
+ *
+ * If an error occurs when writing to the given location, do not generate an
+ * error report, but return a non-zero value in RET1.  If the write was
+ * successful, return zero in RET1.
+ *
+ * pci_device describes the configuration address of the device being
+ * written to.  The implementation may safely read from offset 0 within
+ * the configuration space of the device described by devhandle and
+ * pci_device in order to guarantee that the write portion of the operation
+ * completes.
+ *
+ * Any error that occurs due to the read shall be reported using the normal
+ * error reporting mechanisms; the read error is not suppressed.
+ *
+ * The caller must have permission to write to the given devhandle, real
+ * address, which must be an IO address.  The argument real address must be a
+ * size aligned address.  The caller must have permission to read from
+ * the given devhandle, pci_device configuration space offset 0.
+ *
+ * The hypervisor implementation of this function must block access to any
+ * IO address that the guest does not have explicit permission to access.
+ */
+#define HV_FAST_PCI_POKE		0xb7
+
+/* pci_dma_sync()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_DMA_SYNC
+ * ARG0:	devhandle
+ * ARG1:	real address
+ * ARG2:	size
+ * ARG3:	io_sync_direction
+ * RET0:	status
+ * RET1:	#synced
+ * ERRORS:	EINVAL		Invalid devhandle or io_sync_direction
+ *		ENORADDR	Bad real address
+ *
+ * Synchronize a memory region described by the given real address and size,
+ * for the device defined by the given devhandle using the direction(s)
+ * defined by the given io_sync_direction.  The argument size is the size of
+ * the memory region in bytes.
+ *
+ * Return the actual number of bytes synchronized in the return value #synced,
+ * which may be less than or equal to the argument size.  If the return
+ * value #synced is less than size, the caller must continue to call this
+ * function with updated real address and size arguments until the entire
+ * memory region is synchronized.
+ */
+#define HV_FAST_PCI_DMA_SYNC		0xb8
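+
+/* Illustrative only: like pci_iommu_map(), pci_dma_sync() may complete
+ * partially, so callers advance and retry.  Same hypothetical wrapper
+ * convention as the earlier sketches:
+ *
+ *	while (size) {
+ *		unsigned long synced;
+ *
+ *		if (pci_dma_sync(devhandle, ra, size,
+ *				 HV_PCI_SYNC_FOR_CPU, &synced))
+ *			break;			/* non-zero status */
+ *		ra += synced;
+ *		size -= synced;
+ *	}
+ */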
+
+/* PCI MSI services.  */
+
+#define HV_MSITYPE_MSI32		0x00
+#define HV_MSITYPE_MSI64		0x01
+
+#define HV_MSIQSTATE_IDLE		0x00
+#define HV_MSIQSTATE_ERROR		0x01
+
+#define HV_MSIQ_INVALID			0x00
+#define HV_MSIQ_VALID			0x01
+
+#define HV_MSISTATE_IDLE		0x00
+#define HV_MSISTATE_DELIVERED		0x01
+
+#define HV_MSIVALID_INVALID		0x00
+#define HV_MSIVALID_VALID		0x01
+
+#define HV_PCIE_MSGTYPE_PME_MSG		0x18
+#define HV_PCIE_MSGTYPE_PME_ACK_MSG	0x1b
+#define HV_PCIE_MSGTYPE_CORR_MSG	0x30
+#define HV_PCIE_MSGTYPE_NONFATAL_MSG	0x31
+#define HV_PCIE_MSGTYPE_FATAL_MSG	0x33
+
+#define HV_MSG_INVALID			0x00
+#define HV_MSG_VALID			0x01
+
+/* pci_msiq_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_CONF
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * ARG2:	real address
+ * ARG3:	number of entries
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle, msiqid or nentries
+ *		EBADALIGN	Improperly aligned real address
+ *		ENORADDR	Bad real address
+ *
+ * Configure the MSI queue given by the devhandle and msiqid arguments
+ * to be placed at the given real address and to contain the given
+ * number of entries.  The real address must be aligned exactly to match
+ * the queue size.  Each queue entry is 64 bytes long, so for example a
+ * 32 entry queue must be aligned on a 2048 byte real address boundary.  The MSI-EQ
+ * Head and Tail are initialized so that the MSI-EQ is 'empty'.
+ *
+ * Implementation Note: Certain implementations have fixed sized queues.  In
+ *                      that case, number of entries must contain the correct
+ *                      value.
+ */
+#define HV_FAST_PCI_MSIQ_CONF		0xc0
+
+/* pci_msiq_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_INFO
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * RET0:	status
+ * RET1:	real address
+ * RET2:	number of entries
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid
+ *
+ * Return the configuration information for the MSI queue described
+ * by the given devhandle and msiqid.  The base address of the queue
+ * is returned in RET1 and the number of entries is returned in RET2.
+ * If the queue is unconfigured, the real address is undefined and the
+ * number of entries will be returned as zero.
+ */
+#define HV_FAST_PCI_MSIQ_INFO		0xc1
+
+/* pci_msiq_getvalid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_GETVALID
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * RET0:	status
+ * RET1:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid
+ *
+ * Get the valid state of the MSI-EQ described by the given devhandle and
+ * msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_GETVALID	0xc2
+
+/* pci_msiq_setvalid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_SETVALID
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * ARG2:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqvalid
+ *				value or MSI EQ is uninitialized
+ *
+ * Set the valid state of the MSI-EQ described by the given devhandle and
+ * msiqid to the given msiqvalid.
+ */
+#define HV_FAST_PCI_MSIQ_SETVALID	0xc3
+
+/* pci_msiq_getstate()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_GETSTATE
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * RET0:	status
+ * RET1:	msiqstate	(HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid
+ *
+ * Get the state of the MSI-EQ described by the given devhandle and
+ * msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_GETSTATE	0xc4
+
+/* pci_msiq_setstate()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_SETSTATE
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * ARG2:	msiqstate	(HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqstate
+ *				value or MSI EQ is uninitialized
+ *
+ * Set the state of the MSI-EQ described by the given devhandle and
+ * msiqid to the given msiqstate.
+ */
+#define HV_FAST_PCI_MSIQ_SETSTATE	0xc5
+
+/* pci_msiq_gethead()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_GETHEAD
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * RET0:	status
+ * RET1:	msiqhead
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid
+ *
+ * Get the current MSI EQ queue head for the MSI-EQ described by the
+ * given devhandle and msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_GETHEAD	0xc6
+
+/* pci_msiq_sethead()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_SETHEAD
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * ARG2:	msiqhead
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqhead,
+ *				or MSI EQ is uninitialized
+ *
+ * Set the current MSI EQ queue head for the MSI-EQ described by the
+ * given devhandle and msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_SETHEAD	0xc7
+
+/* pci_msiq_gettail()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSIQ_GETTAIL
+ * ARG0:	devhandle
+ * ARG1:	msiqid
+ * RET0:	status
+ * RET1:	msiqtail
+ * ERRORS:	EINVAL		Invalid devhandle or msiqid
+ *
+ * Get the current MSI EQ queue tail for the MSI-EQ described by the
+ * given devhandle and msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_GETTAIL	0xc8
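+
+/* Illustrative only: a typical MSI-EQ drain loop, under the stated
+ * assumption (not spelled out above) that msiqhead/msiqtail are byte
+ * offsets into the 64-byte-entry queue described in pci_msiq_conf(),
+ * and using the hypothetical wrapper convention from earlier sketches;
+ * process_entry() and queue_va/queue_bytes are guest-defined:
+ *
+ *	unsigned long head, tail;
+ *
+ *	pci_msiq_gethead(devhandle, msiqid, &head);
+ *	pci_msiq_gettail(devhandle, msiqid, &tail);
+ *	while (head != tail) {
+ *		process_entry(queue_va + head);
+ *		head = (head + 64) % queue_bytes;
+ *	}
+ *	pci_msiq_sethead(devhandle, msiqid, head);
+ */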
+
+/* pci_msi_getvalid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSI_GETVALID
+ * ARG0:	devhandle
+ * ARG1:	msinum
+ * RET0:	status
+ * RET1:	msivalidstate
+ * ERRORS:	EINVAL		Invalid devhandle or msinum
+ *
+ * Get the current valid/enabled state for the MSI defined by the
+ * given devhandle and msinum.
+ */
+#define HV_FAST_PCI_MSI_GETVALID	0xc9
+
+/* pci_msi_setvalid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSI_SETVALID
+ * ARG0:	devhandle
+ * ARG1:	msinum
+ * ARG2:	msivalidstate
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle or msinum or msivalidstate
+ *
+ * Set the current valid/enabled state for the MSI defined by the
+ * given devhandle and msinum.
+ */
+#define HV_FAST_PCI_MSI_SETVALID	0xca
+
+/* pci_msi_getmsiq()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSI_GETMSIQ
+ * ARG0:	devhandle
+ * ARG1:	msinum
+ * RET0:	status
+ * RET1:	msiqid
+ * ERRORS:	EINVAL		Invalid devhandle or msinum or MSI is unbound
+ *
+ * Get the MSI EQ that the MSI defined by the given devhandle and
+ * msinum is bound to.
+ */
+#define HV_FAST_PCI_MSI_GETMSIQ		0xcb
+
+/* pci_msi_setmsiq()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSI_SETMSIQ
+ * ARG0:	devhandle
+ * ARG1:	msinum
+ * ARG2:	msitype
+ * ARG3:	msiqid
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle or msinum or msiqid
+ *
+ * Set the MSI EQ that the MSI defined by the given devhandle and
+ * msinum is bound to.
+ */
+#define HV_FAST_PCI_MSI_SETMSIQ		0xcc
+
+/* pci_msi_getstate()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSI_GETSTATE
+ * ARG0:	devhandle
+ * ARG1:	msinum
+ * RET0:	status
+ * RET1:	msistate
+ * ERRORS:	EINVAL		Invalid devhandle or msinum
+ *
+ * Get the state of the MSI defined by the given devhandle and msinum.
+ * If not initialized, return HV_MSISTATE_IDLE.
+ */
+#define HV_FAST_PCI_MSI_GETSTATE	0xcd
+
+/* pci_msi_setstate()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSI_SETSTATE
+ * ARG0:	devhandle
+ * ARG1:	msinum
+ * ARG2:	msistate
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle or msinum or msistate
+ *
+ * Set the state of the MSI defined by the given devhandle and msinum.
+ */
+#define HV_FAST_PCI_MSI_SETSTATE	0xce
+
+/* pci_msg_getmsiq()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSG_GETMSIQ
+ * ARG0:	devhandle
+ * ARG1:	msgtype
+ * RET0:	status
+ * RET1:	msiqid
+ * ERRORS:	EINVAL		Invalid devhandle or msgtype
+ *
+ * Get the MSI EQ of the MSG defined by the given devhandle and msgtype.
+ */
+#define HV_FAST_PCI_MSG_GETMSIQ		0xd0
+
+/* pci_msg_setmsiq()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSG_SETMSIQ
+ * ARG0:	devhandle
+ * ARG1:	msgtype
+ * ARG2:	msiqid
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle, msgtype, or msiqid
+ *
+ * Set the MSI EQ of the MSG defined by the given devhandle and msgtype.
+ */
+#define HV_FAST_PCI_MSG_SETMSIQ		0xd1
+
+/* pci_msg_getvalid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSG_GETVALID
+ * ARG0:	devhandle
+ * ARG1:	msgtype
+ * RET0:	status
+ * RET1:	msgvalidstate
+ * ERRORS:	EINVAL		Invalid devhandle or msgtype
+ *
+ * Get the valid/enabled state of the MSG defined by the given
+ * devhandle and msgtype.
+ */
+#define HV_FAST_PCI_MSG_GETVALID	0xd2
+
+/* pci_msg_setvalid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_MSG_SETVALID
+ * ARG0:	devhandle
+ * ARG1:	msgtype
+ * ARG2:	msgvalidstate
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle or msgtype or msgvalidstate
+ *
+ * Set the valid/enabled state of the MSG defined by the given
+ * devhandle and msgtype.
+ */
+#define HV_FAST_PCI_MSG_SETVALID	0xd3
+
+/* Performance counter services.  */
+
+#define HV_PERF_JBUS_PERF_CTRL_REG	0x00
+#define HV_PERF_JBUS_PERF_CNT_REG	0x01
+#define HV_PERF_DRAM_PERF_CTRL_REG_0	0x02
+#define HV_PERF_DRAM_PERF_CNT_REG_0	0x03
+#define HV_PERF_DRAM_PERF_CTRL_REG_1	0x04
+#define HV_PERF_DRAM_PERF_CNT_REG_1	0x05
+#define HV_PERF_DRAM_PERF_CTRL_REG_2	0x06
+#define HV_PERF_DRAM_PERF_CNT_REG_2	0x07
+#define HV_PERF_DRAM_PERF_CTRL_REG_3	0x08
+#define HV_PERF_DRAM_PERF_CNT_REG_3	0x09
+
+/* get_perfreg()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_GET_PERFREG
+ * ARG0:	performance reg number
+ * RET0:	status
+ * RET1:	performance reg value
+ * ERRORS:	EINVAL		Invalid performance register number
+ *		ENOACCESS	No access allowed to performance counters
+ *
+ * Read the value of the given DRAM/JBUS performance counter/control register.
+ */
+#define HV_FAST_GET_PERFREG		0x100
+
+/* set_perfreg()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_SET_PERFREG
+ * ARG0:	performance reg number
+ * ARG1:	performance reg value
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid performance register number
+ *		ENOACCESS	No access allowed to performance counters
+ *
+ * Write the given performance reg value to the given DRAM/JBUS
+ * performance counter/control register.
+ */
+#define HV_FAST_SET_PERFREG		0x101
+
+/* MMU statistics services.
+ *
+ * The hypervisor maintains MMU statistics, and privileged code provides
+ * a buffer where these statistics can be collected.  Once configured,
+ * the buffer is continually updated.  The layout is as follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_mmu_statistics {
+	unsigned long immu_tsb_hits_ctx0_8k_tte;
+	unsigned long immu_tsb_ticks_ctx0_8k_tte;
+	unsigned long immu_tsb_hits_ctx0_64k_tte;
+	unsigned long immu_tsb_ticks_ctx0_64k_tte;
+	unsigned long __reserved1[2];
+	unsigned long immu_tsb_hits_ctx0_4mb_tte;
+	unsigned long immu_tsb_ticks_ctx0_4mb_tte;
+	unsigned long __reserved2[2];
+	unsigned long immu_tsb_hits_ctx0_256mb_tte;
+	unsigned long immu_tsb_ticks_ctx0_256mb_tte;
+	unsigned long __reserved3[4];
+	unsigned long immu_tsb_hits_ctxnon0_8k_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
+	unsigned long immu_tsb_hits_ctxnon0_64k_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
+	unsigned long __reserved4[2];
+	unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
+	unsigned long __reserved5[2];
+	unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
+	unsigned long __reserved6[4];
+	unsigned long dmmu_tsb_hits_ctx0_8k_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
+	unsigned long dmmu_tsb_hits_ctx0_64k_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
+	unsigned long __reserved7[2];
+	unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
+	unsigned long __reserved8[2];
+	unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
+	unsigned long __reserved9[4];
+	unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
+	unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
+	unsigned long __reserved10[2];
+	unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
+	unsigned long __reserved11[2];
+	unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
+	unsigned long __reserved12[4];
+};
+#endif
+
+/* mmustat_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMUSTAT_CONF
+ * ARG0:	real address
+ * RET0:	status
+ * RET1:	real address
+ * ERRORS:	ENORADDR	Invalid real address
+ *		EBADALIGN	Real address not aligned on 64-byte boundary
+ *		EBADTRAP	API not supported on this processor
+ *
+ * Enable MMU statistic gathering using the buffer at the given real
+ * address on the current virtual CPU.  The new buffer real address
+ * is given in ARG0, and the previously specified buffer real address
+ * is returned in RET1, or is returned as zero for the first invocation.
+ *
+ * If the passed in real address argument is zero, this will disable
+ * MMU statistic collection on the current virtual CPU.  If an error is
+ * returned then no statistics are collected.
+ *
+ * The buffer contents should be initialized to all zeros before being
+ * given to the hypervisor or else the statistics will be meaningless.
+ */
+#define HV_FAST_MMUSTAT_CONF		0x102
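+
+/* Illustrative only: enabling statistics collection into a zeroed,
+ * suitably aligned buffer.  mmustat_conf() stands for a hypothetical
+ * wrapper as in the earlier sketches, and __pa() stands in for the
+ * guest's virtual-to-real translation:
+ *
+ *	static struct hv_mmu_statistics stats
+ *		__attribute__((aligned(64)));
+ *	unsigned long prev_ra;
+ *
+ *	memset(&stats, 0, sizeof(stats));	/* required, see above */
+ *	if (mmustat_conf(__pa(&stats), &prev_ra) != 0)
+ *		/* error: no statistics are collected */ ;
+ */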
+
+/* mmustat_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMUSTAT_INFO
+ * RET0:	status
+ * RET1:	real address
+ * ERRORS:	EBADTRAP	API not supported on this processor
+ *
+ * Return the current state and real address of the currently configured
+ * MMU statistics buffer on the current virtual CPU.
+ */
+#define HV_FAST_MMUSTAT_INFO		0x103
+
+/* Function numbers for HV_CORE_TRAP.  */
+#define HV_CORE_VER			0x00
+#define HV_CORE_PUTCHAR			0x01
+#define HV_CORE_EXIT			0x02
+
+#endif /* !(_SPARC64_HYPERVISOR_H) */
-- 
cgit v1.2.3-70-g09d2


From 1633a53c79498455b16d051451f4e3f83ab4e7dd Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 4 Feb 2006 03:09:03 -0800
Subject: [SPARC64]: Add 'hypervisor' to ultra_tlb_type enumeration.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/spitfire.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 962638c9d12..23ad8a7987a 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -44,6 +44,7 @@ enum ultra_tlb_layout {
 	spitfire = 0,
 	cheetah = 1,
 	cheetah_plus = 2,
+	hypervisor = 3,
 };
 
 extern enum ultra_tlb_layout tlb_type;
-- 
cgit v1.2.3-70-g09d2


From 30ddbdb03339fc62480ddbff800a44066bb14455 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 4 Feb 2006 03:11:17 -0800
Subject: [SPARC64]: Add Niagara init-store twin-load ASI defines.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/asi.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h
index 534855660f2..4b36f5f4c8b 100644
--- a/include/asm-sparc64/asi.h
+++ b/include/asm-sparc64/asi.h
@@ -25,12 +25,16 @@
 
 /* SpitFire and later extended ASIs.  The "(III)" marker designates
  * UltraSparc-III and later specific ASIs.  The "(CMT)" marker designates
- * Chip Multi Threading specific ASIs.
+ * Chip Multi Threading specific ASIs.  "(NG)" designates Niagara specific
+ * ASIs, "(4V)" designates SUN4V specific ASIs.
  */
 #define ASI_PHYS_USE_EC		0x14 /* PADDR, E-cachable		*/
 #define ASI_PHYS_BYPASS_EC_E	0x15 /* PADDR, E-bit			*/
 #define ASI_PHYS_USE_EC_L	0x1c /* PADDR, E-cachable, little endian*/
 #define ASI_PHYS_BYPASS_EC_E_L	0x1d /* PADDR, E-bit, little endian	*/
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
+					 * secondary, user
+					 */
 #define ASI_NUCLEUS_QUAD_LDD	0x24 /* Cachable, qword load		*/
 #define ASI_NUCLEUS_QUAD_LDD_L	0x2c /* Cachable, qword load, l-endian 	*/
 #define ASI_PCACHE_DATA_STATUS	0x30 /* (III) PCache data stat RAM diag	*/
@@ -137,6 +141,9 @@
 #define ASI_FL16_SL		0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
 #define ASI_BLK_COMMIT_P	0xe0 /* Primary, blk store commit	*/
 #define ASI_BLK_COMMIT_S	0xe1 /* Secondary, blk store commit	*/
+#define ASI_BLK_INIT_QUAD_LDD_P	0xe2 /* (NG) init-store, twin load,
+				      * primary, implicit
+				      */
 #define ASI_BLK_P		0xf0 /* Primary, blk ld/st		*/
 #define ASI_BLK_S		0xf1 /* Secondary, blk ld/st		*/
 #define ASI_BLK_PL		0xf8 /* Primary, blk ld/st, little	*/
-- 
cgit v1.2.3-70-g09d2


From d398ee230f94a8ba386d8abb63f4fea129e4eaba Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 4 Feb 2006 03:11:50 -0800
Subject: [SPARC64]: Sun4v specific ASI defines.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/asi.h | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h
index 4b36f5f4c8b..662a21107ae 100644
--- a/include/asm-sparc64/asi.h
+++ b/include/asm-sparc64/asi.h
@@ -30,13 +30,22 @@
  */
 #define ASI_PHYS_USE_EC		0x14 /* PADDR, E-cachable		*/
 #define ASI_PHYS_BYPASS_EC_E	0x15 /* PADDR, E-bit			*/
+#define ASI_BLK_AIUP_4V		0x16 /* (4V) Prim, user, block ld/st	*/
+#define ASI_BLK_AIUS_4V		0x17 /* (4V) Sec, user, block ld/st	*/
 #define ASI_PHYS_USE_EC_L	0x1c /* PADDR, E-cachable, little endian*/
 #define ASI_PHYS_BYPASS_EC_E_L	0x1d /* PADDR, E-bit, little endian	*/
+#define ASI_BLK_AIUP_L_4V	0x1e /* (4V) Prim, user, block, l-endian*/
+#define ASI_BLK_AIUS_L_4V	0x1f /* (4V) Sec, user, block, l-endian	*/
+#define ASI_SCRATCHPAD		0x20 /* (4V) Scratch Pad Registers	*/
+#define ASI_MMU			0x21 /* (4V) MMU Context Registers	*/
 #define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
 					 * secondary, user
 					 */
 #define ASI_NUCLEUS_QUAD_LDD	0x24 /* Cachable, qword load		*/
+#define ASI_QUEUE		0x25 /* (4V) Interrupt Queue Registers	*/
+#define ASI_QUAD_LDD_PHYS_4V	0x26 /* (4V) Physical, qword load	*/
 #define ASI_NUCLEUS_QUAD_LDD_L	0x2c /* Cachable, qword load, l-endian 	*/
+#define ASI_QUAD_LDD_PHYS_L_4V	0x2e /* (4V) Phys, qword load, l-endian	*/
 #define ASI_PCACHE_DATA_STATUS	0x30 /* (III) PCache data stat RAM diag	*/
 #define ASI_PCACHE_DATA		0x31 /* (III) PCache data RAM diag	*/
 #define ASI_PCACHE_TAG		0x32 /* (III) PCache tag RAM diag	*/
-- 
cgit v1.2.3-70-g09d2


From 277b6dd9600613b01f66cadef2f0065514fecf69 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 4 Feb 2006 03:12:02 -0800
Subject: [SPARC64]: Sun4v scratchpad register layout.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/scratchpad.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 include/asm-sparc64/scratchpad.h

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/scratchpad.h b/include/asm-sparc64/scratchpad.h
new file mode 100644
index 00000000000..5e8b01fb334
--- /dev/null
+++ b/include/asm-sparc64/scratchpad.h
@@ -0,0 +1,14 @@
+#ifndef _SPARC64_SCRATCHPAD_H
+#define _SPARC64_SCRATCHPAD_H
+
+/* Sun4v scratchpad registers, accessed via ASI_SCRATCHPAD.  */
+
+#define SCRATCHPAD_MMU_MISS	0x00 /* Shared with OBP - set by OBP	    */
+#define SCRATCHPAD_CPUID	0x08 /* Shared with OBP - set by hypervisor */
+#define SCRATCHPAD_UTSBREG1	0x10
+#define SCRATCHPAD_UTSBREG2	0x18
+	/* 0x20 and 0x28, hypervisor only... */
+#define SCRATCHPAD_UNUSED1	0x30
+#define SCRATCHPAD_UNUSED2	0x38 /* Reserved for OBP		    */
+
+#endif /* !(_SPARC64_SCRATCHPAD_H) */
-- 
cgit v1.2.3-70-g09d2


From e1c21c4f476f2270c98aad1fe55e5f33e25f77f5 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sat, 4 Feb 2006 03:12:14 -0800
Subject: [SPARC64]: Sun4v interrupt queue register definitions.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/intr_queue.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 include/asm-sparc64/intr_queue.h

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/intr_queue.h b/include/asm-sparc64/intr_queue.h
new file mode 100644
index 00000000000..206077dedc2
--- /dev/null
+++ b/include/asm-sparc64/intr_queue.h
@@ -0,0 +1,15 @@
+#ifndef _SPARC64_INTR_QUEUE_H
+#define _SPARC64_INTR_QUEUE_H
+
+/* Sun4v interrupt queue registers, accessed via ASI_QUEUE.  */
+
+#define INTRQ_CPU_MONDO_HEAD	  0x3c0 /* CPU mondo head	          */
+#define INTRQ_CPU_MONDO_TAIL	  0x3c8 /* CPU mondo tail	          */
+#define INTRQ_DEVICE_MONDO_HEAD	  0x3d0 /* Device mondo head	          */
+#define INTRQ_DEVICE_MONDO_TAIL	  0x3d8 /* Device mondo tail	          */
+#define INTRQ_RESUM_MONDO_HEAD	  0x3e0 /* Resumable error mondo head     */
+#define INTRQ_RESUM_MONDO_TAIL	  0x3e8 /* Resumable error mondo tail     */
+#define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */
+#define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */
+
+#endif /* !(_SPARC64_INTR_QUEUE_H) */
-- 
cgit v1.2.3-70-g09d2


From d96b81533ba3d5775e45aee6986b2aa33c10801c Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 4 Feb 2006 15:40:53 -0800
Subject: [SPARC64]: Add sun4v case to __GET_CPUID() patch tables.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/setup.c   | 3 +++
 include/asm-sparc64/cpudata.h | 8 ++++++++
 2 files changed, 11 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index f751d11926b..2918ed3eb1b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -520,6 +520,9 @@ static void __init per_cpu_patch(void)
 			else
 				insns = &p->cheetah_safari[0];
 			break;
+		case hypervisor:
+			insns = &p->sun4v[0];
+			break;
 		default:
 			prom_printf("Unknown cpu type, halting.\n");
 			prom_halt();
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index c15514f82c3..4f28a85c104 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -68,6 +68,7 @@ struct cpuid_patch_entry {
 	unsigned int	cheetah_safari[4];
 	unsigned int	cheetah_jbus[4];
 	unsigned int	starfire[4];
+	unsigned int	sun4v[4];
 };
 extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
 #endif
@@ -79,6 +80,8 @@ extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
 
 #define TRAP_BLOCK_SZ_SHIFT	6
 
+#include <asm/scratchpad.h>
+
 #ifdef CONFIG_SMP
 
 #define __GET_CPUID(REG)				\
@@ -105,6 +108,11 @@ extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
 	sllx		REG, 9, REG;			\
 	or		REG, 0xd0, REG;			\
 	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
+	/* sun4v implementation. */			\
+	mov		SCRATCHPAD_CPUID, REG;		\
+	nop;						\
+	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
+	nop;						\
 	.previous;
 
 /* Clobbers TMP, current address space PGD phys address into DEST.  */
-- 
cgit v1.2.3-70-g09d2


From d619d7f11670f5b1cfca30e6645e44c8a6014820 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 4 Feb 2006 23:59:38 -0800
Subject: [SPARC64]: Add define for "GL" field of sun4v %tstate register.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/pstate.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h
index 29fb74aa805..1181d73315a 100644
--- a/include/asm-sparc64/pstate.h
+++ b/include/asm-sparc64/pstate.h
@@ -29,10 +29,11 @@
 /* The V9 TSTATE Register (with SpitFire and Linux extensions).
  *
  * ---------------------------------------------------------------
- * |  Resv  |  CCR  |  ASI  |  %pil  |  PSTATE  |  Resv  |  CWP  |
+ * |  Resv |  GL  |  CCR  |  ASI  |  %pil  |  PSTATE  |  Resv  |  CWP  |
  * ---------------------------------------------------------------
- *  63    40 39   32 31   24 23    20 19       8 7      5 4     0
+ *  63   43 42  40 39   32 31   24 23    20 19       8 7      5 4     0
  */
+#define TSTATE_GL	_AC(0x0000070000000000,UL) /* Global reg level  */
 #define TSTATE_CCR	_AC(0x000000ff00000000,UL) /* Condition Codes.	*/
 #define TSTATE_XCC	_AC(0x000000f000000000,UL) /* Condition Codes.	*/
 #define TSTATE_XNEG	_AC(0x0000008000000000,UL) /* %xcc Negative.	*/
-- 
cgit v1.2.3-70-g09d2


From 936f482af1743141d637483ec10eb881537c26dc Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sun, 5 Feb 2006 21:29:28 -0800
Subject: [SPARC64]: Add initial code to twiddle %gl on trap entry/exit.

Instead of setting/clearing PSTATE_AG we have to change
the %gl register value on sun4v.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/etrap.S       | 17 +++++++++++++++--
 arch/sparc64/kernel/rtrap.S       | 16 +++++++++++++++-
 arch/sparc64/kernel/setup.c       | 20 ++++++++++++++++++++
 arch/sparc64/kernel/vmlinux.lds.S |  3 +++
 include/asm-sparc64/cpudata.h     |  5 +++++
 include/asm-sparc64/head.h        |  4 ++++
 6 files changed, 62 insertions(+), 3 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index f2556146a73..4d644949ad4 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -102,7 +102,14 @@ etrap_save:	save	%g2, -STACK_BIAS, %sp
 2:		mov	%g4, %l4
 		mov	%g5, %l5
 		add	%g7, 4, %l2
-		wrpr	%g0, ETRAP_PSTATE1, %pstate
+
+		/* Go to trap time globals so we can save them.  */
+661:		wrpr	%g0, ETRAP_PSTATE1, %pstate
+		.section .gl_1insn_patch, "ax"
+		.word	661b
+		SET_GL(0)
+		.previous
+
 		stx	%g1, [%sp + PTREGS_OFF + PT_V9_G1]
 		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
 		sllx	%l7, 24, %l7
@@ -195,9 +202,15 @@ etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 		rdpr	%tt, %g3
 		stx	%g3, [%g2 + STACK_BIAS + 0x78]
 
-		wrpr	%g1, %tl
 		stx	%g1, [%g2 + STACK_BIAS + 0x80]
 
+		wrpr	%g0, 1, %tl
+661:		nop
+		.section .gl_1insn_patch, "ax"
+		.word	661b
+		SET_GL(1)
+		.previous
+
 		rdpr	%tstate, %g1
 		sub	%g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
 		ba,pt	%xcc, 1b
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index ecfbbdc5612..e6130956307 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -230,7 +230,14 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 1:
 		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
 		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
-		wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
+
+		/* Normal globals are restored, go to trap globals.  */
+661:		wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
+		.section		.gl_1insn_patch, "ax"
+		.word			661b
+		SET_GL(1)
+		.previous
+
 		ldx			[%sp + PTREGS_OFF + PT_V9_I0], %i0
 		ldx			[%sp + PTREGS_OFF + PT_V9_I1], %i1
 
@@ -304,6 +311,13 @@ user_rtt_fill_fixup:
 		mov	%g6, %l1
 		wrpr	%g0, 0x0, %tl
 		wrpr	%g0, RTRAP_PSTATE, %pstate
+
+661:		nop
+		.section		.gl_1insn_patch, "ax"
+		.word			661b
+		SET_GL(0)
+		.previous
+
 		mov	%l1, %g6
 		ldx	[%g6 + TI_TASK], %g4
 		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 2918ed3eb1b..aaab319ad88 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -545,6 +545,24 @@ static void __init per_cpu_patch(void)
 #endif
 }
 
+static void __init gl_patch(void)
+{
+	struct gl_1insn_patch_entry *p;
+
+	if (tlb_type != hypervisor)
+		return;
+
+	p = &__gl_1insn_patch;
+	while (p < &__gl_1insn_patch_end) {
+		unsigned long addr = p->addr;
+
+		*(unsigned int *) (addr +  0) = p->insn;
+		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
+
+		p++;
+	}
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@@ -567,6 +585,8 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	per_cpu_patch();
 
+	gl_patch();
+
 	boot_flags_init(*cmdline_p);
 
 	idprom_init();
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 1639d9c935c..482d1ed87f4 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -77,6 +77,9 @@ SECTIONS
   __cpuid_patch = .;
   .cpuid_patch : { *(.cpuid_patch) }
   __cpuid_patch_end = .;
+  __gl_1insn_patch = .;
+  .gl_1insn_patch : { *(.gl_1insn_patch) }
+  __gl_1insn_patch_end = .;
   . = ALIGN(8192); 
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 4f28a85c104..8666440c89a 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -73,6 +73,11 @@ struct cpuid_patch_entry {
 extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
 #endif
 
+struct gl_1insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct gl_1insn_patch_entry __gl_1insn_patch, __gl_1insn_patch_end;
 #endif /* !(__ASSEMBLY__) */
 
 #define TRAP_PER_CPU_THREAD	0x00
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 731c842f3d1..ff76c0981b6 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -4,6 +4,10 @@
 
 #include <asm/pstate.h>
 
+	/* wrpr	%g0, val, %gl */
+#define SET_GL(val)	\
+	.word	0xa1902000 | val
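+	/* For example, SET_GL(1) assembles to the single instruction
+	 * word 0xa1902000 | 1 == 0xa1902001, i.e. "wrpr %g0, 1, %gl".
+	 */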
+
 #define KERNBASE	0x400000
 
 #define	PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
-- 
cgit v1.2.3-70-g09d2


From 314981ac7177a933319e3c071a5cf0a579205e6e Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sun, 5 Feb 2006 21:59:03 -0800
Subject: [SPARC64]: Kill all %pstate changes in context switch code.

They are totally unnecessary because:

1) Interrupts are already disabled when switch_to()
   runs.

2) We don't use hard-coded alternate globals any longer.

This found a case in rtrap, which still assumed alternate
global %g6 was current_thread_info(), and that is fixed
by this changeset as well.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/rtrap.S  | 5 ++++-
 include/asm-sparc64/system.h | 5 -----
 2 files changed, 4 insertions(+), 6 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index e6130956307..a2fa277da62 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -224,7 +224,8 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		ldx			[%sp + PTREGS_OFF + PT_V9_G4], %g4
 		ldx			[%sp + PTREGS_OFF + PT_V9_G5], %g5
 		brz,pt			%l3, 1f
-		 nop
+		mov			%g6, %l2
+
 		/* Must do this before thread reg is clobbered below.  */
 		LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
 1:
@@ -238,6 +239,8 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		SET_GL(1)
 		.previous
 
+		mov			%l2, %g6
+
 		ldx			[%sp + PTREGS_OFF + PT_V9_I0], %i0
 		ldx			[%sp + PTREGS_OFF + PT_V9_I1], %i1
 
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 26c0807af3e..a18ec87a52c 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -213,7 +213,6 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 		task_thread_info(next);					\
 	__asm__ __volatile__(						\
 	"mov	%%g4, %%g7\n\t"						\
-	"wrpr	%%g0, 0x95, %%pstate\n\t"				\
 	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
 	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
 	"rdpr	%%wstate, %%o5\n\t"					\
@@ -227,14 +226,10 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 	"ldx	[%%g6 + %3], %%o6\n\t"					\
 	"ldub	[%%g6 + %2], %%o5\n\t"					\
 	"ldub	[%%g6 + %4], %%o7\n\t"					\
-	"mov	%%g6, %%l2\n\t"						\
 	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
 	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
 	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
-	"wrpr	%%g0, 0x94, %%pstate\n\t"				\
-	"mov	%%l2, %%g6\n\t"						\
 	"ldx	[%%g6 + %6], %%g4\n\t"					\
-	"wrpr	%%g0, 0x96, %%pstate\n\t"				\
 	"brz,pt %%o7, 1f\n\t"						\
 	" mov	%%g7, %0\n\t"						\
 	"b,a ret_from_syscall\n\t"					\
-- 
cgit v1.2.3-70-g09d2


From 45fec05f805a113372c9a7ff4c653ac749f6921c Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sun, 5 Feb 2006 22:27:28 -0800
Subject: [SPARC64]: Sanitize %pstate writes for sun4v.

If we're just switching between different alternate global
sets, nop it out on sun4v.  Also, get rid of all of the
alternate global save/restore in the OBP CIF trampoline code.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/ktlb.S        |  18 +++-
 arch/sparc64/kernel/setup.c       |  26 +++--
 arch/sparc64/kernel/tsb.S         |  12 ++-
 arch/sparc64/kernel/vmlinux.lds.S |   3 +
 arch/sparc64/mm/ultra.S           |  18 +++-
 arch/sparc64/prom/cif.S           | 211 +++-----------------------------------
 include/asm-sparc64/cpudata.h     |   6 ++
 7 files changed, 88 insertions(+), 206 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 9b415ab6db6..c1335432124 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -60,8 +60,15 @@ kvmap_itlb_load:
 	retry
 
 kvmap_itlb_longpath:
-	rdpr	%pstate, %g5
+
+661:	rdpr	%pstate, %g5
 	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
+	.section .gl_2insn_patch, "ax"
+	.word	661b
+	nop
+	nop
+	.previous
+
 	rdpr	%tpc, %g5
 	ba,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_ITLB, %g4
@@ -161,8 +168,15 @@ kvmap_check_obp:
 	 nop
 
 kvmap_dtlb_longpath:
-	rdpr	%pstate, %g5
+
+661:	rdpr	%pstate, %g5
 	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
+	.section .gl_2insn_patch, "ax"
+	.word	661b
+	nop
+	nop
+	.previous
+
 	rdpr	%tl, %g4
 	cmp	%g4, 1
 	mov	TLB_TAG_ACCESS, %g4
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index aaab319ad88..e22bf5fc92c 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -547,19 +547,33 @@ static void __init per_cpu_patch(void)
 
 static void __init gl_patch(void)
 {
-	struct gl_1insn_patch_entry *p;
+	struct gl_1insn_patch_entry *p1;
+	struct gl_2insn_patch_entry *p2;
 
 	if (tlb_type != hypervisor)
 		return;
 
-	p = &__gl_1insn_patch;
-	while (p < &__gl_1insn_patch_end) {
-		unsigned long addr = p->addr;
+	p1 = &__gl_1insn_patch;
+	while (p1 < &__gl_1insn_patch_end) {
+		unsigned long addr = p1->addr;
 
-		*(unsigned int *) (addr +  0) = p->insn;
+		*(unsigned int *) (addr +  0) = p1->insn;
 		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
 
-		p++;
+		p1++;
+	}
+
+	p2 = &__gl_2insn_patch;
+	while (p2 < &__gl_2insn_patch_end) {
+		unsigned long addr = p2->addr;
+
+		*(unsigned int *) (addr +  0) = p2->insns[0];
+		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
+
+		*(unsigned int *) (addr +  4) = p2->insns[1];
+		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));
+
+		p2++;
 	}
 }
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 3b45db98005..96e63168d8b 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -82,9 +82,17 @@ tsb_itlb_load:
 	.globl		tsb_do_fault
 tsb_do_fault:
 	cmp		%g3, FAULT_CODE_DTLB
-	rdpr		%pstate, %g5
+
+661:	rdpr		%pstate, %g5
+	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
+	.section	.gl_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
 	bne,pn		%xcc, tsb_do_itlb_fault
-	 wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
+	 nop
 
 tsb_do_dtlb_fault:
 	rdpr	%tl, %g4
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 482d1ed87f4..686bf6b3b03 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -80,6 +80,9 @@ SECTIONS
   __gl_1insn_patch = .;
   .gl_1insn_patch : { *(.gl_1insn_patch) }
   __gl_1insn_patch_end = .;
+  __gl_2insn_patch = .;
+  .gl_2insn_patch : { *(.gl_2insn_patch) }
+  __gl_2insn_patch_end = .;
   . = ALIGN(8192); 
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index cac58d66fca..5dd86ad0d29 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -444,8 +444,15 @@ xcall_flush_tlb_kernel_range:	/* 22 insns */
 	 */
 	.globl		xcall_sync_tick
 xcall_sync_tick:
-	rdpr		%pstate, %g2
+
+661:	rdpr		%pstate, %g2
 	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
+	.section	.gl_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
 	rdpr		%pil, %g2
 	wrpr		%g0, 15, %pil
 	sethi		%hi(109f), %g7
@@ -468,8 +475,15 @@ xcall_sync_tick:
 	 */
 	.globl		xcall_report_regs
 xcall_report_regs:
-	rdpr		%pstate, %g2
+
+661:	rdpr		%pstate, %g2
 	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
+	.section	.gl_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
 	rdpr		%pil, %g2
 	wrpr		%g0, 15, %pil
 	sethi		%hi(109f), %g7
diff --git a/arch/sparc64/prom/cif.S b/arch/sparc64/prom/cif.S
index 29d0ae74aed..5f27ad779c0 100644
--- a/arch/sparc64/prom/cif.S
+++ b/arch/sparc64/prom/cif.S
@@ -1,10 +1,12 @@
 /* cif.S: PROM entry/exit assembler trampolines.
  *
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2005, 2006 David S. Miller <davem@davemloft.net>
  */
 
 #include <asm/pstate.h>
+#include <asm/cpudata.h>
+#include <asm/thread_info.h>
 
 	.text
 	.globl	prom_cif_interface
@@ -12,78 +14,16 @@ prom_cif_interface:
 	sethi	%hi(p1275buf), %o0
 	or	%o0, %lo(p1275buf), %o0
 	ldx	[%o0 + 0x010], %o1	! prom_cif_stack
-	save	%o1, -0x190, %sp
+	save	%o1, -192, %sp
 	ldx	[%i0 + 0x008], %l2	! prom_cif_handler
-	rdpr	%pstate, %l4
-	wrpr	%g0, 0x15, %pstate	! save alternate globals
-	stx	%g1, [%sp + 2047 + 0x0b0]
-	stx	%g2, [%sp + 2047 + 0x0b8]
-	stx	%g3, [%sp + 2047 + 0x0c0]
-	stx	%g4, [%sp + 2047 + 0x0c8]
-	stx	%g5, [%sp + 2047 + 0x0d0]
-	stx	%g6, [%sp + 2047 + 0x0d8]
-	stx	%g7, [%sp + 2047 + 0x0e0]
-	wrpr	%g0, 0x814, %pstate	! save interrupt globals
-	stx	%g1, [%sp + 2047 + 0x0e8]
-	stx	%g2, [%sp + 2047 + 0x0f0]
-	stx	%g3, [%sp + 2047 + 0x0f8]
-	stx	%g4, [%sp + 2047 + 0x100]
-	stx	%g5, [%sp + 2047 + 0x108]
-	stx	%g6, [%sp + 2047 + 0x110]
-	stx	%g7, [%sp + 2047 + 0x118]
-	wrpr	%g0, 0x14, %pstate	! save normal globals
-	stx	%g1, [%sp + 2047 + 0x120]
-	stx	%g2, [%sp + 2047 + 0x128]
-	stx	%g3, [%sp + 2047 + 0x130]
-	stx	%g4, [%sp + 2047 + 0x138]
-	stx	%g5, [%sp + 2047 + 0x140]
-	stx	%g6, [%sp + 2047 + 0x148]
-	stx	%g7, [%sp + 2047 + 0x150]
-	wrpr	%g0, 0x414, %pstate	! save mmu globals
-	stx	%g1, [%sp + 2047 + 0x158]
-	stx	%g2, [%sp + 2047 + 0x160]
-	stx	%g3, [%sp + 2047 + 0x168]
-	stx	%g4, [%sp + 2047 + 0x170]
-	stx	%g5, [%sp + 2047 + 0x178]
-	stx	%g6, [%sp + 2047 + 0x180]
-	stx	%g7, [%sp + 2047 + 0x188]
-	mov	%g1, %l0		! also save to locals, so we can handle
-	mov	%g2, %l1		! tlb faults later on, when accessing
-	mov	%g3, %l3		! the stack.
-	mov	%g7, %l5
-	wrpr	%l4, PSTATE_IE, %pstate	! turn off interrupts
+	mov	%g4, %l0
+	mov	%g5, %l1
+	mov	%g6, %l3
 	call	%l2
 	 add	%i0, 0x018, %o0		! prom_args
-	wrpr	%g0, 0x414, %pstate	! restore mmu globals
-	mov	%l0, %g1
-	mov	%l1, %g2
-	mov	%l3, %g3
-	mov	%l5, %g7
-	wrpr	%g0, 0x14, %pstate	! restore normal globals
-	ldx	[%sp + 2047 + 0x120], %g1
-	ldx	[%sp + 2047 + 0x128], %g2
-	ldx	[%sp + 2047 + 0x130], %g3
-	ldx	[%sp + 2047 + 0x138], %g4
-	ldx	[%sp + 2047 + 0x140], %g5
-	ldx	[%sp + 2047 + 0x148], %g6
-	ldx	[%sp + 2047 + 0x150], %g7
-	wrpr	%g0, 0x814, %pstate	! restore interrupt globals
-	ldx	[%sp + 2047 + 0x0e8], %g1
-	ldx	[%sp + 2047 + 0x0f0], %g2
-	ldx	[%sp + 2047 + 0x0f8], %g3
-	ldx	[%sp + 2047 + 0x100], %g4
-	ldx	[%sp + 2047 + 0x108], %g5
-	ldx	[%sp + 2047 + 0x110], %g6
-	ldx	[%sp + 2047 + 0x118], %g7
-	wrpr	%g0, 0x15, %pstate	! restore alternate globals
-	ldx	[%sp + 2047 + 0x0b0], %g1
-	ldx	[%sp + 2047 + 0x0b8], %g2
-	ldx	[%sp + 2047 + 0x0c0], %g3
-	ldx	[%sp + 2047 + 0x0c8], %g4
-	ldx	[%sp + 2047 + 0x0d0], %g5
-	ldx	[%sp + 2047 + 0x0d8], %g6
-	ldx	[%sp + 2047 + 0x0e0], %g7
-	wrpr	%l4, 0, %pstate	! restore original pstate
+	mov	%l0, %g4
+	mov	%l1, %g5
+	mov	%l3, %g6
 	ret
 	 restore
 
@@ -91,135 +31,18 @@ prom_cif_interface:
 prom_cif_callback:
 	sethi	%hi(p1275buf), %o1
 	or	%o1, %lo(p1275buf), %o1
-	save	%sp, -0x270, %sp
-	rdpr	%pstate, %l4
-	wrpr	%g0, 0x15, %pstate	! save PROM alternate globals
-	stx	%g1, [%sp + 2047 + 0x0b0]
-	stx	%g2, [%sp + 2047 + 0x0b8]
-	stx	%g3, [%sp + 2047 + 0x0c0]
-	stx	%g4, [%sp + 2047 + 0x0c8]
-	stx	%g5, [%sp + 2047 + 0x0d0]
-	stx	%g6, [%sp + 2047 + 0x0d8]
-	stx	%g7, [%sp + 2047 + 0x0e0]
-					! restore Linux alternate globals
-	ldx	[%sp + 2047 + 0x190], %g1
-	ldx	[%sp + 2047 + 0x198], %g2
-	ldx	[%sp + 2047 + 0x1a0], %g3
-	ldx	[%sp + 2047 + 0x1a8], %g4
-	ldx	[%sp + 2047 + 0x1b0], %g5
-	ldx	[%sp + 2047 + 0x1b8], %g6
-	ldx	[%sp + 2047 + 0x1c0], %g7
-	wrpr	%g0, 0x814, %pstate	! save PROM interrupt globals
-	stx	%g1, [%sp + 2047 + 0x0e8]
-	stx	%g2, [%sp + 2047 + 0x0f0]
-	stx	%g3, [%sp + 2047 + 0x0f8]
-	stx	%g4, [%sp + 2047 + 0x100]
-	stx	%g5, [%sp + 2047 + 0x108]
-	stx	%g6, [%sp + 2047 + 0x110]
-	stx	%g7, [%sp + 2047 + 0x118]
-					! restore Linux interrupt globals
-	ldx	[%sp + 2047 + 0x1c8], %g1
-	ldx	[%sp + 2047 + 0x1d0], %g2
-	ldx	[%sp + 2047 + 0x1d8], %g3
-	ldx	[%sp + 2047 + 0x1e0], %g4
-	ldx	[%sp + 2047 + 0x1e8], %g5
-	ldx	[%sp + 2047 + 0x1f0], %g6
-	ldx	[%sp + 2047 + 0x1f8], %g7
-	wrpr	%g0, 0x14, %pstate	! save PROM normal globals
-	stx	%g1, [%sp + 2047 + 0x120]
-	stx	%g2, [%sp + 2047 + 0x128]
-	stx	%g3, [%sp + 2047 + 0x130]
-	stx	%g4, [%sp + 2047 + 0x138]
-	stx	%g5, [%sp + 2047 + 0x140]
-	stx	%g6, [%sp + 2047 + 0x148]
-	stx	%g7, [%sp + 2047 + 0x150]
-					! restore Linux normal globals
-	ldx	[%sp + 2047 + 0x200], %g1
-	ldx	[%sp + 2047 + 0x208], %g2
-	ldx	[%sp + 2047 + 0x210], %g3
-	ldx	[%sp + 2047 + 0x218], %g4
-	ldx	[%sp + 2047 + 0x220], %g5
-	ldx	[%sp + 2047 + 0x228], %g6
-	ldx	[%sp + 2047 + 0x230], %g7
-	wrpr	%g0, 0x414, %pstate	! save PROM mmu globals
-	stx	%g1, [%sp + 2047 + 0x158]
-	stx	%g2, [%sp + 2047 + 0x160]
-	stx	%g3, [%sp + 2047 + 0x168]
-	stx	%g4, [%sp + 2047 + 0x170]
-	stx	%g5, [%sp + 2047 + 0x178]
-	stx	%g6, [%sp + 2047 + 0x180]
-	stx	%g7, [%sp + 2047 + 0x188]
-					! restore Linux mmu globals
-	ldx	[%sp + 2047 + 0x238], %o0
-	ldx	[%sp + 2047 + 0x240], %o1
-	ldx	[%sp + 2047 + 0x248], %l2
-	ldx	[%sp + 2047 + 0x250], %l3
-	ldx	[%sp + 2047 + 0x258], %l5
-	ldx	[%sp + 2047 + 0x260], %l6
-	ldx	[%sp + 2047 + 0x268], %l7
-					! switch to Linux tba
-	sethi	%hi(sparc64_ttable_tl0), %l1
-	rdpr	%tba, %l0		! save PROM tba
-	mov	%o0, %g1
-	mov	%o1, %g2
-	mov	%l2, %g3
-	mov	%l3, %g4
-	mov	%l5, %g5
-	mov	%l6, %g6
-	mov	%l7, %g7
-	wrpr	%l1, %tba		! install Linux tba
-	wrpr	%l4, 0, %pstate		! restore PSTATE
+	save	%sp, -192, %sp
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
+	LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %o0)
+	ldx	[%g6 + TI_TASK], %g4
 	call	prom_world
-	 mov	%g0, %o0
+	 mov	0, %o0
 	ldx	[%i1 + 0x000], %l2
 	call	%l2
 	 mov	%i0, %o0
 	mov	%o0, %l1
 	call	prom_world
-	 or	%g0, 1, %o0
-	wrpr	%g0, 0x14, %pstate	! interrupts off
-					! restore PROM mmu globals
-	ldx	[%sp + 2047 + 0x158], %o0
-	ldx	[%sp + 2047 + 0x160], %o1
-	ldx	[%sp + 2047 + 0x168], %l2
-	ldx	[%sp + 2047 + 0x170], %l3
-	ldx	[%sp + 2047 + 0x178], %l5
-	ldx	[%sp + 2047 + 0x180], %l6
-	ldx	[%sp + 2047 + 0x188], %l7
-	wrpr	%g0, 0x414, %pstate	! restore PROM mmu globals
-	mov	%o0, %g1
-	mov	%o1, %g2
-	mov	%l2, %g3
-	mov	%l3, %g4
-	mov	%l5, %g5
-	mov	%l6, %g6
-	mov	%l7, %g7
-	wrpr	%l0, %tba		! restore PROM tba
-	wrpr	%g0, 0x14, %pstate	! restore PROM normal globals
-	ldx	[%sp + 2047 + 0x120], %g1
-	ldx	[%sp + 2047 + 0x128], %g2
-	ldx	[%sp + 2047 + 0x130], %g3
-	ldx	[%sp + 2047 + 0x138], %g4
-	ldx	[%sp + 2047 + 0x140], %g5
-	ldx	[%sp + 2047 + 0x148], %g6
-	ldx	[%sp + 2047 + 0x150], %g7
-	wrpr	%g0, 0x814, %pstate	! restore PROM interrupt globals
-	ldx	[%sp + 2047 + 0x0e8], %g1
-	ldx	[%sp + 2047 + 0x0f0], %g2
-	ldx	[%sp + 2047 + 0x0f8], %g3
-	ldx	[%sp + 2047 + 0x100], %g4
-	ldx	[%sp + 2047 + 0x108], %g5
-	ldx	[%sp + 2047 + 0x110], %g6
-	ldx	[%sp + 2047 + 0x118], %g7
-	wrpr	%g0, 0x15, %pstate	! restore PROM alternate globals
-	ldx	[%sp + 2047 + 0x0b0], %g1
-	ldx	[%sp + 2047 + 0x0b8], %g2
-	ldx	[%sp + 2047 + 0x0c0], %g3
-	ldx	[%sp + 2047 + 0x0c8], %g4
-	ldx	[%sp + 2047 + 0x0d0], %g5
-	ldx	[%sp + 2047 + 0x0d8], %g6
-	ldx	[%sp + 2047 + 0x0e0], %g7
-	wrpr	%l4, 0, %pstate
+	 mov	1, %o0
 	ret
 	 restore %l1, 0, %o0
 
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 8666440c89a..998145b9265 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -78,6 +78,12 @@ struct gl_1insn_patch_entry {
 	unsigned int	insn;
 };
 extern struct gl_1insn_patch_entry __gl_1insn_patch, __gl_1insn_patch_end;
+
+struct gl_2insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[2];
+};
+extern struct gl_2insn_patch_entry __gl_2insn_patch, __gl_2insn_patch_end;
 #endif /* !(__ASSEMBLY__) */
 
 #define TRAP_PER_CPU_THREAD	0x00
-- 


From d257d5da39a78b32721ca84b2ba7f461f2f7ed7f Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 6 Feb 2006 23:44:37 -0800
Subject: [SPARC64]: Initial sun4v TLB miss handling infrastructure.

Things are a little tricky because, unlike sun4u, we have
to:

1) do a hypervisor trap to do the TLB load.
2) do the TSB lookup calculations by hand (see the sketch below).

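Item 2) amounts to the following C sketch, using the quantities
described in the sun4v_tlb_miss.S comments (tsb_reg stands for the
value kept in the scratchpad UTSB register, vaddr/ctx for the missing
virtual address and context; the variable names are illustrative, not
kernel symbols):

	/* The TSB base and size field are packed into one register;
	 * each TSB entry is 16 bytes (tag + pte).
	 */
	unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	unsigned long tsb_base   = tsb_reg & ~0x7UL;
	unsigned long tsb_index  = (vaddr >> PAGE_SHIFT) & index_mask;
	unsigned long tsb_ptr    = tsb_base + (tsb_index * 16UL);

	/* The tag compared against the entry, "(vaddr>>22) | (ctx<<48)": */
	unsigned long tag_target = (vaddr >> 22) | (ctx << 48);
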
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/head.S           |   1 +
 arch/sparc64/kernel/ktlb.S           |  12 +-
 arch/sparc64/kernel/sun4v_tlb_miss.S | 219 +++++++++++++++++++++++++++++++++++
 arch/sparc64/kernel/tsb.S            |  89 +++++++++++---
 arch/sparc64/kernel/vmlinux.lds.S    |   3 +
 arch/sparc64/mm/init.c               |  24 +++-
 include/asm-sparc64/cpudata.h        |   8 +-
 include/asm-sparc64/tsb.h            |  11 +-
 8 files changed, 349 insertions(+), 18 deletions(-)
 create mode 100644 arch/sparc64/kernel/sun4v_tlb_miss.S

diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 7840271d7aa..03fc0b5b1d9 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -474,6 +474,7 @@ setup_tba:
 sparc64_boot_end:
 
 #include "systbls.S"
+#include "sun4v_tlb_miss.S"
 #include "ktlb.S"
 #include "tsb.S"
 #include "etrap.S"
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index c1335432124..2e55084a0c1 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -16,12 +16,16 @@
 	.text
 	.align		32
 
-	.globl		kvmap_itlb
 kvmap_itlb:
 	/* g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_IMMU, %g4
 
+	/* sun4v_itlb_miss branches here with the missing virtual
+	 * address already loaded into %g4
+	 */
+kvmap_itlb_4v:
+
 kvmap_itlb_nonlinear:
 	/* Catch kernel NULL pointer calls.  */
 	sethi		%hi(PAGE_SIZE), %g5
@@ -94,11 +98,15 @@ kvmap_dtlb_obp:
 	 nop
 
 	.align		32
-	.globl		kvmap_dtlb
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_DMMU, %g4
+
+	/* sun4v_dtlb_miss branches here with the missing virtual
+	 * address already loaded into %g4
+	 */
+kvmap_dtlb_4v:
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
new file mode 100644
index 00000000000..58ea5dd8573
--- /dev/null
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -0,0 +1,219 @@
+/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
+ *
+ * Copyright (C) 2006 <davem@davemloft.net>
+ */
+
+	.text
+	.align	32
+
+sun4v_itlb_miss:
+	/* Load CPU ID into %g3.  */
+	mov	SCRATCHPAD_CPUID, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	
+	/* Load UTSB reg into %g1.  */
+	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
+
+	/* Load &trap_block[smp_processor_id()] into %g2.  */
+	sethi	%hi(trap_block), %g2
+	or	%g2, %lo(trap_block), %g2
+	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+	add	%g2, %g3, %g2
+
+	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
+	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
+	 * code wants the missing virtual address in %g4, so that value
+	 * cannot be modified through the entirety of this handler.
+	 */
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	srlx	%g4, 22, %g3
+	sllx	%g5, 48, %g6
+	or	%g6, %g3, %g6
+	brz,pn	%g5, kvmap_itlb_4v
+	 nop
+
+	/* Create TSB pointer.  This is something like:
+	 *
+	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
+	 * tsb_base = tsb_reg & ~0x7UL;
+	 */
+	and	%g1, 0x7, %g3
+	andn	%g1, 0x7, %g1
+	mov	512, %g7
+	sllx	%g7, %g3, %g7
+	sub	%g7, 1, %g7
+
+	/* TSB index mask is in %g7, tsb base is in %g1.  Compute
+	 * the TSB entry pointer into %g1:
+	 *
+	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+	 * tsb_ptr = tsb_base + (tsb_index * 16);
+	 */
+	srlx	%g4, PAGE_SHIFT, %g3
+	and	%g3, %g7, %g3
+	sllx	%g3, 4, %g3
+	add	%g1, %g3, %g1
+
+	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
+	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
+	cmp	%g2, %g6
+	sethi	%hi(_PAGE_EXEC), %g7
+	bne,a,pn %xcc, tsb_miss_page_table_walk
+	 mov	FAULT_CODE_ITLB, %g3
+	andcc	%g3, %g7, %g0
+	be,a,pn	%xcc, tsb_do_fault
+	 mov	FAULT_CODE_ITLB, %g3
+
+	/* We have a valid entry, make hypervisor call to load
+	 * I-TLB and return from trap.
+	 *
+	 * %g3:	PTE
+	 * %g4:	vaddr
+	 * %g6:	TAG TARGET (only "CTX << 48" part matters)
+	 */
+sun4v_itlb_load:
+	mov	%o0, %g1		! save %o0
+	mov	%o1, %g2		! save %o1
+	mov	%o2, %g5		! save %o2
+	mov	%o3, %g7		! save %o3
+	mov	%g4, %o0		! vaddr
+	srlx	%g6, 48, %o1		! ctx
+	mov	%g3, %o2		! PTE
+	mov	HV_MMU_IMMU, %o3	! flags
+	ta	HV_MMU_MAP_ADDR_TRAP
+	mov	%g1, %o0		! restore %o0
+	mov	%g2, %o1		! restore %o1
+	mov	%g5, %o2		! restore %o2
+	mov	%g7, %o3		! restore %o3
+
+	retry
+
+sun4v_dtlb_miss:
+	/* Load CPU ID into %g3.  */
+	mov	SCRATCHPAD_CPUID, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	
+	/* Load UTSB reg into %g1.  */
+	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
+
+	/* Load &trap_block[smp_processor_id()] into %g2.  */
+	sethi	%hi(trap_block), %g2
+	or	%g2, %lo(trap_block), %g2
+	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+	add	%g2, %g3, %g2
+
+	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
+	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
+	 * code wants the missing virtual address in %g4, so that value
+	 * cannot be modified through the entirety of this handler.
+	 */
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	srlx	%g4, 22, %g3
+	sllx	%g5, 48, %g6
+	or	%g6, %g3, %g6
+	brz,pn	%g5, kvmap_dtlb_4v
+	 nop
+
+	/* Create TSB pointer.  This is something like:
+	 *
+	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
+	 * tsb_base = tsb_reg & ~0x7UL;
+	 */
+	and	%g1, 0x7, %g3
+	andn	%g1, 0x7, %g1
+	mov	512, %g7
+	sllx	%g7, %g3, %g7
+	sub	%g7, 1, %g7
+
+	/* TSB index mask is in %g7, tsb base is in %g1.  Compute
+	 * the TSB entry pointer into %g1:
+	 *
+	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+	 * tsb_ptr = tsb_base + (tsb_index * 16);
+	 */
+	srlx	%g4, PAGE_SHIFT, %g3
+	and	%g3, %g7, %g3
+	sllx	%g3, 4, %g3
+	add	%g1, %g3, %g1
+
+	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
+	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
+	cmp	%g2, %g6
+	bne,a,pn %xcc, tsb_miss_page_table_walk
+	 mov	FAULT_CODE_DTLB, %g3
+
+	/* We have a valid entry, make hypervisor call to load
+	 * D-TLB and return from trap.
+	 *
+	 * %g3:	PTE
+	 * %g4:	vaddr
+	 * %g6:	TAG TARGET (only "CTX << 48" part matters)
+	 */
+sun4v_dtlb_load:
+	mov	%o0, %g1		! save %o0
+	mov	%o1, %g2		! save %o1
+	mov	%o2, %g5		! save %o2
+	mov	%o3, %g7		! save %o3
+	mov	%g4, %o0		! vaddr
+	srlx	%g6, 48, %o1		! ctx
+	mov	%g3, %o2		! PTE
+	mov	HV_MMU_DMMU, %o3	! flags
+	ta	HV_MMU_MAP_ADDR_TRAP
+	mov	%g1, %o0		! restore %o0
+	mov	%g2, %o1		! restore %o1
+	mov	%g5, %o2		! restore %o2
+	mov	%g7, %o3		! restore %o3
+
+	retry
+
+sun4v_dtlb_prot:
+	/* Load CPU ID into %g3.  */
+	mov	SCRATCHPAD_CPUID, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	
+	/* Load &trap_block[smp_processor_id()] into %g2.  */
+	sethi	%hi(trap_block), %g2
+	or	%g2, %lo(trap_block), %g2
+	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+	add	%g2, %g3, %g2
+
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g5
+	rdpr	%tl, %g1
+	cmp	%g1, 1
+	bgu,pn		%xcc, winfix_trampoline
+	 nop
+	ba,pt		%xcc, sparc64_realfault_common
+	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define SUN4V_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	srl	%g1, 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	sun4v_patch_tlb_handlers
+	.type	sun4v_patch_tlb_handlers,#function
+sun4v_patch_tlb_handlers:
+	SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss)
+	SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss)
+	SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss)
+	SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss)
+	SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot)
+	SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot)
+	retl
+	 nop
+	.size	sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 96e63168d8b..818bc9e9135 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -18,30 +18,33 @@
 	 * %g4:	available temporary
 	 * %g5:	available temporary
 	 * %g6: TAG TARGET
-	 * %g7:	physical address base of the linux page
+	 * %g7:	available temporary, will be loaded by us with
+	 *      the physical address base of the linux page
 	 *      tables for the current address space
 	 */
-	.globl		tsb_miss_dtlb
 tsb_miss_dtlb:
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_DMMU, %g4
 	ba,pt		%xcc, tsb_miss_page_table_walk
 	 nop
 
-	.globl		tsb_miss_itlb
 tsb_miss_itlb:
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_IMMU, %g4
 	ba,pt		%xcc, tsb_miss_page_table_walk
 	 nop
 
+	/* The sun4v TLB miss handlers jump directly here instead
+	 * of tsb_miss_{d,i}tlb with the missing virtual address
+	 * already loaded into %g4.
+	 */
 tsb_miss_page_table_walk:
 	TRAP_LOAD_PGD_PHYS(%g7, %g5)
 
 	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 tsb_reload:
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
@@ -52,9 +55,9 @@ tsb_reload:
 	 * bother putting it into the TSB.
 	 */
 	srlx		%g5, 32, %g2
-	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g4
+	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g7
+	and		%g2, %g7, %g2
 	sethi		%hi(_PAGE_SZBITS >> 32), %g7
-	and		%g2, %g4, %g2
 	cmp		%g2, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
 	 TSB_STORE(%g1, %g0)
@@ -68,12 +71,54 @@ tsb_tlb_reload:
 	 nop
 
 tsb_dtlb_load:
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
+
+661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
 	retry
+	.section	.gl_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
+	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
+	 * instruction get nop'd out and we get here to branch
+	 * to the sun4v tlb load code.  The registers are set up
+	 * as follows:
+	 *
+	 * %g4: vaddr
+	 * %g5: PTE
+	 * %g6:	TAG
+	 *
+	 * The sun4v TLB load wants the PTE in %g3 so we fix that
+	 * up here.
+	 */
+	ba,pt		%xcc, sun4v_dtlb_load
+	 mov		%g5, %g3
 
 tsb_itlb_load:
-	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
+
+661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
 	retry
+	.section	.gl_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
+	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
+	 * instruction get nop'd out and we get here to branch
+	 * to the sun4v tlb load code.  The registers are set up
+	 * as follows:
+	 *
+	 * %g4: vaddr
+	 * %g5: PTE
+	 * %g6:	TAG
+	 *
+	 * The sun4v TLB load wants the PTE in %g3 so we fix that
+	 * up here.
+	 */
+	ba,pt		%xcc, sun4v_itlb_load
+	 mov		%g5, %g3
 
 	/* No valid entry in the page tables, do full fault
 	 * processing.
@@ -95,10 +140,17 @@ tsb_do_fault:
 	 nop
 
 tsb_do_dtlb_fault:
-	rdpr	%tl, %g4
-	cmp	%g4, 1
-	mov	TLB_TAG_ACCESS, %g4
+	rdpr	%tl, %g3
+	cmp	%g3, 1
+
+661:	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_DMMU, %g5
+	.section .gl_2insn_patch, "ax"
+	.word	661b
+	mov	%g4, %g5
+	nop
+	.previous
+
 	be,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_DTLB, %g4
 	ba,pt	%xcc, winfix_trampoline
@@ -196,12 +248,23 @@ __tsb_context_switch:
 	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	mov	TSB_REG, %g1
+661:	mov	TSB_REG, %g1
 	stxa	%o1, [%g1] ASI_DMMU
+	.section .gl_2insn_patch, "ax"
+	.word	661b
+	mov	SCRATCHPAD_UTSBREG1, %g1
+	stxa	%o1, [%g1] ASI_SCRATCHPAD
+	.previous
+
 	membar	#Sync
 
-	stxa	%o1, [%g1] ASI_IMMU
+661:	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
+	.section .gl_2insn_patch, "ax"
+	.word	661b
+	nop
+	nop
+	.previous
 
 	brz	%o2, 9f
 	 nop
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 686bf6b3b03..a09a8a2383d 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -71,6 +71,9 @@ SECTIONS
   __con_initcall_end = .;
   SECURITY_INIT
   . = ALIGN(4);
+  __tsb_ldquad_phys_patch = .;
+  .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) }
+  __tsb_ldquad_phys_patch_end = .;
   __tsb_phys_patch = .;
   .tsb_phys_patch : { *(.tsb_phys_patch) }
   __tsb_phys_patch_end = .;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index ab50cd9618f..e9aac424877 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1050,8 +1050,25 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
 
 static void __init tsb_phys_patch(void)
 {
+	struct tsb_ldquad_phys_patch_entry *pquad;
 	struct tsb_phys_patch_entry *p;
 
+	pquad = &__tsb_ldquad_phys_patch;
+	while (pquad < &__tsb_ldquad_phys_patch_end) {
+		unsigned long addr = pquad->addr;
+
+		if (tlb_type == hypervisor)
+			*(unsigned int *) addr = pquad->sun4v_insn;
+		else
+			*(unsigned int *) addr = pquad->sun4u_insn;
+		wmb();
+		__asm__ __volatile__("flush	%0"
+				     : /* no outputs */
+				     : "r" (addr));
+
+		pquad++;
+	}
+
 	p = &__tsb_phys_patch;
 	while (p < &__tsb_phys_patch_end) {
 		unsigned long addr = p->addr;
@@ -1069,6 +1086,7 @@ static void __init tsb_phys_patch(void)
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
+extern void sun4v_patch_tlb_handlers(void);
 
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
@@ -1078,9 +1096,13 @@ void __init paging_init(void)
 	unsigned long end_pfn, pages_avail, shift;
 	unsigned long real_end, i;
 
-	if (tlb_type == cheetah_plus)
+	if (tlb_type == cheetah_plus ||
+	    tlb_type == hypervisor)
 		tsb_phys_patch();
 
+	if (tlb_type == hypervisor)
+		sun4v_patch_tlb_handlers();
+
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
 
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 998145b9265..a3dc4afc4b2 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -6,6 +6,8 @@
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
+#include <asm/hypervisor.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/percpu.h>
@@ -57,6 +59,9 @@ struct trap_per_cpu {
 
 /* D-cache line 2 */
 	unsigned long		__pad2[4];
+
+/* Dcache lines 3 and 4 */
+	struct hv_fault_status	fault_info;
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(void);
@@ -88,8 +93,9 @@ extern struct gl_2insn_patch_entry __gl_2insn_patch, __gl_2insn_patch_end;
 
 #define TRAP_PER_CPU_THREAD	0x00
 #define TRAP_PER_CPU_PGD_PADDR	0x08
+#define TRAP_PER_CPU_FAULT_INFO	0x20
 
-#define TRAP_BLOCK_SZ_SHIFT	6
+#define TRAP_BLOCK_SZ_SHIFT	7
 
 #include <asm/scratchpad.h>
 
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 44709cde561..7f3abc32c4d 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -53,6 +53,14 @@
  * kernel image, so we don't play these games for swapper_tsb access.
  */
 #ifndef __ASSEMBLY__
+struct tsb_ldquad_phys_patch_entry {
+	unsigned int	addr;
+	unsigned int	sun4u_insn;
+	unsigned int	sun4v_insn;
+};
+extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
+	__tsb_ldquad_phys_patch_end;
+
 struct tsb_phys_patch_entry {
 	unsigned int	addr;
 	unsigned int	insn;
@@ -61,9 +69,10 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 #endif
 #define TSB_LOAD_QUAD(TSB, REG)	\
 661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
-	.section	.tsb_phys_patch, "ax"; \
+	.section	.tsb_ldquad_phys_patch, "ax"; \
 	.word		661b; \
 	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
+	ldda		[TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
 	.previous
 
 #define TSB_LOAD_TAG_HIGH(TSB, REG) \
-- 


From df7d6aec96ab98cb182dd5138a85bdc363a9bf0d Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 7 Feb 2006 00:00:16 -0800
Subject: [SPARC64]: Rename gl_{1,2}insn_patch --> sun4v_{1,2}insn_patch

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/etrap.S       |  4 ++--
 arch/sparc64/kernel/ktlb.S        |  4 ++--
 arch/sparc64/kernel/rtrap.S       |  4 ++--
 arch/sparc64/kernel/setup.c       | 16 ++++++++--------
 arch/sparc64/kernel/tsb.S         | 12 ++++++------
 arch/sparc64/kernel/vmlinux.lds.S | 12 ++++++------
 arch/sparc64/mm/ultra.S           |  4 ++--
 include/asm-sparc64/cpudata.h     | 11 +++++++----
 8 files changed, 35 insertions(+), 32 deletions(-)

diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 4d644949ad4..d8c062a1700 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -105,7 +105,7 @@ etrap_save:	save	%g2, -STACK_BIAS, %sp
 
 		/* Go to trap time globals so we can save them.  */
 661:		wrpr	%g0, ETRAP_PSTATE1, %pstate
-		.section .gl_1insn_patch, "ax"
+		.section .sun4v_1insn_patch, "ax"
 		.word	661b
 		SET_GL(0)
 		.previous
@@ -206,7 +206,7 @@ etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 
 		wrpr	%g0, 1, %tl
 661:		nop
-		.section .gl_1insn_patch, "ax"
+		.section .sun4v_1insn_patch, "ax"
 		.word	661b
 		SET_GL(1)
 		.previous
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2e55084a0c1..f6bb2e08964 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -67,7 +67,7 @@ kvmap_itlb_longpath:
 
 661:	rdpr	%pstate, %g5
 	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
-	.section .gl_2insn_patch, "ax"
+	.section .sun4v_2insn_patch, "ax"
 	.word	661b
 	nop
 	nop
@@ -179,7 +179,7 @@ kvmap_dtlb_longpath:
 
 661:	rdpr	%pstate, %g5
 	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
-	.section .gl_2insn_patch, "ax"
+	.section .sun4v_2insn_patch, "ax"
 	.word	661b
 	nop
 	nop
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index a2fa277da62..a55d517e76a 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -234,7 +234,7 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 
 		/* Normal globals are restored, go to trap globals.  */
 661:		wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
-		.section		.gl_1insn_patch, "ax"
+		.section		.sun4v_1insn_patch, "ax"
 		.word			661b
 		SET_GL(1)
 		.previous
@@ -316,7 +316,7 @@ user_rtt_fill_fixup:
 		wrpr	%g0, RTRAP_PSTATE, %pstate
 
 661:		nop
-		.section		.gl_1insn_patch, "ax"
+		.section		.sun4v_1insn_patch, "ax"
 		.word			661b
 		SET_GL(0)
 		.previous
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 40acac5b833..6d6178efd58 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -549,16 +549,16 @@ static void __init per_cpu_patch(void)
 #endif
 }
 
-static void __init gl_patch(void)
+static void __init sun4v_patch(void)
 {
-	struct gl_1insn_patch_entry *p1;
-	struct gl_2insn_patch_entry *p2;
+	struct sun4v_1insn_patch_entry *p1;
+	struct sun4v_2insn_patch_entry *p2;
 
 	if (tlb_type != hypervisor)
 		return;
 
-	p1 = &__gl_1insn_patch;
-	while (p1 < &__gl_1insn_patch_end) {
+	p1 = &__sun4v_1insn_patch;
+	while (p1 < &__sun4v_1insn_patch_end) {
 		unsigned long addr = p1->addr;
 
 		*(unsigned int *) (addr +  0) = p1->insn;
@@ -568,8 +568,8 @@ static void __init gl_patch(void)
 		p1++;
 	}
 
-	p2 = &__gl_2insn_patch;
-	while (p2 < &__gl_2insn_patch_end) {
+	p2 = &__sun4v_2insn_patch;
+	while (p2 < &__sun4v_2insn_patch_end) {
 		unsigned long addr = p2->addr;
 
 		*(unsigned int *) (addr +  0) = p2->insns[0];
@@ -606,7 +606,7 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	per_cpu_patch();
 
-	gl_patch();
+	sun4v_patch();
 
 	boot_flags_init(*cmdline_p);
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 818bc9e9135..819a6ef9799 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -74,7 +74,7 @@ tsb_dtlb_load:
 
 661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
 	retry
-	.section	.gl_2insn_patch, "ax"
+	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	nop
 	nop
@@ -99,7 +99,7 @@ tsb_itlb_load:
 
 661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
 	retry
-	.section	.gl_2insn_patch, "ax"
+	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	nop
 	nop
@@ -130,7 +130,7 @@ tsb_do_fault:
 
 661:	rdpr		%pstate, %g5
 	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
-	.section	.gl_2insn_patch, "ax"
+	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	nop
 	nop
@@ -145,7 +145,7 @@ tsb_do_dtlb_fault:
 
 661:	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_DMMU, %g5
-	.section .gl_2insn_patch, "ax"
+	.section .sun4v_2insn_patch, "ax"
 	.word	661b
 	mov	%g4, %g5
 	nop
@@ -250,7 +250,7 @@ __tsb_context_switch:
 
 661:	mov	TSB_REG, %g1
 	stxa	%o1, [%g1] ASI_DMMU
-	.section .gl_2insn_patch, "ax"
+	.section .sun4v_2insn_patch, "ax"
 	.word	661b
 	mov	SCRATCHPAD_UTSBREG1, %g1
 	stxa	%o1, [%g1] ASI_SCRATCHPAD
@@ -260,7 +260,7 @@ __tsb_context_switch:
 
 661:	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
-	.section .gl_2insn_patch, "ax"
+	.section .sun4v_2insn_patch, "ax"
 	.word	661b
 	nop
 	nop
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index a09a8a2383d..b097379a49a 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -80,12 +80,12 @@ SECTIONS
   __cpuid_patch = .;
   .cpuid_patch : { *(.cpuid_patch) }
   __cpuid_patch_end = .;
-  __gl_1insn_patch = .;
-  .gl_1insn_patch : { *(.gl_1insn_patch) }
-  __gl_1insn_patch_end = .;
-  __gl_2insn_patch = .;
-  .gl_2insn_patch : { *(.gl_2insn_patch) }
-  __gl_2insn_patch_end = .;
+  __sun4v_1insn_patch = .;
+  .sun4v_1insn_patch : { *(.sun4v_1insn_patch) }
+  __sun4v_1insn_patch_end = .;
+  __sun4v_2insn_patch = .;
+  .sun4v_2insn_patch : { *(.sun4v_2insn_patch) }
+  __sun4v_2insn_patch_end = .;
   . = ALIGN(8192); 
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 5dd86ad0d29..8c244932b1c 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -447,7 +447,7 @@ xcall_sync_tick:
 
 661:	rdpr		%pstate, %g2
 	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
-	.section	.gl_2insn_patch, "ax"
+	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	nop
 	nop
@@ -478,7 +478,7 @@ xcall_report_regs:
 
 661:	rdpr		%pstate, %g2
 	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
-	.section	.gl_2insn_patch, "ax"
+	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	nop
 	nop
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index a3dc4afc4b2..26b1dc9afbf 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -78,17 +78,20 @@ struct cpuid_patch_entry {
 extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
 #endif
 
-struct gl_1insn_patch_entry {
+struct sun4v_1insn_patch_entry {
 	unsigned int	addr;
 	unsigned int	insn;
 };
-extern struct gl_1insn_patch_entry __gl_1insn_patch, __gl_1insn_patch_end;
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+	__sun4v_1insn_patch_end;
 
-struct gl_2insn_patch_entry {
+struct sun4v_2insn_patch_entry {
 	unsigned int	addr;
 	unsigned int	insns[2];
 };
-extern struct gl_2insn_patch_entry __gl_2insn_patch, __gl_2insn_patch_end;
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+	__sun4v_2insn_patch_end;
+
 #endif /* !(__ASSEMBLY__) */
 
 #define TRAP_PER_CPU_THREAD	0x00
-- 


From 89a5264f065672a882f555228000614a6b2182b7 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 7 Feb 2006 21:15:41 -0800
Subject: [SPARC64]: asm/cpudata.h needs asm/asi.h

For the expansion of __GET_CPUID() on SMP.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/cpudata.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 26b1dc9afbf..8a171ad7727 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -7,6 +7,7 @@
 #define _SPARC64_CPUDATA_H
 
 #include <asm/hypervisor.h>
+#include <asm/asi.h>
 
 #ifndef __ASSEMBLY__
 
@@ -130,9 +131,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
 	/* sun4v implementation. */			\
 	mov		SCRATCHPAD_CPUID, REG;		\
-	nop;						\
 	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
 	nop;						\
+	nop;						\
 	.previous;
 
 /* Clobbers TMP, current address space PGD phys address into DEST.  */
-- 


From 481295f982b21b1dbe71cbf41d3a93028fee30d1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 7 Feb 2006 21:51:08 -0800
Subject: [SPARC64]: Register per-cpu fault status area with sun4v hypervisor.

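As used here, the registration is a sun4v fast trap: the API function
number is loaded into %o0, the real address of the per-cpu fault
status area into %o1, and the hypervisor is entered with "ta
HV_FAST_TRAP".  A minimal sketch of the same sequence (fault_area_pa
is a stand-in for the trap_block physical address computed below; the
return status, which this code does not check, comes back in %o0):

	register unsigned long func asm("o0") = HV_FAST_MMU_FAULT_AREA_CONF;
	register unsigned long ra asm("o1") = fault_area_pa;

	__asm__ __volatile__("ta	%2"
			     : "+r" (func), "+r" (ra)
			     : "i" (HV_FAST_TRAP));
	/* func now holds the hypervisor status (0 on success). */
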
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/smp.c     |  3 +++
 arch/sparc64/mm/init.c        | 29 +++++++++++++++++++++++++----
 include/asm-sparc64/pgtable.h |  1 +
 3 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index aba0f886b05..223cc6bd369 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -122,6 +122,9 @@ void __init smp_callin(void)
 
 	__local_per_cpu_offset = __per_cpu_offset(cpuid);
 
+	if (tlb_type == hypervisor)
+		sun4v_register_fault_status();
+
 	__flush_tlb_all();
 
 	smp_setup_percpu_timer();
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index e9aac424877..4c95cf34075 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -40,6 +40,7 @@
 #include <asm/spitfire.h>
 #include <asm/sections.h>
 #include <asm/tsb.h>
+#include <asm/hypervisor.h>
 
 extern void device_scan(void);
 
@@ -1083,6 +1084,24 @@ static void __init tsb_phys_patch(void)
 	}
 }
 
+/* Register this cpu's fault status area with the hypervisor.  */
+void __cpuinit sun4v_register_fault_status(void)
+{
+	register unsigned long arg0 asm("%o0");
+	register unsigned long arg1 asm("%o1");
+	int cpu = hard_smp_processor_id();
+	struct trap_per_cpu *tb = &trap_block[cpu];
+	unsigned long pa;
+
+	pa = kern_base + ((unsigned long) tb - KERNBASE);
+	arg0 = HV_FAST_MMU_FAULT_AREA_CONF;
+	arg1 = pa;
+	__asm__ __volatile__("ta	%4"
+			     : "=&r" (arg0), "=&r" (arg1)
+			     : "0" (arg0), "1" (arg1),
+			       "i" (HV_FAST_TRAP));
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
@@ -1096,12 +1115,17 @@ void __init paging_init(void)
 	unsigned long end_pfn, pages_avail, shift;
 	unsigned long real_end, i;
 
+	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+
 	if (tlb_type == cheetah_plus ||
 	    tlb_type == hypervisor)
 		tsb_phys_patch();
 
-	if (tlb_type == hypervisor)
+	if (tlb_type == hypervisor) {
 		sun4v_patch_tlb_handlers();
+		sun4v_register_fault_status();
+	}
 
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
@@ -1112,9 +1136,6 @@ void __init paging_init(void)
 
 	pfn_base = phys_base >> PAGE_SHIFT;
 
-	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
-	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
-
 	set_bit(0, mmu_context_bmap);
 
 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 2b2ecd6104d..c42c06a37d1 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -437,6 +437,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
 extern void pgtable_cache_init(void);
+extern void sun4v_register_fault_status(void);
 
 #endif /* !(__ASSEMBLY__) */
 
-- 


From 8b11bd12aff76e02cdc2cbc9e439bba88d281223 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 7 Feb 2006 22:13:05 -0800
Subject: [SPARC64]: Patch up mmu context register writes for sun4v.

sun4v uses ASI_MMU instead of ASI_DMMU.

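Each "661:" site below records its address in a .sun4v_1insn_patch
entry, and on hypervisor machines the boot-time patcher rewrites the
sun4u instruction in place.  A sketch of that walk, mirroring
sun4v_patch() in arch/sparc64/kernel/setup.c (the wmb()/flush pair
follows the tsb_phys_patch() pattern shown earlier in the series):

	struct sun4v_1insn_patch_entry *p = &__sun4v_1insn_patch;

	while (p < &__sun4v_1insn_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;	/* e.g. the ASI_MMU stxa */
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr));

		p++;
	}
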
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S       | 80 ++++++++++++++++++++++++++++++++++-----
 arch/sparc64/kernel/etrap.S       |  8 +++-
 arch/sparc64/kernel/head.S        | 33 ++++++++++++----
 arch/sparc64/kernel/rtrap.S       | 24 ++++++++++--
 arch/sparc64/kernel/setup.c       | 30 ++++++++-------
 arch/sparc64/kernel/trampoline.S  | 32 ++++++++++++----
 arch/sparc64/mm/init.c            |  9 -----
 arch/sparc64/prom/p1275.c         | 11 ------
 include/asm-sparc64/mmu_context.h | 15 +++++---
 9 files changed, 176 insertions(+), 66 deletions(-)

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 4ca3ea0beaf..f51b66a1687 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -97,10 +97,22 @@ do_fpdis:
 	add		%g6, TI_FPREGS + 0x80, %g1
 	faddd		%f0, %f2, %f4
 	fmuld		%f0, %f2, %f6
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	add		%g6, TI_FPREGS + 0xc0, %g2
 	faddd		%f0, %f2, %f8
@@ -126,11 +138,23 @@ do_fpdis:
 	 fzero		%f32
 	mov		SECONDARY_CONTEXT, %g3
 	fzero		%f34
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	add		%g6, TI_FPREGS, %g1
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	add		%g6, TI_FPREGS + 0x40, %g2
 	faddd		%f32, %f34, %f36
@@ -155,10 +179,22 @@ do_fpdis:
 	 nop
 3:	mov		SECONDARY_CONTEXT, %g3
 	add		%g6, TI_FPREGS, %g1
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	mov		0x40, %g2
 	membar		#Sync
@@ -169,7 +205,13 @@ do_fpdis:
 	ldda		[%g1 + %g2] ASI_BLK_S, %f48
 	membar		#Sync
 fpdis_exit:
-	stxa		%g5, [%g3] ASI_DMMU
+
+661:	stxa		%g5, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g5, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 fpdis_exit2:
 	wr		%g7, 0, %gsr
@@ -323,10 +365,22 @@ do_fptrap_after_fsr:
 	rd		%gsr, %g3
 	stx		%g3, [%g6 + TI_GSR]
 	mov		SECONDARY_CONTEXT, %g3
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	add		%g6, TI_FPREGS, %g2
 	andcc		%g1, FPRS_DL, %g0
@@ -341,7 +395,13 @@ do_fptrap_after_fsr:
 	stda		%f48, [%g2 + %g3] ASI_BLK_S
 5:	mov		SECONDARY_CONTEXT, %g1
 	membar		#Sync
-	stxa		%g5, [%g1] ASI_DMMU
+
+661:	stxa		%g5, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g5, [%g1] ASI_MMU
+	.previous
+
 	membar		#Sync
 	ba,pt		%xcc, etrap
 	 wr		%g0, 0, %fprs
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index d8c062a1700..a0e7d480e5d 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -95,7 +95,13 @@ etrap_save:	save	%g2, -STACK_BIAS, %sp
 		wrpr	%g2, 0, %wstate
 		sethi	%hi(sparc64_kern_pri_context), %g2
 		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
-		stxa	%g3, [%l4] ASI_DMMU
+
+661:		stxa	%g3, [%l4] ASI_DMMU
+		.section .sun4v_1insn_patch, "ax"
+		.word	661b
+		stxa	%g3, [%l4] ASI_MMU
+		.previous
+
 		sethi	%hi(KERNBASE), %l4
 		flush	%l4
 		mov	ASI_AIUS, %l7
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index f04f7391f23..a304845f8c5 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -303,12 +303,24 @@ jump_to_sun4u_init:
 
 sun4u_init:
 	/* Set ctx 0 */
-	mov	PRIMARY_CONTEXT, %g7
-	stxa	%g0, [%g7] ASI_DMMU
-	membar	#Sync
+	mov		PRIMARY_CONTEXT, %g7
+
+661:	stxa		%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g0, [%g7] ASI_MMU
+	.previous
+
+	membar		#Sync
+
+	mov		SECONDARY_CONTEXT, %g7
+
+661:	stxa		%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g0, [%g7] ASI_MMU
+	.previous
 
-	mov	SECONDARY_CONTEXT, %g7
-	stxa	%g0, [%g7] ASI_DMMU
 	membar	#Sync
 
 	BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
@@ -436,8 +448,15 @@ setup_trap_table:
 	/* Start using proper page size encodings in ctx register.  */
 	sethi	%hi(sparc64_kern_pri_context), %g3
 	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
-	mov	PRIMARY_CONTEXT, %g1
-	stxa	%g2, [%g1] ASI_DMMU
+
+	mov		PRIMARY_CONTEXT, %g1
+
+661:	stxa		%g2, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g1] ASI_MMU
+	.previous
+
 	membar	#Sync
 
 	/* Kill PROM timer */
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index a55d517e76a..551f7198200 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -264,11 +264,23 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 
 		brnz,pn			%l3, kern_rtt
 		 mov			PRIMARY_CONTEXT, %l7
-		ldxa			[%l7 + %l7] ASI_DMMU, %l0
+
+661:		ldxa			[%l7 + %l7] ASI_DMMU, %l0
+		.section		.sun4v_1insn_patch, "ax"
+		.word			661b
+		ldxa			[%l7 + %l7] ASI_MMU, %l0
+		.previous
+
 		sethi			%hi(sparc64_kern_pri_nuc_bits), %l1
 		ldx			[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
 		or			%l0, %l1, %l0
-		stxa			%l0, [%l7] ASI_DMMU
+
+661:		stxa			%l0, [%l7] ASI_DMMU
+		.section		.sun4v_1insn_patch, "ax"
+		.word			661b
+		stxa			%l0, [%l7] ASI_MMU
+		.previous
+
 		sethi			%hi(KERNBASE), %l7
 		flush			%l7
 		rdpr			%wstate, %l1
@@ -303,7 +315,13 @@ user_rtt_fill_fixup:
 		sethi	%hi(sparc64_kern_pri_context), %g2
 		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
 		mov	PRIMARY_CONTEXT, %g1
-		stxa	%g2, [%g1] ASI_DMMU
+
+661:		stxa	%g2, [%g1] ASI_DMMU
+		.section .sun4v_1insn_patch, "ax"
+		.word	661b
+		stxa	%g2, [%g1] ASI_MMU
+		.previous
+
 		sethi	%hi(KERNBASE), %g1
 		flush	%g1
 
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 6d6178efd58..2d64320d3a4 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -189,26 +189,30 @@ int prom_callback(long *args)
 		}
 
 		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
-			extern unsigned long sparc64_kern_pri_context;
-
-			/* Spitfire Errata #32 workaround */
-			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-					     "flush	%%g6"
-					     : /* No outputs */
-					     : "r" (sparc64_kern_pri_context),
-					       "r" (PRIMARY_CONTEXT),
-					       "i" (ASI_DMMU));
+			if (tlb_type == spitfire) {
+				extern unsigned long sparc64_kern_pri_context;
+
+				/* Spitfire Errata #32 workaround */
+				__asm__ __volatile__(
+					"stxa 	%0, [%1] %2\n\t"
+					"flush	%%g6"
+					: /* No outputs */
+					: "r" (sparc64_kern_pri_context),
+					  "r" (PRIMARY_CONTEXT),
+					  "i" (ASI_DMMU));
+			}
 
 			/*
 			 * Locked down tlb entry.
 			 */
 
-			if (tlb_type == spitfire)
+			if (tlb_type == spitfire) {
 				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
-			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+				res = PROM_TRUE;
+			} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
-
-			res = PROM_TRUE;
+				res = PROM_TRUE;
+			}
 			goto done;
 		}
 
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 18c333f841e..d9e2af35158 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -272,10 +272,22 @@ do_unlock:
 	wr		%g0, ASI_P, %asi
 
 	mov		PRIMARY_CONTEXT, %g7
-	stxa		%g0, [%g7] ASI_DMMU
+
+661:	stxa		%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g0, [%g7] ASI_MMU
+	.previous
+
 	membar		#Sync
 	mov		SECONDARY_CONTEXT, %g7
-	stxa		%g0, [%g7] ASI_DMMU
+
+661:	stxa		%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g0, [%g7] ASI_MMU
+	.previous
+
 	membar		#Sync
 
 	mov		1, %g5
@@ -301,11 +313,17 @@ do_unlock:
 	 nop
 
 	/* Start using proper page size encodings in ctx register.  */
-	sethi	%hi(sparc64_kern_pri_context), %g3
-	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
-	mov	PRIMARY_CONTEXT, %g1
-	stxa	%g2, [%g1] ASI_DMMU
-	membar	#Sync
+	sethi		%hi(sparc64_kern_pri_context), %g3
+	ldx		[%g3 + %lo(sparc64_kern_pri_context)], %g2
+	mov		PRIMARY_CONTEXT, %g1
+
+661:	stxa		%g2, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g1] ASI_MMU
+	.previous
+
+	membar		#Sync
 
 	rdpr		%pstate, %o1
 	or		%o1, PSTATE_IE, %o1
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4c95cf34075..6504d6eb537 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -792,15 +792,6 @@ void sparc_ultra_dump_dtlb(void)
 	}
 }
 
-static inline void spitfire_errata32(void)
-{
-	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-			     "flush	%%g6"
-			     : /* No outputs */
-			     : "r" (0),
-			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-}
-
 extern unsigned long cmdline_memory_size;
 
 unsigned long __init bootmem_init(unsigned long *pages_avail)
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
index a5a7c571202..2b32c489860 100644
--- a/arch/sparc64/prom/p1275.c
+++ b/arch/sparc64/prom/p1275.c
@@ -30,16 +30,6 @@ extern void prom_world(int);
 extern void prom_cif_interface(void);
 extern void prom_cif_callback(void);
 
-static inline unsigned long spitfire_get_primary_context(void)
-{
-	unsigned long ctx;
-
-	__asm__ __volatile__("ldxa	[%1] %2, %0"
-			     : "=r" (ctx)
-			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-	return ctx;
-}
-
 /*
  * This provides SMP safety on the p1275buf. prom_callback() drops this lock
 * to allow recursive acquisition.
@@ -55,7 +45,6 @@ long p1275_cmd(const char *service, long fmt, ...)
 	long attrs, x;
 	
 	p = p1275buf.prom_buffer;
-	BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0);
 
 	spin_lock_irqsave(&prom_entry_lock, flags);
 
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 1d232678821..2760353591a 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -41,11 +41,16 @@ extern void smp_tsb_sync(struct mm_struct *mm);
 
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
-	__asm__ __volatile__("stxa	%0, [%1] %2\n\t" \
-			     "flush	%%g6" \
-			     : /* No outputs */ \
-			     : "r" (CTX_HWBITS((__mm)->context)), \
-			       "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU))
+	__asm__ __volatile__( \
+	"\n661:	stxa		%0, [%1] %2\n" \
+	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
+	"	.word		661b\n" \
+	"	stxa		%0, [%1] %3\n" \
+	"	.previous\n" \
+	"	flush		%%g6\n" \
+	: /* No outputs */ \
+	: "r" (CTX_HWBITS((__mm)->context)), \
+	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
 
 extern void __flush_tlb_mm(unsigned long, unsigned long);
 
-- 


From 3bfd6f3e77f58479ec53aa91d0b078abbb4c0868 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 7 Feb 2006 22:49:38 -0800
Subject: [SPARC64]: Fix some comment typos in asm/hypervisor.h

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/hypervisor.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index 9c8e453abe9..b4e0d52acd5 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -272,17 +272,19 @@
  *		EBADALIGN	Base real address is not correctly aligned
  *				for size.
  *
- * Configure the given queue to be placed at the givem base real
+ * Configure the given queue to be placed at the given base real
  * address, with the given number of entries.  The number of entries
  * must be a power of 2.  The base real address must be aligned
  * exactly to match the queue size.  Each queue entry is 64 bytes
  * long, so for example a 32 entry queue must be aligned on a 2048
  * byte real address boundary.
  *
- * The specified queue is unconfigured is number of entries is given as zero.
+ * The specified queue is unconfigured if the number of entries is given
+ * as zero.
  *
  * For the current version of this API service, the argument queue is defined
  * as follows:
+ *
  *	queue		description
  *	-----		-------------------------
  *	0x3c		cpu mondo queue
-- 


From 7202c55c5c57d2ad4611a751544c9368d7fba93a Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 7 Feb 2006 22:53:56 -0800
Subject: [SPARC64]: Add sun4v mondo queue bases to struct trap_per_cpu.

Also, correct the TRAP_PER_CPU_FAULT_INFO define; it should be
0x40, not 0x20.

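The 0x40 follows directly from the struct layout: D-cache line 1
(thread, pgd_paddr, __pad1[2]) covers 0x00-0x18, line 2 (the four
mondo queue PAs) covers 0x20-0x38, so fault_info starts at 0x40.  An
illustrative compile-time check (not part of this patch):

	BUILD_BUG_ON(offsetof(struct trap_per_cpu, fault_info) !=
		     TRAP_PER_CPU_FAULT_INFO);
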
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/cpudata.h | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 8a171ad7727..492314b5347 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -53,15 +53,18 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
  */
 struct thread_info;
 struct trap_per_cpu {
-/* D-cache line 1 */
+/* D-cache line 1: Basic thread information */
 	struct thread_info	*thread;
 	unsigned long		pgd_paddr;
 	unsigned long		__pad1[2];
 
-/* D-cache line 2 */
-	unsigned long		__pad2[4];
+/* D-cache line 2: Sun4V Mondo Queue pointers */
+	unsigned long		cpu_mondo_pa;
+	unsigned long		dev_mondo_pa;
+	unsigned long		resum_mondo_pa;
+	unsigned long		nonresum_mondo_pa;
 
-/* Dcache lines 3 and 4 */
+/* Dcache lines 3 and 4: Hypervisor Fault Status */
 	struct hv_fault_status	fault_info;
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
@@ -95,11 +98,15 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 
 #endif /* !(__ASSEMBLY__) */
 
-#define TRAP_PER_CPU_THREAD	0x00
-#define TRAP_PER_CPU_PGD_PADDR	0x08
-#define TRAP_PER_CPU_FAULT_INFO	0x20
+#define TRAP_PER_CPU_THREAD		0x00
+#define TRAP_PER_CPU_PGD_PADDR		0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA	0x20
+#define TRAP_PER_CPU_DEV_MONDO_PA	0x28
+#define TRAP_PER_CPU_RESUM_MONDO_PA	0x30
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x38
+#define TRAP_PER_CPU_FAULT_INFO		0x40
 
-#define TRAP_BLOCK_SZ_SHIFT	7
+#define TRAP_BLOCK_SZ_SHIFT		7
 
 #include <asm/scratchpad.h>
 
-- 


From 5b0c0572fcd6204675c5f7ddfa572b5017f817dd Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 8 Feb 2006 02:53:50 -0800
Subject: [SPARC64]: Sun4v interrupt handling.

Sun4v has 4 interrupt queues: cpu, device, resumable errors,
and non-resumable errors.  A set of head/tail offset pointers
helps maintain a work queue in physical memory.  The entries
are 64 bytes in size.

Each queue is allocated, then registered with the hypervisor
as we bring cpus up.

The two error queues each get a kernel-side buffer that we
use to quickly empty the main interrupt queue before we
call up to C code to log the event and possibly take evasive
action.

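In C terms, each handler checks the head offset against the tail,
consumes one 64-byte entry, and advances the head modulo the queue
size.  A rough sketch for the device mondo queue, where
read_queue_reg()/write_queue_reg()/handle_one_entry() stand in for
the ldxa/stxa ASI_QUEUE accesses and the entry processing (queue_pa
is the queue base physical address from the trap_block); none of
these are real kernel helpers:

	unsigned long head = read_queue_reg(INTRQ_DEVICE_MONDO_HEAD);
	unsigned long tail = read_queue_reg(INTRQ_DEVICE_MONDO_TAIL);

	if (head != tail) {
		handle_one_entry(queue_pa + head);
		head = (head + 64) & (8192 - 1);	/* wrap in the queue */
		write_queue_reg(INTRQ_DEVICE_MONDO_HEAD, head);
	}
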
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/head.S       |   3 +-
 arch/sparc64/kernel/irq.c        |  16 +-
 arch/sparc64/kernel/sun4v_ivec.S | 349 +++++++++++++++++++++++++++++++++++++++
 arch/sparc64/kernel/traps.c      | 184 +++++++++++++++++++++
 arch/sparc64/kernel/ttable.S     |   5 +-
 include/asm-sparc64/cpudata.h    |  22 ++-
 6 files changed, 568 insertions(+), 11 deletions(-)
 create mode 100644 arch/sparc64/kernel/sun4v_ivec.S

diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index a304845f8c5..01980014aea 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -511,13 +511,14 @@ setup_tba:
 sparc64_boot_end:
 
 #include "systbls.S"
-#include "sun4v_tlb_miss.S"
 #include "ktlb.S"
 #include "tsb.S"
 #include "etrap.S"
 #include "rtrap.S"
 #include "winfixup.S"
 #include "entry.S"
+#include "sun4v_tlb_miss.S"
+#include "sun4v_ivec.S"
 
 /*
  * The following skip makes sure the trap table in ttable.S is aligned
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3c1a2139f1b..ff201c007e0 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -888,7 +888,19 @@ static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
 	}
 }
 
-/* Allocate and init the mondo queues for this cpu.  */
+static void __cpuinit init_one_kbuf(unsigned long *pa_ptr)
+{
+	unsigned long page = get_zeroed_page(GFP_ATOMIC);
+
+	if (!page) {
+		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+		prom_halt();
+	}
+
+	*pa_ptr = __pa(page);
+}
+
+/* Allocate and init the mondo and error queues for this cpu.  */
 void __cpuinit sun4v_init_mondo_queues(void)
 {
 	int cpu = hard_smp_processor_id();
@@ -897,7 +909,9 @@ void __cpuinit sun4v_init_mondo_queues(void)
 	init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
 	init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
 	init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
+	init_one_kbuf(&tb->resum_kernel_buf_pa);
 	init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
+	init_one_kbuf(&tb->nonresum_kernel_buf_pa);
 }
 
 /* Only invoked on boot processor. */
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
new file mode 100644
index 00000000000..d9d442017d3
--- /dev/null
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -0,0 +1,349 @@
+/* sun4v_ivec.S: Sun4v interrupt vector handling.
+ *
+ * Copyright (C) 2006 <davem@davemloft.net>
+ */
+
+#include <asm/cpudata.h>
+#include <asm/intr_queue.h>
+
+	.text
+	.align	32
+
+sun4v_cpu_mondo:
+	/* Head offset in %g2, tail offset in %g4.
+	 * If they are the same, no work.
+	 */
+	mov	INTRQ_CPU_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_CPU_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get CPU mondo queue base phys address into %g7.  */
+	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+
+	/* Now get the cross-call arguments and handler PC, same
+	 * layout as sun4u:
+	 *
+	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
+	 *                  high half is context arg to MMU flushes, into %g5
+	 * 2nd 64-bit word: 64-bit arg, load into %g1
+	 * 3rd 64-bit word: 64-bit arg, load into %g7
+	 */
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
+	add	%g2, 0x8, %g2
+	srlx	%g3, 32, %g5
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
+	add	%g2, 0x8, %g2
+	srl	%g3, 0, %g3
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
+	add	%g2, 0x40 - 0x8 - 0x8, %g2
+
+	/* Update queue head pointer.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_CPU_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	jmpl	%g3, %g0
+	 nop
+
+sun4v_cpu_mondo_queue_empty:
+	retry
+
+sun4v_dev_mondo:
+	/* Head offset in %g2, tail offset in %g4.  */
+	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_dev_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get DEV mondo queue base phys address into %g5.  */
+	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
+
+	/* Load IVEC into %g3.  */
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	add	%g2, 0x40, %g2
+
+	/* XXX There can be a full 64-byte block of data here.
+	 * XXX This is how we can get at MSI vector data.
+	 * XXX Currently we do not capture this, but when we do we'll
+	 * XXX need to add a 64-byte storage area in the struct ino_bucket
+	 * XXX or the struct irq_desc.
+	 */
+
+	/* Update queue head pointer; this frees up some registers.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	/* Get &__irq_work[smp_processor_id()] into %g1.  */
+	sethi	%hi(__irq_work), %g4
+	sllx	%g1, 6, %g1
+	or	%g4, %lo(__irq_work), %g4
+	add	%g4, %g1, %g1
+
+	/* Get &ivector_table[IVEC] into %g4.  */
+	sethi	%hi(ivector_table), %g4
+	sllx	%g3, 5, %g3
+	or	%g4, %lo(ivector_table), %g4
+	add	%g4, %g3, %g4
+
+	/* Load IRQ %pil into %g5.  */
+	ldub	[%g4 + 0x04], %g5
+
+	/* Insert ivector_table[] entry into __irq_work[] queue.  */
+	sllx	%g5, 2, %g3
+	lduw	[%g1 + %g3], %g2	/* g2 = irq_work(cpu, pil) */
+	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
+	stw	%g4, [%g1 + %g3]	/* irq_work(cpu, pil) = bucket */
+
+	/* Signal the interrupt by setting (1 << pil) in %softint.  */
+	mov	1, %g2
+	sllx	%g2, %g5, %g2
+	wr	%g2, 0x0, %set_softint
+
+sun4v_dev_mondo_queue_empty:
+	retry
+
+sun4v_res_mondo:
+	/* Head offset in %g2, tail offset in %g4.  */
+	mov	INTRQ_RESUM_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_RESUM_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_res_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get RES mondo queue base phys address into %g5.  */
+	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
+
+	/* Get RES kernel buffer base phys address into %g7.  */
+	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
+
+	/* If the first word is non-zero, queue is full.  */
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
+	brnz,pn	%g1, sun4v_res_mondo_queue_full
+	 nop
+
+	/* Remember this entry's offset in %g1.  */
+	mov	%g2, %g1
+
+	/* Copy 64-byte queue entry into kernel buffer.  */
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+
+	/* Update queue head pointer.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_RESUM_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	/* Disable interrupts and save register state so we can call
+	 * C code.  The etrap handling will leave %g4 in %l4 for us
+	 * when it's done.
+	 */
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	mov	%g1, %g4
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	/* Log the event.  */
+	add	%sp, PTREGS_OFF, %o0
+	call	sun4v_resum_error
+	 mov	%l4, %o1
+
+	/* Return from trap.  */
+	ba,pt	%xcc, rtrap_irq
+	 nop
+
+sun4v_res_mondo_queue_empty:
+	retry
+
+sun4v_res_mondo_queue_full:
+	/* The queue is full, consolidate our damage by setting
+	 * the head equal to the tail.  We'll just trap again otherwise.
+	 * Call C code to log the event.
+	 */
+	mov	INTRQ_RESUM_MONDO_HEAD, %g2
+	stxa	%g4, [%g2] ASI_QUEUE
+	membar	#Sync
+
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	call	sun4v_resum_overflow
+	 add	%sp, PTREGS_OFF, %o0
+
+	ba,pt	%xcc, rtrap_irq
+	 nop
+
+sun4v_nonres_mondo:
+	/* Head offset in %g2, tail offset in %g4.  */
+	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
+	ldxa	[%g2] ASI_QUEUE, %g2
+	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
+	ldxa	[%g4] ASI_QUEUE, %g4
+	cmp	%g2, %g4
+	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
+	 nop
+
+	/* Get &trap_block[smp_processor_id()] into %g3.  */
+	__GET_CPUID(%g1)
+	sethi	%hi(trap_block), %g3
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
+	or	%g3, %lo(trap_block), %g3
+	add	%g3, %g7, %g3
+
+	/* Get NONRES mondo queue base phys address into %g5.  */
+	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
+
+	/* Get NONRES kernel buffer base phys address into %g7.  */
+	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
+
+	/* If the first word is non-zero, queue is full.  */
+	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
+	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
+	 nop
+
+	/* Remember this entry's offset in %g1.  */
+	mov	%g2, %g1
+
+	/* Copy 64-byte queue entry into kernel buffer.  */
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
+	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
+	add	%g2, 0x08, %g2
+
+	/* Update queue head pointer.  */
+	sethi	%hi(8192 - 1), %g4
+	or	%g4, %lo(8192 - 1), %g4
+	and	%g2, %g4, %g2
+
+	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
+	stxa	%g2, [%g4] ASI_QUEUE
+	membar	#Sync
+
+	/* Disable interrupts and save register state so we can call
+	 * C code.  The etrap handling will leave %g4 in %l4 for us
+	 * when it's done.
+	 */
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	mov	%g1, %g4
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	/* Log the event.  */
+	add	%sp, PTREGS_OFF, %o0
+	call	sun4v_nonresum_error
+	 mov	%l4, %o1
+
+	/* Return from trap.  */
+	ba,pt	%xcc, rtrap_irq
+	 nop
+
+sun4v_nonres_mondo_queue_empty:
+	retry
+
+sun4v_nonres_mondo_queue_full:
+	/* The queue is full, consolidate our damage by setting
+	 * the head equal to the tail.  We'll just trap again otherwise.
+	 * Call C code to log the event.
+	 */
+	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
+	stxa	%g4, [%g2] ASI_QUEUE
+	membar	#Sync
+
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+	call	sun4v_nonresum_overflow
+	 add	%sp, PTREGS_OFF, %o0
+
+	ba,pt	%xcc, rtrap_irq
+	 nop
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 8f3fce24359..5417ff1b934 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1668,6 +1668,186 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
 	       regs->tpc);
 }
 
+struct sun4v_error_entry {
+	u64		err_handle;
+	u64		err_stick;
+
+	u32		err_type;
+#define SUN4V_ERR_TYPE_UNDEFINED	0
+#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
+#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
+#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
+#define SUN4V_ERR_TYPE_WARNING_RES	4
+
+	u32		err_attrs;
+#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
+#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
+#define SUN4V_ERR_ATTRS_PIO		0x00000004
+#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
+#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
+#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
+#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
+#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000
+
+	u64		err_raddr;
+	u32		err_size;
+	u16		err_cpu;
+	u16		err_pad;
+};
+
+static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+static const char *sun4v_err_type_to_str(u32 type)
+{
+	switch (type) {
+	case SUN4V_ERR_TYPE_UNDEFINED:
+		return "undefined";
+	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
+		return "uncorrected resumable";
+	case SUN4V_ERR_TYPE_PRECISE_NONRES:
+		return "precise nonresumable";
+	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
+		return "deferred nonresumable";
+	case SUN4V_ERR_TYPE_WARNING_RES:
+		return "warning resumable";
+	default:
+		return "unknown";
+	}
+}
+
+static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+{
+	int cnt;
+
+	printk("%s: Reporting on cpu %d\n", pfx, cpu);
+	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
+	       pfx,
+	       ent->err_handle, ent->err_stick,
+	       ent->err_type,
+	       sun4v_err_type_to_str(ent->err_type));
+	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
+	       pfx,
+	       ent->err_attrs,
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
+		"processor" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
+		"memory" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
+		"pio" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
+		"integer-regs" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
+		"fpu-regs" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
+		"user" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
+		"privileged" : ""),
+	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
+		"queue-full" : ""));
+	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
+	       pfx,
+	       ent->err_raddr, ent->err_size, ent->err_cpu);
+
+	if ((cnt = atomic_read(ocnt)) != 0) {
+		atomic_set(ocnt, 0);
+		wmb();
+		printk("%s: Queue overflowed %d times.\n",
+		       pfx, cnt);
+	}
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event and clear the first word of the entry.
+ */
+void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+{
+	struct sun4v_error_entry *ent, local_copy;
+	struct trap_per_cpu *tb;
+	unsigned long paddr;
+	int cpu;
+
+	cpu = get_cpu();
+
+	tb = &trap_block[cpu];
+	paddr = tb->resum_kernel_buf_pa + offset;
+	ent = __va(paddr);
+
+	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+	/* We have a local copy now, so release the entry.  */
+	ent->err_handle = 0;
+	wmb();
+
+	put_cpu();
+
+	sun4v_log_error(&local_copy, cpu,
+			KERN_ERR "RESUMABLE ERROR",
+			&sun4v_resum_oflow_cnt);
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_resum_overflow(struct pt_regs *regs)
+{
+	atomic_inc(&sun4v_resum_oflow_cnt);
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event, clear the first word of the entry, and die.
+ */
+void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
+{
+	struct sun4v_error_entry *ent, local_copy;
+	struct trap_per_cpu *tb;
+	unsigned long paddr;
+	int cpu;
+
+	cpu = get_cpu();
+
+	tb = &trap_block[cpu];
+	paddr = tb->nonresum_kernel_buf_pa + offset;
+	ent = __va(paddr);
+
+	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+	/* We have a local copy now, so release the entry.  */
+	ent->err_handle = 0;
+	wmb();
+
+	put_cpu();
+
+#ifdef CONFIG_PCI
+	/* Check for the special PCI poke sequence. */
+	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
+		pci_poke_faulted = 1;
+		regs->tpc += 4;
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+
+	sun4v_log_error(&local_copy, cpu,
+			KERN_EMERG "NON-RESUMABLE ERROR",
+			&sun4v_nonresum_oflow_cnt);
+
+	panic("Non-resumable error.");
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_nonresum_overflow(struct pt_regs *regs)
+{
+	/* XXX Actually even this may not make much sense.  Perhaps
+	 * XXX we should just pull the plug and panic directly from here?
+	 */
+	atomic_inc(&sun4v_nonresum_oflow_cnt);
+}
+
 void do_fpe_common(struct pt_regs *regs)
 {
 	if (regs->tstate & TSTATE_PRIV) {
@@ -2190,8 +2370,12 @@ void __init trap_init(void)
 	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
 	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
 	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
+	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
 	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
 	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
 	    (TRAP_PER_CPU_FAULT_INFO !=
 	     offsetof(struct trap_per_cpu, fault_info)))
 		trap_per_cpu_offsets_are_bolixed_dave();
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 2679b6e253a..1608ba4bf1c 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -88,7 +88,10 @@ tl0_dcpe:	BTRAP(0x71)	/* D-cache Parity Error on Cheetah+ */
 tl0_icpe:	BTRAP(0x72)	/* I-cache Parity Error on Cheetah+ */
 tl0_resv073:	BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
 tl0_resv076:	BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
-tl0_resv07c:	BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
+tl0_cpu_mondo:	TRAP_NOSAVE(sun4v_cpu_mondo)
+tl0_dev_mondo:	TRAP_NOSAVE(sun4v_dev_mondo)
+tl0_res_mondo:	TRAP_NOSAVE(sun4v_res_mondo)
+tl0_nres_mondo:	TRAP_NOSAVE(sun4v_nonres_mondo)
 tl0_s0n:	SPILL_0_NORMAL
 tl0_s1n:	SPILL_1_NORMAL
 tl0_s2n:	SPILL_2_NORMAL
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 492314b5347..7f0a74ec47f 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -53,16 +53,17 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
  */
 struct thread_info;
 struct trap_per_cpu {
-/* D-cache line 1: Basic thread information */
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
 	struct thread_info	*thread;
 	unsigned long		pgd_paddr;
-	unsigned long		__pad1[2];
-
-/* D-cache line 2: Sun4V Mondo Queue pointers */
 	unsigned long		cpu_mondo_pa;
 	unsigned long		dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
 	unsigned long		resum_mondo_pa;
+	unsigned long		resum_kernel_buf_pa;
 	unsigned long		nonresum_mondo_pa;
+	unsigned long		nonresum_kernel_buf_pa;
 
 /* Dcache lines 3 and 4: Hypervisor Fault Status */
 	struct hv_fault_status	fault_info;
@@ -100,10 +101,12 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 
 #define TRAP_PER_CPU_THREAD		0x00
 #define TRAP_PER_CPU_PGD_PADDR		0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA	0x20
-#define TRAP_PER_CPU_DEV_MONDO_PA	0x28
-#define TRAP_PER_CPU_RESUM_MONDO_PA	0x30
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x38
+#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
 #define TRAP_PER_CPU_FAULT_INFO		0x40
 
 #define TRAP_BLOCK_SZ_SHIFT		7
@@ -188,6 +191,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 
 #else
 
+#define __GET_CPUID(REG)				\
+	mov	0, REG;
+
 /* Uniprocessor versions, we know the cpuid is zero.  */
 #define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
 	sethi	%hi(trap_block), DEST;		\
-- 
cgit v1.2.3-70-g09d2


From 1d2f1f90a1e004b0c1b8a73ed4394a93f09104b3 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 8 Feb 2006 16:41:20 -0800
Subject: [SPARC64]: Sun4v cross-call sending support.

Technically the hypervisor call supports passing in a list of all
the cpus which should receive the cross-call, but I only pass in
one cpu at a time for now.

The multi-cpu support is there, just ifdef'd out so it's easy to
enable or delete it later.
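
The single-cpu loop added below follows the sun4v fast trap
convention: the hypervisor function number goes in %o0, the
arguments in %o1-%o3, "ta HV_FAST_TRAP" enters the hypervisor, and
the status comes back in %o0.  As a minimal standalone sketch of
that convention (the wrapper name is invented for illustration; the
constants are the asm/hypervisor.h ones used in the diff):

static unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
					  unsigned long cpu_list_pa,
					  unsigned long mondo_block_pa)
{
	register unsigned long func asm("%o0") = HV_FAST_CPU_MONDO_SEND;
	register unsigned long arg0 asm("%o1") = cpu_count;
	register unsigned long arg1 asm("%o2") = cpu_list_pa;
	register unsigned long arg2 asm("%o3") = mondo_block_pa;

	/* Outputs are operands 0-3, so the "i" input is operand 8.  */
	__asm__ __volatile__("ta	%8"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2)
			     : "0" (func), "1" (arg0),
			       "2" (arg1), "3" (arg2),
			       "i" (HV_FAST_TRAP)
			     : "memory");

	/* HV_EOK on success, an error code otherwise.  */
	return func;
}

The callers retry on a non-HV_EOK status, backing off with udelay()
and giving up after 100 attempts, as the loops below show.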

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/irq.c     |  22 ++++++++
 arch/sparc64/kernel/smp.c     | 125 +++++++++++++++++++++++++++++++++++++++++-
 arch/sparc64/kernel/traps.c   |   6 +-
 include/asm-sparc64/cpudata.h |  14 ++++-
 4 files changed, 163 insertions(+), 4 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ff201c007e0..c80d2531ec4 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -900,6 +900,24 @@ static void __cpuinit init_one_kbuf(unsigned long *pa_ptr)
 	*pa_ptr = __pa(page);
 }
 
+static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb)
+{
+#ifdef CONFIG_SMP
+	unsigned long page;
+
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+
+	page = get_zeroed_page(GFP_ATOMIC);
+	if (!page) {
+		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_halt();
+	}
+
+	tb->cpu_mondo_block_pa = __pa(page);
+	tb->cpu_list_pa = __pa(page + 64);
+#endif
+}
+
 /* Allocate and init the mondo and error queues for this cpu.  */
 void __cpuinit sun4v_init_mondo_queues(void)
 {
@@ -908,10 +926,14 @@ void __cpuinit sun4v_init_mondo_queues(void)
 
 	init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
 	init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
+
 	init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
 	init_one_kbuf(&tb->resum_kernel_buf_pa);
+
 	init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
 	init_one_kbuf(&tb->nonresum_kernel_buf_pa);
+
+	init_cpu_send_mondo_info(tb);
 }
 
 /* Only invoked on boot processor. */
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 223cc6bd369..c10a3a8639e 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -531,10 +531,133 @@ retry:
 	}
 }
 
+#if 0
+/* Multi-cpu list version.  */
+static int init_cpu_list(u16 *list, cpumask_t mask)
+{
+	int i, cnt;
+
+	cnt = 0;
+	for_each_cpu_mask(i, mask)
+		list[cnt++] = i;
+
+	return cnt;
+}
+
+static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
+{
+	int i;
+
+	for (i = 0; i < orig_cnt; i++) {
+		if (list[i] == 0xffff)
+			cpu_clear(i, mask);
+	}
+
+	return init_cpu_list(list, mask);
+}
+
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+	int this_cpu = get_cpu();
+	struct trap_per_cpu *tb = &trap_block[this_cpu];
+	u64 *mondo = __va(tb->cpu_mondo_block_pa);
+	u16 *cpu_list = __va(tb->cpu_list_pa);
+	int cnt, retries;
+
+	mondo[0] = data0;
+	mondo[1] = data1;
+	mondo[2] = data2;
+	wmb();
+
+	retries = 0;
+	cnt = init_cpu_list(cpu_list, mask);
+	do {
+		register unsigned long func __asm__("%o0");
+		register unsigned long arg0 __asm__("%o1");
+		register unsigned long arg1 __asm__("%o2");
+		register unsigned long arg2 __asm__("%o3");
+
+		func = HV_FAST_CPU_MONDO_SEND;
+		arg0 = cnt;
+		arg1 = tb->cpu_list_pa;
+		arg2 = tb->cpu_mondo_block_pa;
+
+		__asm__ __volatile__("ta	%8"
+				     : "=&r" (func), "=&r" (arg0),
+				       "=&r" (arg1), "=&r" (arg2)
+				     : "0" (func), "1" (arg0),
+				       "2" (arg1), "3" (arg2),
+				       "i" (HV_FAST_TRAP)
+				     : "memory");
+		if (likely(func == HV_EOK))
+			break;
+
+		if (unlikely(++retries > 100)) {
+			printk("CPU[%d]: sun4v mondo error %lu\n",
+			       this_cpu, func);
+			break;
+		}
+
+		cnt = update_cpu_list(cpu_list, cnt, mask);
+
+		udelay(2 * cnt);
+	} while (1);
+
+	put_cpu();
+}
+#else
+/* Single-cpu list version.  */
 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-	/* XXX implement me */
+	int this_cpu = get_cpu();
+	struct trap_per_cpu *tb = &trap_block[this_cpu];
+	u64 *mondo = __va(tb->cpu_mondo_block_pa);
+	u16 *cpu_list = __va(tb->cpu_list_pa);
+	int i;
+
+	mondo[0] = data0;
+	mondo[1] = data1;
+	mondo[2] = data2;
+	wmb();
+
+	for_each_cpu_mask(i, mask) {
+		int retries = 0;
+
+		do {
+			register unsigned long func __asm__("%o0");
+			register unsigned long arg0 __asm__("%o1");
+			register unsigned long arg1 __asm__("%o2");
+			register unsigned long arg2 __asm__("%o3");
+
+			cpu_list[0] = i;
+			func = HV_FAST_CPU_MONDO_SEND;
+			arg0 = 1;
+			arg1 = tb->cpu_list_pa;
+			arg2 = tb->cpu_mondo_block_pa;
+
+			__asm__ __volatile__("ta	%8"
+					     : "=&r" (func), "=&r" (arg0),
+					       "=&r" (arg1), "=&r" (arg2)
+					     : "0" (func), "1" (arg0),
+					       "2" (arg1), "3" (arg2),
+					       "i" (HV_FAST_TRAP)
+					     : "memory");
+			if (likely(func == HV_EOK))
+				break;
+
+			if (unlikely(++retries > 100)) {
+				printk("CPU[%d]: sun4v mondo error %lu\n",
+				       this_cpu, func);
+				break;
+			}
+
+			udelay(2 * i);
+		} while (1);
+	}
+
+	put_cpu();
 }
+#endif
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 5417ff1b934..ac171161e79 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2377,7 +2377,11 @@ void __init trap_init(void)
 	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
 	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
 	    (TRAP_PER_CPU_FAULT_INFO !=
-	     offsetof(struct trap_per_cpu, fault_info)))
+	     offsetof(struct trap_per_cpu, fault_info)) ||
+	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+	    (TRAP_PER_CPU_CPU_LIST_PA !=
+	     offsetof(struct trap_per_cpu, cpu_list_pa)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
 	/* Attach to the address space of init_task.  On SMP we
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 7f0a74ec47f..338b0ca5b51 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -65,8 +65,16 @@ struct trap_per_cpu {
 	unsigned long		nonresum_mondo_pa;
 	unsigned long		nonresum_kernel_buf_pa;
 
-/* Dcache lines 3 and 4: Hypervisor Fault Status */
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
 	struct hv_fault_status	fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list.  */
+	unsigned long		cpu_mondo_block_pa;
+	unsigned long		cpu_list_pa;
+	unsigned long		__pad1[2];
+
+/* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size.  */
+	unsigned long		__pad2[4];
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(void);
@@ -108,8 +116,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
 #define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
 #define TRAP_PER_CPU_FAULT_INFO		0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
 
-#define TRAP_BLOCK_SZ_SHIFT		7
+#define TRAP_BLOCK_SZ_SHIFT		8
 
 #include <asm/scratchpad.h>
 
-- 
cgit v1.2.3-70-g09d2


From d82ace7dc4073b090a55b9740700e32b9a9ae302 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 02:52:44 -0800
Subject: [SPARC64]: Detect sun4v early in boot process.

We look for "SUNW,sun4v" in the 'compatible' property
of the root OBP device tree node.

Protect every %ver register access, to make sure it is
not touched on sun4v, as %ver is hyperprivileged there.

Lock kernel TLB entries using hypervisor calls instead of
calls into OBP.
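
In rough C, the detection is a fixed-length compare of the root
node's "compatible" property against "SUNW,sun4v".  A sketch, with
an invented helper name; the real check runs in head.S assembly,
before the kernel has even been remapped:

#include <linux/string.h>

extern const char prom_root_compatible[];

static int this_is_sun4v(void)
{
	/* head.S compares exactly 10 bytes, the length of
	 * "SUNW,sun4v".
	 */
	return !strncmp(prom_root_compatible, "SUNW,sun4v", 10);
}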

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/cpu.c          |  6 +++
 arch/sparc64/kernel/head.S         | 96 ++++++++++++++++++++++++++++++++++----
 arch/sparc64/kernel/irq.c          | 78 +++++++++++++++++--------------
 arch/sparc64/kernel/setup.c        |  9 ++--
 arch/sparc64/kernel/trampoline.S   | 62 +++++++++++++++++++++---
 arch/sparc64/kernel/us2e_cpufreq.c |  3 ++
 arch/sparc64/kernel/us3_cpufreq.c  |  3 ++
 arch/sparc64/mm/init.c             | 58 ++++++++++++++++++-----
 arch/sparc64/prom/init.c           | 20 ++++----
 arch/sparc64/prom/tree.c           |  9 ++--
 include/asm-sparc64/head.h         |  6 +++
 include/asm-sparc64/oplib.h        |  3 ++
 12 files changed, 273 insertions(+), 80 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 00eed88ef2e..c7a4fb39702 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -71,6 +71,12 @@ void __init cpu_probe(void)
 	unsigned long ver, fpu_vers, manuf, impl, fprs;
 	int i;
 	
+	if (tlb_type == hypervisor) {
+		sparc_cpu_type = "UltraSparc T1 (Niagara)";
+		sparc_fpu_type = "UltraSparc T1 integrated FPU";
+		return;
+	}
+
 	fprs = fprs_read();
 	fprs_write(FPRS_FEF);
 	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 01980014aea..d048f0dfd42 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -95,12 +95,17 @@ sparc64_boot:
 	wrpr	%g1, 0x0, %pstate
 	ba,a,pt	%xcc, 1f
 
-	.globl	prom_finddev_name, prom_chosen_path
-	.globl	prom_getprop_name, prom_mmu_name
-	.globl	prom_callmethod_name, prom_translate_name
+	.globl	prom_finddev_name, prom_chosen_path, prom_root_node
+	.globl	prom_getprop_name, prom_mmu_name, prom_peer_name
+	.globl	prom_callmethod_name, prom_translate_name, prom_root_compatible
 	.globl	prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
 	.globl	prom_boot_mapped_pc, prom_boot_mapping_mode
 	.globl	prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
+	.globl	is_sun4v
+prom_peer_name:
+	.asciz	"peer"
+prom_compatible_name:
+	.asciz	"compatible"
 prom_finddev_name:
 	.asciz	"finddevice"
 prom_chosen_path:
@@ -117,7 +122,13 @@ prom_map_name:
 	.asciz	"map"
 prom_unmap_name:
 	.asciz	"unmap"
+prom_sun4v_name:
+	.asciz	"SUNW,sun4v"
 	.align	4
+prom_root_compatible:
+	.skip	64
+prom_root_node:
+	.word	0
 prom_mmu_ihandle_cache:
 	.word	0
 prom_boot_mapped_pc:
@@ -129,8 +140,54 @@ prom_boot_mapping_phys_high:
 	.xword	0
 prom_boot_mapping_phys_low:
 	.xword	0
+is_sun4v:
+	.word	0
 1:
 	rd	%pc, %l0
+
+	mov	(1b - prom_peer_name), %l1
+	sub	%l0, %l1, %l1
+	mov	0, %l2
+
+	/* prom_root_node = prom_peer(0) */
+	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "peer"
+	mov	1, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 1
+	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	stx	%l2, [%sp + 2047 + 128 + 0x18]	! arg1, 0
+	stx	%g0, [%sp + 2047 + 128 + 0x20]	! ret1
+	call	%l7
+	 add	%sp, (2047 + 128), %o0		! argument array
+
+	ldx	[%sp + 2047 + 128 + 0x20], %l4	! prom root node
+	mov	(1b - prom_root_node), %l1
+	sub	%l0, %l1, %l1
+	stw	%l4, [%l1]
+
+	mov	(1b - prom_getprop_name), %l1
+	mov	(1b - prom_compatible_name), %l2
+	mov	(1b - prom_root_compatible), %l5
+	sub	%l0, %l1, %l1
+	sub	%l0, %l2, %l2
+	sub	%l0, %l5, %l5
+
+	/* prom_getproperty(prom_root_node, "compatible",
+	 *                  &prom_root_compatible, 64)
+	 */
+	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "getprop"
+	mov	4, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 4
+	mov	1, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	stx	%l4, [%sp + 2047 + 128 + 0x18]	! arg1, prom_root_node
+	stx	%l2, [%sp + 2047 + 128 + 0x20]	! arg2, "compatible"
+	stx	%l5, [%sp + 2047 + 128 + 0x28]	! arg3, &prom_root_compatible
+	mov	64, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4, size
+	stx	%g0, [%sp + 2047 + 128 + 0x38]	! ret1
+	call	%l7
+	 add	%sp, (2047 + 128), %o0		! argument array
+
 	mov	(1b - prom_finddev_name), %l1
 	mov	(1b - prom_chosen_path), %l2
 	mov	(1b - prom_boot_mapped_pc), %l3
@@ -239,6 +296,27 @@ prom_boot_mapping_phys_low:
 	add	%sp, (192 + 128), %sp
 
 sparc64_boot_after_remap:
+	sethi	%hi(prom_root_compatible), %g1
+	or	%g1, %lo(prom_root_compatible), %g1
+	sethi	%hi(prom_sun4v_name), %g7
+	or	%g7, %lo(prom_sun4v_name), %g7
+	mov	10, %g3
+1:	ldub	[%g7], %g2
+	ldub	[%g1], %g4
+	cmp	%g2, %g4
+	bne,pn	%icc, 2f
+	 add	%g7, 1, %g7
+	subcc	%g3, 1, %g3
+	bne,pt	%xcc, 1b
+	 add	%g1, 1, %g1
+
+	sethi	%hi(is_sun4v), %g1
+	or	%g1, %lo(is_sun4v), %g1
+	mov	1, %g7
+	stw	%g7, [%g1]
+
+2:
+	BRANCH_IF_SUN4V(g1, jump_to_sun4u_init)
 	BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
 	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
 	ba,pt	%xcc, spitfire_boot
@@ -323,14 +401,12 @@ sun4u_init:
 
 	membar	#Sync
 
-	BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
+	BRANCH_IF_SUN4V(g1, niagara_tlb_fixup)
+	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
 
 	ba,pt	%xcc, spitfire_tlb_fixup
 	 nop
 
-	/* XXX Nothing branches to here yet, when %ver register indicates
-	 * XXX Niagara we should do this.
-	 */
 niagara_tlb_fixup:
 	mov	3, %g2		/* Set TLB type to hypervisor. */
 	sethi	%hi(tlb_type), %g1
@@ -346,6 +422,9 @@ niagara_tlb_fixup:
 	call	hypervisor_patch_cachetlbops
 	 nop
 
+	ba,pt	%xcc, tlb_fixup_done
+	 nop
+
 cheetah_tlb_fixup:
 	mov	2, %g2		/* Set TLB type to cheetah+. */
 	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
@@ -464,7 +543,8 @@ setup_trap_table:
 	sllx	%o2, 32, %o2
 	wr	%o2, 0, %tick_cmpr
 
-	BRANCH_IF_ANY_CHEETAH(o2,o3,1f)
+	BRANCH_IF_SUN4V(o2, 1f)
+	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
 
 	ba,pt	%xcc, 2f
 	 nop
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c80d2531ec4..1f6455503f2 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -150,47 +150,53 @@ void enable_irq(unsigned int irq)
 
 	preempt_disable();
 
-	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		unsigned long ver;
-
-		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-		if ((ver >> 32) == __JALAPENO_ID ||
-		    (ver >> 32) == __SERRANO_ID) {
-			/* We set it to our JBUS ID. */
+	if (tlb_type == hypervisor) {
+		/* XXX SUN4V: implement me... XXX */
+	} else {
+		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+			unsigned long ver;
+
+			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+			if ((ver >> 32) == __JALAPENO_ID ||
+			    (ver >> 32) == __SERRANO_ID) {
+				/* We set it to our JBUS ID. */
+				__asm__ __volatile__("ldxa [%%g0] %1, %0"
+						     : "=r" (tid)
+						     : "i" (ASI_JBUS_CONFIG));
+				tid = ((tid & (0x1fUL<<17)) << 9);
+				tid &= IMAP_TID_JBUS;
+			} else {
+				/* We set it to our Safari AID. */
+				__asm__ __volatile__("ldxa [%%g0] %1, %0"
+						     : "=r" (tid)
+						     : "i"(ASI_SAFARI_CONFIG));
+				tid = ((tid & (0x3ffUL<<17)) << 9);
+				tid &= IMAP_AID_SAFARI;
+			}
+		} else if (this_is_starfire == 0) {
+			/* We set it to our UPA MID. */
 			__asm__ __volatile__("ldxa [%%g0] %1, %0"
 					     : "=r" (tid)
-					     : "i" (ASI_JBUS_CONFIG));
-			tid = ((tid & (0x1fUL<<17)) << 9);
-			tid &= IMAP_TID_JBUS;
+					     : "i" (ASI_UPA_CONFIG));
+			tid = ((tid & UPA_CONFIG_MID) << 9);
+			tid &= IMAP_TID_UPA;
 		} else {
-			/* We set it to our Safari AID. */
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (tid)
-					     : "i" (ASI_SAFARI_CONFIG));
-			tid = ((tid & (0x3ffUL<<17)) << 9);
-			tid &= IMAP_AID_SAFARI;
+			tid = (starfire_translate(imap,
+						  smp_processor_id()) << 26);
+			tid &= IMAP_TID_UPA;
 		}
-	} else if (this_is_starfire == 0) {
-		/* We set it to our UPA MID. */
-		__asm__ __volatile__("ldxa [%%g0] %1, %0"
-				     : "=r" (tid)
-				     : "i" (ASI_UPA_CONFIG));
-		tid = ((tid & UPA_CONFIG_MID) << 9);
-		tid &= IMAP_TID_UPA;
-	} else {
-		tid = (starfire_translate(imap, smp_processor_id()) << 26);
-		tid &= IMAP_TID_UPA;
-	}
 
-	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
-	 * of this SYSIO's preconfigured IGN in the SYSIO Control
-	 * Register, the hardware just mirrors that value here.
-	 * However for Graphics and UPA Slave devices the full
-	 * IMAP_INR field can be set by the programmer here.
-	 *
-	 * Things like FFB can now be handled via the new IRQ mechanism.
-	 */
-	upa_writel(tid | IMAP_VALID, imap);
+		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
+		 * of this SYSIO's preconfigured IGN in the SYSIO Control
+		 * Register, the hardware just mirrors that value here.
+		 * However for Graphics and UPA Slave devices the full
+		 * IMAP_INR field can be set by the programmer here.
+		 *
+		 * Things like FFB can now be handled via the new IRQ
+		 * mechanism.
+		 */
+		upa_writel(tid | IMAP_VALID, imap);
+	}
 
 	preempt_enable();
 }
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 2d64320d3a4..7f02c8f71df 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -504,9 +504,12 @@ static void __init per_cpu_patch(void)
 	if (tlb_type == spitfire && !this_is_starfire)
 		return;
 
-	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
-		   (ver >> 32) == __SERRANO_ID);
+	is_jbus = 0;
+	if (tlb_type != hypervisor) {
+		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+		is_jbus = ((ver >> 32) == __JALAPENO_ID ||
+			   (ver >> 32) == __SERRANO_ID);
+	}
 
 	p = &__cpuid_patch;
 	while (p < &__cpuid_patch_end) {
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index fbf844f84a4..ffa8b79632c 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -16,6 +16,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/mmu.h>
+#include <asm/hypervisor.h>
 
 	.data
 	.align	8
@@ -34,8 +35,9 @@ dtlb_load:
 sparc64_cpu_startup:
 	flushw
 
-	BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup)
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)
+	BRANCH_IF_SUN4V(g1, niagara_startup)
+	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)
 
 	ba,pt	%xcc, spitfire_startup
 	 nop
@@ -70,7 +72,9 @@ cheetah_generic_startup:
 	stxa	%g0, [%g3] ASI_DMMU
 	stxa	%g0, [%g3] ASI_IMMU
 	membar	#Sync
+	/* fallthru */
 
+niagara_startup:
 	/* Disable STICK_INT interrupts. */
 	sethi		%hi(0x80000000), %g5
 	sllx		%g5, 32, %g5
@@ -91,6 +95,8 @@ startup_continue:
 	sllx		%g2, 32, %g2
 	wr		%g2, 0, %tick_cmpr
 
+	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)
+
 	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
 	 * We lock 2 consecutive entries if we are 'bigkernel'.
 	 */
@@ -142,8 +148,7 @@ startup_continue:
 
 	sethi		%hi(bigkernel), %g2
 	lduw		[%g2 + %lo(bigkernel)], %g2
-	cmp		%g2, 0
-	be,pt		%icc, do_dtlb
+	brz,pt		%g2, do_dtlb
 	 nop
 
 	sethi		%hi(call_method), %g2
@@ -214,8 +219,7 @@ do_dtlb:
 
 	sethi		%hi(bigkernel), %g2
 	lduw		[%g2 + %lo(bigkernel)], %g2
-	cmp		%g2, 0
-	be,pt		%icc, do_unlock
+	brz,pt		%g2, do_unlock
 	 nop
 
 	sethi		%hi(call_method), %g2
@@ -257,6 +261,52 @@ do_unlock:
 	stb		%g0, [%g2 + %lo(prom_entry_lock)]
 	membar		#StoreStore | #StoreLoad
 
+	ba,pt		%xcc, after_lock_tlb
+	 nop
+
+niagara_lock_tlb:
+	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi		%hi(KERNBASE), %o1
+	clr		%o2
+	sethi		%hi(kern_locked_tte_data), %o3
+	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
+	mov		HV_MMU_IMMU, %o4
+	ta		HV_FAST_TRAP
+
+	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi		%hi(KERNBASE), %o1
+	clr		%o2
+	sethi		%hi(kern_locked_tte_data), %o3
+	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
+	mov		HV_MMU_DMMU, %o4
+	ta		HV_FAST_TRAP
+
+	sethi		%hi(bigkernel), %g2
+	lduw		[%g2 + %lo(bigkernel)], %g2
+	brz,pt		%g2, after_lock_tlb
+	 nop
+
+	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi		%hi(KERNBASE + 0x400000), %o1
+	clr		%o2
+	sethi		%hi(kern_locked_tte_data), %o3
+	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
+	sethi		%hi(0x400000), %o4
+	add		%o3, %o4, %o3
+	mov		HV_MMU_IMMU, %o4
+	ta		HV_FAST_TRAP
+
+	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi		%hi(KERNBASE + 0x400000), %o1
+	clr		%o2
+	sethi		%hi(kern_locked_tte_data), %o3
+	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
+	sethi		%hi(0x400000), %o4
+	add		%o3, %o4, %o3
+	mov		HV_MMU_DMMU, %o4
+	ta		HV_FAST_TRAP
+
+after_lock_tlb:
 	mov		%l1, %sp
 	flushw
 
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
index b35dc8dc995..669fb83dd4f 100644
--- a/arch/sparc64/kernel/us2e_cpufreq.c
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -346,6 +346,9 @@ static int __init us2e_freq_init(void)
 	unsigned long manuf, impl, ver;
 	int ret;
 
+	if (tlb_type != spitfire)
+		return -ENODEV;
+
 	__asm__("rdpr %%ver, %0" : "=r" (ver));
 	manuf = ((ver >> 48) & 0xffff);
 	impl  = ((ver >> 32) & 0xffff);
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 6d1f9a3c464..a912c45bdc0 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -203,6 +203,9 @@ static int __init us3_freq_init(void)
 	unsigned long manuf, impl, ver;
 	int ret;
 
+	if (tlb_type != cheetah && tlb_type != cheetah_plus)
+		return -ENODEV;
+
 	__asm__("rdpr %%ver, %0" : "=r" (ver));
 	manuf = ((ver >> 48) & 0xffff);
 	impl  = ((ver >> 32) & 0xffff);
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 6504d6eb537..e602b857071 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -514,6 +514,29 @@ static void __init read_obp_translations(void)
 	}
 }
 
+static void __init hypervisor_tlb_lock(unsigned long vaddr,
+				       unsigned long pte,
+				       unsigned long mmu)
+{
+	register unsigned long func asm("%o0");
+	register unsigned long arg0 asm("%o1");
+	register unsigned long arg1 asm("%o2");
+	register unsigned long arg2 asm("%o3");
+	register unsigned long arg3 asm("%o4");
+
+	func = HV_FAST_MMU_MAP_PERM_ADDR;
+	arg0 = vaddr;
+	arg1 = 0;
+	arg2 = pte;
+	arg3 = mmu;
+	__asm__ __volatile__("ta	0x80"
+			     : "=&r" (func), "=&r" (arg0),
+			       "=&r" (arg1), "=&r" (arg2),
+			       "=&r" (arg3)
+			     : "0" (func), "1" (arg0), "2" (arg1),
+			       "3" (arg2), "4" (arg3));
+}
+
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
@@ -527,19 +550,30 @@ static void __init remap_kernel(void)
 
 	kern_locked_tte_data = tte_data;
 
-	/* Now lock us into the TLBs via OBP. */
-	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-	if (bigkernel) {
-		tlb_ent -= 1;
-		prom_dtlb_load(tlb_ent,
-			       tte_data + 0x400000, 
-			       tte_vaddr + 0x400000);
-		prom_itlb_load(tlb_ent,
-			       tte_data + 0x400000, 
-			       tte_vaddr + 0x400000);
+	/* Now lock us into the TLBs via Hypervisor or OBP. */
+	if (tlb_type == hypervisor) {
+		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
+		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+		if (bigkernel) {
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
+			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
+			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+		}
+	} else {
+		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
+		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
+		if (bigkernel) {
+			tlb_ent -= 1;
+			prom_dtlb_load(tlb_ent,
+				       tte_data + 0x400000, 
+				       tte_vaddr + 0x400000);
+			prom_itlb_load(tlb_ent,
+				       tte_data + 0x400000, 
+				       tte_vaddr + 0x400000);
+		}
+		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
 	}
-	sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
 	if (tlb_type == cheetah_plus) {
 		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
 					    CTX_CHEETAH_PLUS_NUC);
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index f3cc2d8578b..095755e428a 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -18,7 +18,6 @@ enum prom_major_version prom_vers;
 unsigned int prom_rev, prom_prev;
 
 /* The root node of the prom device tree. */
-int prom_root_node;
 int prom_stdin, prom_stdout;
 int prom_chosen_node;
 
@@ -41,26 +40,22 @@ void __init prom_init(void *cif_handler, void *cif_stack)
 
 	prom_cif_init(cif_handler, cif_stack);
 
-	prom_root_node = prom_getsibling(0);
-	if((prom_root_node == 0) || (prom_root_node == -1))
-		prom_halt();
-
 	prom_chosen_node = prom_finddevice(prom_chosen_path);
 	if (!prom_chosen_node || prom_chosen_node == -1)
 		prom_halt();
 
-	prom_stdin = prom_getint (prom_chosen_node, "stdin");
-	prom_stdout = prom_getint (prom_chosen_node, "stdout");
+	prom_stdin = prom_getint(prom_chosen_node, "stdin");
+	prom_stdout = prom_getint(prom_chosen_node, "stdout");
 
 	node = prom_finddevice("/openprom");
 	if (!node || node == -1)
 		prom_halt();
 
-	prom_getstring (node, "version", buffer, sizeof (buffer));
+	prom_getstring(node, "version", buffer, sizeof (buffer));
 
-	prom_printf ("\n");
+	prom_printf("\n");
 
-	if (strncmp (buffer, "OBP ", 4))
+	if (strncmp(buffer, "OBP ", 4))
 		goto strange_version;
 
 	/*
@@ -70,7 +65,7 @@ void __init prom_init(void *cif_handler, void *cif_stack)
 	 * accordingly. -spot
 	 */
 
-	if (strncmp (buffer, "OBP  ", 5))
+	if (strncmp(buffer, "OBP  ", 5))
 		bufadjust = 4;
 	else
 		bufadjust = 5;
@@ -87,7 +82,8 @@ void __init prom_init(void *cif_handler, void *cif_stack)
 	prom_rev = ints[1];
 	prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2];
 
-	printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
+	printk("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
+	printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible);
 
 	/* Initialization successful. */
 	return;
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index b1ff9e87dcc..49075abd7cb 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -51,7 +51,7 @@ prom_getparent(int node)
 __inline__ int
 __prom_getsibling(int node)
 {
-	return p1275_cmd ("peer", P1275_INOUT(1, 1), node);
+	return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node);
 }
 
 __inline__ int
@@ -59,9 +59,12 @@ prom_getsibling(int node)
 {
 	int sibnode;
 
-	if(node == -1) return 0;
+	if (node == -1)
+		return 0;
 	sibnode = __prom_getsibling(node);
-	if(sibnode == -1) return 0;
+	if (sibnode == -1)
+		return 0;
+
 	return sibnode;
 }
 
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index ff76c0981b6..c4ac3e87aa5 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -24,6 +24,12 @@
 #define PANTHER_IMPL		0x0019 /* Ultra-IV+   */
 #define SERRANO_IMPL		0x0022 /* Ultra-IIIi+ */
 
+#define BRANCH_IF_SUN4V(tmp1,label)		\
+	sethi	%hi(is_sun4v), %tmp1;		\
+	lduw	[%tmp1 + %lo(is_sun4v)], %tmp1; \
+	brnz,pn	%tmp1, label;			\
+	 nop
+
 #define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label)	\
 	rdpr	%ver, %tmp1;			\
 	sethi	%hi(__CHEETAH_ID), %tmp2;	\
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 3c59b2693fb..2ea545b931b 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -39,6 +39,9 @@ extern int prom_stdin, prom_stdout;
 extern int prom_chosen_node;
 
 /* Helper values and strings in arch/sparc64/kernel/head.S */
+extern const char prom_peer_name[];
+extern const char prom_compatible_name[];
+extern const char prom_root_compatible[];
 extern const char prom_finddev_name[];
 extern const char prom_chosen_path[];
 extern const char prom_getprop_name[];
-- 
cgit v1.2.3-70-g09d2


From aa9143b9719c07fb6f1f6207790c9c5086ae07e7 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 16:12:22 -0800
Subject: [SPARC64]: Implement sun4v TSB miss handlers.

When we register a TSB with the hypervisor, so that it or hardware can
handle TLB misses and do the TSB walk for us, the hypervisor traps
down to these handlers when it incurs a TSB miss.

Processing is simple: we load the missing virtual address and context,
and do a full page table walk.
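
For reference, the TSB pointer computation that
sun4v_tsb_miss_common performs (spelled out in a comment in the
diff) looks like this in C.  The function name is illustrative
only; tsb_reg packs the TSB base address in bits 63:3 and a size
field in bits 2:0, where size N means 512 << N entries of 16 bytes
each:

static unsigned long sun4v_tsb_entry_ptr(unsigned long tsb_reg,
					 unsigned long vaddr)
{
	unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	unsigned long tsb_base = tsb_reg & ~0x7UL;
	unsigned long tsb_index = (vaddr >> PAGE_SHIFT) & index_mask;

	/* Each TSB entry is 16 bytes: an 8-byte tag plus an
	 * 8-byte TTE.
	 */
	return tsb_base + (tsb_index * 16);
}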

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/sun4v_tlb_miss.S | 51 ++++++++++++++++++++++++++++++++++++
 arch/sparc64/kernel/tsb.S            |  7 +++--
 arch/sparc64/kernel/ttable.S         | 15 ++++++-----
 include/asm-sparc64/ttable.h         | 20 ++++++++++++++
 4 files changed, 84 insertions(+), 9 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index 58ea5dd8573..b8678b5557a 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -187,6 +187,57 @@ sun4v_dtlb_prot:
 	ba,pt		%xcc, sparc64_realfault_common
 	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
 
+	/* Called from trap table with &trap_block[smp_processor_id()] in
+	 * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1.
+	 */
+sun4v_itsb_miss:
+	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+
+	srlx	%g4, 22, %g7
+	sllx	%g5, 48, %g6
+	or	%g6, %g7, %g6
+	brz,pn	%g5, kvmap_itlb_4v
+	 nop
+
+	ba,pt	%xcc, sun4v_tsb_miss_common
+	 mov	FAULT_CODE_ITLB, %g3
+
+	/* Called from trap table with &trap_block[smp_processor_id()] in
+	 * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1.
+	 */
+sun4v_dtsb_miss:
+	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+
+	srlx	%g4, 22, %g7
+	sllx	%g5, 48, %g6
+	or	%g6, %g7, %g6
+	brz,pn	%g5, kvmap_dtlb_4v
+	 nop
+
+	mov	FAULT_CODE_DTLB, %g3
+
+	/* Create TSB pointer into %g1.  This is something like:
+	 *
+	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
+	 * tsb_base = tsb_reg & ~0x7UL;
+	 * tsb_index = ((vaddr >> PAGE_SHIFT) & index_mask);
+	 * tsb_ptr = tsb_base + (tsb_index * 16);
+	 */
+sun4v_tsb_miss_common:
+	and	%g1, 0x7, %g2
+	andn	%g1, 0x7, %g1
+	mov	512, %g7
+	sllx	%g7, %g2, %g7
+	sub	%g7, 1, %g7
+	srlx	%g4, PAGE_SHIFT, %g2
+	and	%g2, %g7, %g2
+	sllx	%g2, 4, %g2
+	ba,pt	%xcc, tsb_miss_page_table_walk
+	 add	%g1, %g2, %g1
+
+
 #define BRANCH_ALWAYS	0x10680000
 #define NOP		0x01000000
 #define SUN4V_DO_PATCH(OLD, NEW)	\
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 819a6ef9799..c848c8847cd 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -35,8 +35,11 @@ tsb_miss_itlb:
 	 nop
 
 	/* The sun4v TLB miss handlers jump directly here instead
-	 * of tsb_miss_{d,i}tlb with the missing virtual address
-	 * already loaded into %g4.
+	 * of tsb_miss_{d,i}tlb with registers setup as follows:
+	 *
+	 * %g4:	missing virtual address
+	 * %g1:	TSB entry address loaded
+	 * %g6:	TAG TARGET ((vaddr >> 22) | (ctx << 48))
 	 */
 tsb_miss_page_table_walk:
 	TRAP_LOAD_PGD_PHYS(%g7, %g5)
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 1608ba4bf1c..a9d210e11eb 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -1,7 +1,6 @@
-/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $
- * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions.
+/* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions.
  *
- * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/config.h>
@@ -22,7 +21,8 @@ tl0_iax:	membar #Sync
 tl0_resv009:	BTRAP(0x9)
 tl0_iae:	membar #Sync
 		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
-tl0_resv00b:	BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
+tl0_itsb_4v:	SUN4V_ITSB_MISS
+tl0_resv00c:	BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
 tl0_ill:	membar #Sync
 		TRAP_7INSNS(do_illegal_instruction)
 tl0_privop:	TRAP(do_privop)
@@ -38,7 +38,7 @@ tl0_div0:	TRAP(do_div0)
 tl0_resv029:	BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
 tl0_resv02f:	BTRAP(0x2f)
 tl0_dax:	TRAP_NOSAVE(__spitfire_data_access_exception)
-tl0_resv031:	BTRAP(0x31)
+tl0_dtsb_4v:	SUN4V_DTSB_MISS
 tl0_dae:	membar #Sync
 		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
 tl0_resv033:	BTRAP(0x33)
@@ -185,7 +185,8 @@ tl1_iax:	TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
 tl1_resv009:	BTRAPTL1(0x9)
 tl1_iae:	membar #Sync
 		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
-tl1_resv00b:	BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
+tl1_itsb_4v:	SUN4V_ITSB_MISS
+tl1_resv00c:	BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
 tl1_ill:	TRAPTL1(do_ill_tl1)
 tl1_privop:	BTRAPTL1(0x11)
 tl1_resv012:	BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
@@ -201,7 +202,7 @@ tl1_div0:	TRAPTL1(do_div0_tl1)
 tl1_resv029:	BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
 tl1_resv02d:	BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
 tl1_dax:	TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
-tl1_resv031:	BTRAPTL1(0x31)
+tl1_dtsb_4v:	SUN4V_DTSB_MISS
 tl1_dae:	membar #Sync
 		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
 tl1_resv033:	BTRAPTL1(0x33)
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index f912f52c0c7..972f913709a 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -180,6 +180,26 @@
 #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
 #endif
 
+#define SUN4V_ITSB_MISS				\
+	mov	SCRATCHPAD_CPUID, %g1;		\
+	ldxa	[%g1] ASI_SCRATCHPAD, %g2;	\
+	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1;\
+	sethi	%hi(trap_block), %g5;		\
+	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2;	\
+	or	%g5, %lo(trap_block), %g5;	\
+	ba,pt	%xcc, sun4v_itsb_miss;		\
+	 add	%g5, %g2, %g5;
+
+#define SUN4V_DTSB_MISS				\
+	mov	SCRATCHPAD_CPUID, %g1;		\
+	ldxa	[%g1] ASI_SCRATCHPAD, %g2;	\
+	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1;\
+	sethi	%hi(trap_block), %g5;		\
+	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2;	\
+	or	%g5, %lo(trap_block), %g5;	\
+	ba,pt	%xcc, sun4v_dtsb_miss;		\
+	 add	%g5, %g2, %g5;
+
 /* Before touching these macros, you owe it to yourself to go and
  * see how arch/sparc64/kernel/winfixup.S works... -DaveM
  *
-- 
cgit v1.2.3-70-g09d2


From 618e9ed98aed924a1fc664eb6522db4a5e927043 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 17:21:53 -0800
Subject: [SPARC64]: Hypervisor TSB context switching.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/tsb.S         | 42 ++++++++++++++++++++++------------
 arch/sparc64/mm/tsb.c             | 48 ++++++++++++++++++++++++++++++++++++++-
 include/asm-sparc64/mmu.h         | 16 +++++++------
 include/asm-sparc64/mmu_context.h | 10 +++++---
 4 files changed, 90 insertions(+), 26 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index c848c8847cd..a53ec6fb769 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -4,6 +4,7 @@
  */
 
 #include <asm/tsb.h>
+#include <asm/hypervisor.h>
 
 	.text
 	.align	32
@@ -233,6 +234,7 @@ tsb_flush:
 	 * %o1:	TSB register value
 	 * %o2:	TSB virtual address
 	 * %o3:	TSB mapping locked PTE
+	 * %o4:	Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -251,30 +253,40 @@ __tsb_context_switch:
 	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-661:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
-	.section .sun4v_2insn_patch, "ax"
-	.word	661b
+	sethi	%hi(tlb_type), %g1
+	lduw	[%g1 + %lo(tlb_type)], %g1
+	cmp	%g1, 3
+	bne,pt	%icc, 1f
+	 nop
+
+	/* Hypervisor TSB switch. */
 	mov	SCRATCHPAD_UTSBREG1, %g1
 	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	.previous
+	mov	-1, %g2
+	mov	SCRATCHPAD_UTSBREG2, %g1
+	stxa	%g2, [%g1] ASI_SCRATCHPAD
 
-	membar	#Sync
+	mov	HV_FAST_MMU_TSB_CTXNON0, %o0
+	mov	1, %o1
+	mov	%o4, %o2
+	ta	HV_FAST_TRAP
+
+	ba,pt	%xcc, 9f
+	 nop
 
-661:	stxa	%o1, [%g1] ASI_IMMU
+	/* SUN4U TSB switch.  */
+1:	mov	TSB_REG, %g1
+	stxa	%o1, [%g1] ASI_DMMU
+	membar	#Sync
+	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
-	.section .sun4v_2insn_patch, "ax"
-	.word	661b
-	nop
-	nop
-	.previous
 
-	brz	%o2, 9f
+2:	brz	%o2, 9f
 	 nop
 
-	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %o4
+	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
 	mov	TLB_TAG_ACCESS, %g1
-	lduw	[%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
+	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
 	stxa	%o2, [%g1] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 2cc8e6528c6..6ae2a5a702c 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -149,7 +149,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		BUG();
 	};
 
-	if (tlb_type == cheetah_plus) {
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
@@ -166,6 +166,52 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		mm->context.tsb_map_pte = tte;
 	}
 
+	/* Setup the Hypervisor TSB descriptor.  */
+	if (tlb_type == hypervisor) {
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+
+		switch (PAGE_SIZE) {
+		case 8192:
+		default:
+			hp->pgsz_idx = HV_PGSZ_IDX_8K;
+			break;
+
+		case 64 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_64K;
+			break;
+
+		case 512 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_512K;
+			break;
+
+		case 4 * 1024 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+			break;
+	}
+		hp->assoc = 1;
+		hp->num_ttes = tsb_bytes / 16;
+		hp->ctx_idx = 0;
+		switch (PAGE_SIZE) {
+		case 8192:
+		default:
+			hp->pgsz_mask = HV_PGSZ_MASK_8K;
+			break;
+
+		case 64 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_64K;
+			break;
+
+		case 512 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_512K;
+			break;
+
+		case 4 * 1024 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+			break;
+	}
+		hp->tsb_base = tsb_paddr;
+		hp->resv = 0;
+	}
 }
 
 /* The page tables are locked against modifications while this
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 55e622711b9..473d990848e 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -4,6 +4,7 @@
 #include <linux/config.h>
 #include <asm/page.h>
 #include <asm/const.h>
+#include <asm/hypervisor.h>
 
 /*
  * For the 8k pagesize kernel, use only 10 hw context bits to optimize some
@@ -101,13 +102,14 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 
 typedef struct {
-	unsigned long	sparc64_ctx_val;
-	struct tsb	*tsb;
-	unsigned long	tsb_rss_limit;
-	unsigned long	tsb_nentries;
-	unsigned long	tsb_reg_val;
-	unsigned long	tsb_map_vaddr;
-	unsigned long	tsb_map_pte;
+	unsigned long		sparc64_ctx_val;
+	struct tsb		*tsb;
+	unsigned long		tsb_rss_limit;
+	unsigned long		tsb_nentries;
+	unsigned long		tsb_reg_val;
+	unsigned long		tsb_map_vaddr;
+	unsigned long		tsb_map_pte;
+	struct hv_tsb_descr	tsb_descr;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 2760353591a..eb660b1609c 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -22,14 +22,18 @@ extern void get_new_mmu_context(struct mm_struct *mm);
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
-extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
-				 unsigned long tsb_vaddr, unsigned long tsb_pte);
+extern void __tsb_context_switch(unsigned long pgd_pa,
+				 unsigned long tsb_reg,
+				 unsigned long tsb_vaddr,
+				 unsigned long tsb_pte,
+				 unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
 			     mm->context.tsb_map_vaddr,
-			     mm->context.tsb_map_pte);
+			     mm->context.tsb_map_pte,
+			     __pa(&mm->context.tsb_descr));
 }
 
 extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
-- 
cgit v1.2.3-70-g09d2


From 5fe91cf6254c8f23d90efb5fc11fff57dd5ab8dd Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 20:45:26 -0800
Subject: [SPARC]: Clean up idprom header files.

Delete unused macros, and use fixed sized types in
sparc32 header.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc/idprom.h   | 26 +++++++++-----------------
 include/asm-sparc64/idprom.h | 12 +-----------
 2 files changed, 10 insertions(+), 28 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc/idprom.h b/include/asm-sparc/idprom.h
index d856e640acd..59083ed8523 100644
--- a/include/asm-sparc/idprom.h
+++ b/include/asm-sparc/idprom.h
@@ -7,27 +7,19 @@
 #ifndef _SPARC_IDPROM_H
 #define _SPARC_IDPROM_H
 
-/* Offset into the EEPROM where the id PROM is located on the 4c */
-#define IDPROM_OFFSET  0x7d8
+#include <linux/types.h>
 
-/* On sun4m; physical. */
-/* MicroSPARC(-II) does not decode 31rd bit, but it works. */
-#define IDPROM_OFFSET_M  0xfd8
-
-struct idprom
-{
-	unsigned char	id_format;	/* Format identifier (always 0x01) */
-	unsigned char	id_machtype;	/* Machine type */
-	unsigned char	id_ethaddr[6];	/* Hardware ethernet address */
-	long		id_date;	/* Date of manufacture */
-	unsigned int	id_sernum:24;	/* Unique serial number */
-	unsigned char	id_cksum;	/* Checksum - xor of the data bytes */
-	unsigned char	reserved[16];
+struct idprom {
+	u8		id_format;	/* Format identifier (always 0x01) */
+	u8		id_machtype;	/* Machine type */
+	u8		id_ethaddr[6];	/* Hardware ethernet address */
+	s32		id_date;	/* Date of manufacture */
+	u32		id_sernum:24;	/* Unique serial number */
+	u8		id_cksum;	/* Checksum - xor of the data bytes */
+	u8		reserved[16];
 };
 
 extern struct idprom *idprom;
 extern void idprom_init(void);
 
-#define IDPROM_SIZE  (sizeof(struct idprom))
-
 #endif /* !(_SPARC_IDPROM_H) */
diff --git a/include/asm-sparc64/idprom.h b/include/asm-sparc64/idprom.h
index 701483c5465..77fbf987385 100644
--- a/include/asm-sparc64/idprom.h
+++ b/include/asm-sparc64/idprom.h
@@ -9,15 +9,7 @@
 
 #include <linux/types.h>
 
-/* Offset into the EEPROM where the id PROM is located on the 4c */
-#define IDPROM_OFFSET  0x7d8
-
-/* On sun4m; physical. */
-/* MicroSPARC(-II) does not decode 31rd bit, but it works. */
-#define IDPROM_OFFSET_M  0xfd8
-
-struct idprom
-{
+struct idprom {
 	u8		id_format;	/* Format identifier (always 0x01) */
 	u8		id_machtype;	/* Machine type */
 	u8		id_ethaddr[6];	/* Hardware ethernet address */
@@ -30,6 +22,4 @@ struct idprom
 extern struct idprom *idprom;
 extern void idprom_init(void);
 
-#define IDPROM_SIZE  (sizeof(struct idprom))
-
 #endif /* !(_SPARC_IDPROM_H) */
-- 
cgit v1.2.3-70-g09d2


From 8f6a93a196ba6c569c3e8daa6e81cca7e3ba81b1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 21:32:07 -0800
Subject: [SPARC64]: Beginnings of SUN4V PCI controller support.

Abstract out IOMMU operations so that we can have a different
set of calls on sun4v, which needs to do things through
hypervisor calls.
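
The abstraction is a plain ops table, selected once at probe time
depending on tlb_type.  A trimmed-down sketch with a couple of
representative hooks (the real struct in include/asm-sparc64/pci.h
also covers the scatterlist and sync operations visible in the
diff):

struct pci_iommu_ops {
	void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *);
	void (*free_consistent)(struct pci_dev *, size_t, void *,
				dma_addr_t);
	dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
	void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
};

extern struct pci_iommu_ops *pci_iommu_ops;

/* The old entry points become thin wrappers over the installed
 * table, e.g.:
 */
static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
					 dma_addr_t *dma_addrp)
{
	return pci_iommu_ops->alloc_consistent(pdev, size, dma_addrp);
}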

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/Makefile    |  2 +-
 arch/sparc64/kernel/pci.c       | 13 ++++++++
 arch/sparc64/kernel/pci_iommu.c | 27 ++++++++++-----
 arch/sparc64/kernel/pci_sun4v.c | 74 +++++++++++++++++++++++++++++++++++++++++
 include/asm-sparc64/pci.h       | 56 +++++++++++++++++++++++++------
 5 files changed, 152 insertions(+), 20 deletions(-)
 create mode 100644 arch/sparc64/kernel/pci_sun4v.c

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index a482a9ffe5b..44043390e34 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -14,7 +14,7 @@ obj-y		:= process.o setup.o cpu.o idprom.o \
 		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
 
 obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o pci_iommu.o \
-			    pci_psycho.o pci_sabre.o pci_schizo.o
+			    pci_psycho.o pci_sabre.o pci_schizo.o pci_sun4v.o
 obj-$(CONFIG_SMP)	 += smp.o trampoline.o
 obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
 obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 2ff7c32ab0c..95ffa941862 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -188,6 +188,7 @@ extern void psycho_init(int, char *);
 extern void schizo_init(int, char *);
 extern void schizo_plus_init(int, char *);
 extern void tomatillo_init(int, char *);
+extern void sun4v_pci_init(int, char *);
 
 static struct {
 	char *model_name;
@@ -204,6 +205,7 @@ static struct {
 	{ "pci108e,8002", schizo_plus_init },
 	{ "SUNW,tomatillo", tomatillo_init },
 	{ "pci108e,a801", tomatillo_init },
+	{ "SUNW,sun4v-pci", sun4v_pci_init },
 };
 #define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
 				  sizeof(pci_controller_table[0]))
@@ -283,6 +285,12 @@ int __init pcic_present(void)
 	return pci_controller_scan(pci_is_controller);
 }
 
+struct pci_iommu_ops *pci_iommu_ops;
+EXPORT_SYMBOL(pci_iommu_ops);
+
+extern struct pci_iommu_ops pci_sun4u_iommu_ops,
+	pci_sun4v_iommu_ops;
+
 /* Find each controller in the system, attach and initialize
  * software state structure for each and link into the
  * pci_controller_root.  Setup the controller enough such
@@ -290,6 +298,11 @@ int __init pcic_present(void)
  */
 static void __init pci_controller_probe(void)
 {
+	if (tlb_type == hypervisor)
+		pci_iommu_ops = &pci_sun4v_iommu_ops;
+	else
+		pci_iommu_ops = &pci_sun4u_iommu_ops;
+
 	printk("PCI: Probing for controllers.\n");
 
 	pci_controller_scan(pci_controller_init);
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index a11910be101..8e52232f6f3 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -219,7 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
  * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
  */
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -267,7 +267,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 }
 
 /* Free and unmap a consistent DMA translation. */
-void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -294,7 +294,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 /* Map a single buffer at PTR of SZ bytes for PCI DMA
  * in streaming mode.
  */
-dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -415,7 +415,7 @@ do_flush_sync:
 }
 
 /* Unmap a single streaming mode DMA translation. */
-void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -548,7 +548,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 * When making changes here, inspect the assembly output. I had a
 * hard time keeping this routine from using stack slots for holding variables.
  */
-int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -635,7 +635,7 @@ bad_no_ctx:
 }
 
 /* Unmap a set of streaming mode DMA translations. */
-void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -695,7 +695,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 /* Make physical memory consistent for a single
  * streaming mode DMA translation after a transfer.
  */
-void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -735,7 +735,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 /* Make physical memory consistent for a set of streaming
  * mode DMA translations after a transfer.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
@@ -776,6 +776,17 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
+struct pci_iommu_ops pci_sun4u_iommu_ops = {
+	.alloc_consistent		= pci_4u_alloc_consistent,
+	.free_consistent		= pci_4u_free_consistent,
+	.map_single			= pci_4u_map_single,
+	.unmap_single			= pci_4u_unmap_single,
+	.map_sg				= pci_4u_map_sg,
+	.unmap_sg			= pci_4u_unmap_sg,
+	.dma_sync_single_for_cpu	= pci_4u_dma_sync_single_for_cpu,
+	.dma_sync_sg_for_cpu		= pci_4u_dma_sync_sg_for_cpu,
+};
+
 static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
 {
 	struct pci_dev *ali_isa_bridge;
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
new file mode 100644
index 00000000000..c1a077196c5
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -0,0 +1,74 @@
+/* pci_sun4v.c: SUN4V specific PCI controller support.
+ *
+ * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+#include <asm/upa.h>
+#include <asm/pstate.h>
+#include <asm/oplib.h>
+#include <asm/hypervisor.h>
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+
+static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+{
+	return NULL;
+}
+
+static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+{
+}
+
+static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+{
+	return 0;
+}
+
+static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+{
+}
+
+static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+	return nelems;
+}
+
+static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+}
+
+static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+{
+}
+
+static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+}
+
+struct pci_iommu_ops pci_sun4v_iommu_ops = {
+	.alloc_consistent		= pci_4v_alloc_consistent,
+	.free_consistent		= pci_4v_free_consistent,
+	.map_single			= pci_4v_map_single,
+	.unmap_single			= pci_4v_unmap_single,
+	.map_sg				= pci_4v_map_sg,
+	.unmap_sg			= pci_4v_unmap_sg,
+	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
+	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
+};
+
+void sun4v_pci_init(int node, char *model_name)
+{
+	prom_printf("sun4v_pci_init: Implement me.\n");
+	prom_halt();
+}
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index 89bd71b1c0d..7c5a589ea43 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -41,10 +41,26 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 
 struct pci_dev;
 
+struct pci_iommu_ops {
+	void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *);
+	void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
+	dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
+	void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
+	int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int);
+	void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int);
+	void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int);
+	void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
+};
+
+extern struct pci_iommu_ops *pci_iommu_ops;
+
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+{
+	return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle);
+}
 
 /* Free and unmap a consistent DMA buffer.
  * cpu_addr is what was returned from pci_alloc_consistent,
@@ -54,7 +70,10 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t
  * References to the memory and mappings associated with cpu_addr/dma_addr
  * past this call are illegal.
  */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+	return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle);
+}
 
 /* Map a single buffer of the indicated size for DMA in streaming mode.
  * The 32-bit bus address to use is returned.
@@ -62,7 +81,10 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
  * Once the device is given the dma address, the device owns this memory
  * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
  */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
+static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+	return pci_iommu_ops->map_single(hwdev, ptr, size, direction);
+}
 
 /* Unmap a single streaming mode DMA translation.  The dma_addr and size
  * must match what was provided for in a previous pci_map_single call.  All
@@ -71,7 +93,10 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
+static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
+{
+	pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction);
+}
 
 /* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
 #define pci_map_page(dev, page, off, size, dir) \
@@ -107,15 +132,19 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-		      int nents, int direction);
+static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+	return pci_iommu_ops->map_sg(hwdev, sg, nents, direction);
+}
 
 /* Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-			 int nhwents, int direction);
+static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction)
+{
+	pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction);
+}
 
 /* Make physical memory consistent for a single
  * streaming mode DMA translation after a transfer.
@@ -127,8 +156,10 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
  * must first perform a pci_dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
-					size_t size, int direction);
+static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
+{
+	pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction);
+}
 
 static inline void
 pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
@@ -144,7 +175,10 @@ pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
+static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+	pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction);
+}
 
 static inline void
 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
-- 
cgit v1.2.3-70-g09d2


From bade5622167181844cd4e60087971c1f949e149f Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 22:05:54 -0800
Subject: [SPARC64]: More SUN4V PCI controller work.

Add an assembler file for PCI hypervisor calls.
Set up the basic skeleton of the SUN4V PCI controller driver.

Add 32-bit devhandle to PBM struct, as this is needed for
hypervisor calls.

Signed-off-by: David S. Miller <davem@davemloft.net>
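
The assembler stubs all follow one pattern: shuffle the C arguments
up one register each, load the HV_FAST_* function number into %o0,
trap with "ta HV_FAST_TRAP", and hand the hypervisor's %o1 result
back in %o0.  A hedged sketch of how a caller might use the new
entry points (tsbid, num_ttes, io_attrs and page_list_pa are
placeholder values, not taken from this patch):

	/* Sketch only: map num_ttes IOMMU entries for one PBM,
	 * using the prototypes from pci_sun4v.h below.
	 */
	unsigned long mapped;

	mapped = pci_sun4v_iommu_map(pbm->devhandle,	/* new PBM field */
				     tsbid,		/* TSB number + index */
				     num_ttes,		/* entries to map */
				     io_attrs,		/* IO attribute bits */
				     page_list_pa);	/* PA of page list */
	if (mapped != num_ttes) {
		/* Partial success: demap what was mapped and bail. */
		pci_sun4v_iommu_demap(pbm->devhandle, tsbid, mapped);
	}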
---
 arch/sparc64/kernel/Makefile        |   3 +-
 arch/sparc64/kernel/pci_sun4v.c     | 286 ++++++++++++++++++++++++++++++++++++
 arch/sparc64/kernel/pci_sun4v.h     |  20 +++
 arch/sparc64/kernel/pci_sun4v_asm.S |  56 +++++++
 include/asm-sparc64/pbm.h           |   3 +
 5 files changed, 367 insertions(+), 1 deletion(-)
 create mode 100644 arch/sparc64/kernel/pci_sun4v.h
 create mode 100644 arch/sparc64/kernel/pci_sun4v_asm.S

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 44043390e34..fedfd9c6729 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -14,7 +14,8 @@ obj-y		:= process.o setup.o cpu.o idprom.o \
 		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
 
 obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o pci_iommu.o \
-			    pci_psycho.o pci_sabre.o pci_schizo.o pci_sun4v.o
+			    pci_psycho.o pci_sabre.o pci_schizo.o \
+			    pci_sun4v.o pci_sun4v_asm.o
 obj-$(CONFIG_SMP)	 += smp.o trampoline.o
 obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
 obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index c1a077196c5..1d61353e264 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -21,6 +21,8 @@
 #include "pci_impl.h"
 #include "iommu_common.h"
 
+#include "pci_sun4v.h"
+
 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 {
 	return NULL;
@@ -67,8 +69,292 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
 	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
 };
 
+/* SUN4V PCI configuration space accessors. */
+
+static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				  int where, int size, u32 *value)
+{
+	/* XXX Implement me! XXX */
+	return 0;
+}
+
+static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				   int where, int size, u32 value)
+{
+	/* XXX Implement me! XXX */
+	return 0;
+}
+
+static struct pci_ops pci_sun4v_ops = {
+	.read =		pci_sun4v_read_pci_cfg,
+	.write =	pci_sun4v_write_pci_cfg,
+};
+
+
+static void pci_sun4v_scan_bus(struct pci_controller_info *p)
+{
+	/* XXX Implement me! XXX */
+}
+
+static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
+					struct pci_dev *pdev,
+					unsigned int ino)
+{
+	/* XXX Implement me! XXX */
+	return 0;
+}
+
+/* XXX correct? XXX */
+static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct resource *res, *root;
+	u32 reg;
+	int where, size, is_64bit;
+
+	res = &pdev->resource[resource];
+	if (resource < 6) {
+		where = PCI_BASE_ADDRESS_0 + (resource * 4);
+	} else if (resource == PCI_ROM_RESOURCE) {
+		where = pdev->rom_base_reg;
+	} else {
+		/* Somebody might have requested allocation of a non-standard resource */
+		return;
+	}
+
+	is_64bit = 0;
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else {
+		root = &pbm->mem_space;
+		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
+			is_64bit = 1;
+	}
+
+	size = res->end - res->start;
+	pci_read_config_dword(pdev, where, &reg);
+	reg = ((reg & size) |
+	       (((u32)(res->start - root->start)) & ~size));
+	if (resource == PCI_ROM_RESOURCE) {
+		reg |= PCI_ROM_ADDRESS_ENABLE;
+		res->flags |= IORESOURCE_ROM_ENABLE;
+	}
+	pci_write_config_dword(pdev, where, reg);
+
+	/* This knows that the upper 32-bits of the address
+	 * must be zero.  Our PCI common layer enforces this.
+	 */
+	if (is_64bit)
+		pci_write_config_dword(pdev, where + 4, 0);
+}
+
+/* XXX correct? XXX */
+static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
+				      struct resource *res,
+				      struct resource *root)
+{
+	res->start += root->start;
+	res->end += root->start;
+}
+
+/* Use ranges property to determine where PCI MEM, I/O, and Config
+ * space are for this PCI bus module.
+ */
+static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
+{
+	int i, saw_cfg, saw_mem, saw_io;
+
+	saw_cfg = saw_mem = saw_io = 0;
+	for (i = 0; i < pbm->num_pbm_ranges; i++) {
+		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
+		unsigned long a;
+		int type;
+
+		type = (pr->child_phys_hi >> 24) & 0x3;
+		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
+		     ((unsigned long)pr->parent_phys_lo  <<  0UL));
+
+		switch (type) {
+		case 0:
+			/* PCI config space, 16MB */
+			pbm->config_space = a;
+			saw_cfg = 1;
+			break;
+
+		case 1:
+			/* 16-bit IO space, 16MB */
+			pbm->io_space.start = a;
+			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
+			pbm->io_space.flags = IORESOURCE_IO;
+			saw_io = 1;
+			break;
+
+		case 2:
+			/* 32-bit MEM space, 2GB */
+			pbm->mem_space.start = a;
+			pbm->mem_space.end = a + (0x80000000UL - 1UL);
+			pbm->mem_space.flags = IORESOURCE_MEM;
+			saw_mem = 1;
+			break;
+
+		default:
+			break;
+		};
+	}
+
+	if (!saw_cfg || !saw_io || !saw_mem) {
+		prom_printf("%s: Fatal error, missing %s PBM range.\n",
+			    pbm->name,
+			    ((!saw_cfg ?
+			      "CFG" :
+			      (!saw_io ?
+			       "IO" : "MEM"))));
+		prom_halt();
+	}
+
+	printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
+	       pbm->name,
+	       pbm->config_space,
+	       pbm->io_space.start,
+	       pbm->mem_space.start);
+}
+
+static void pbm_register_toplevel_resources(struct pci_controller_info *p,
+					    struct pci_pbm_info *pbm)
+{
+	pbm->io_space.name = pbm->mem_space.name = pbm->name;
+
+	request_resource(&ioport_resource, &pbm->io_space);
+	request_resource(&iomem_resource, &pbm->mem_space);
+	pci_register_legacy_regions(&pbm->io_space,
+				    &pbm->mem_space);
+}
+
+static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
+{
+	/* XXX Implement me! XXX */
+}
+
+static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node)
+{
+	struct pci_pbm_info *pbm;
+	struct linux_prom64_registers regs;
+	unsigned int busrange[2];
+	int err;
+
+	/* XXX */
+	pbm = &p->pbm_A;
+
+	pbm->parent = p;
+	pbm->prom_node = prom_node;
+	pbm->pci_first_slot = 1;
+
+	prom_getproperty(prom_node, "reg", (char *)&regs, sizeof(regs));
+	pbm->devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;
+
+	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
+		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));
+
+	printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle);
+
+	prom_getstring(prom_node, "name",
+		       pbm->prom_name, sizeof(pbm->prom_name));
+
+	err = prom_getproperty(prom_node, "ranges",
+			       (char *) pbm->pbm_ranges,
+			       sizeof(pbm->pbm_ranges));
+	if (err == 0 || err == -1) {
+		prom_printf("%s: Fatal error, no ranges property.\n",
+			    pbm->name);
+		prom_halt();
+	}
+
+	pbm->num_pbm_ranges =
+		(err / sizeof(struct linux_prom_pci_ranges));
+
+	pci_sun4v_determine_mem_io_space(pbm);
+	pbm_register_toplevel_resources(p, pbm);
+
+	err = prom_getproperty(prom_node, "interrupt-map",
+			       (char *)pbm->pbm_intmap,
+			       sizeof(pbm->pbm_intmap));
+	if (err != -1) {
+		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+		err = prom_getproperty(prom_node, "interrupt-map-mask",
+				       (char *)&pbm->pbm_intmask,
+				       sizeof(pbm->pbm_intmask));
+		if (err == -1) {
+			prom_printf("%s: Fatal error, no "
+				    "interrupt-map-mask.\n", pbm->name);
+			prom_halt();
+		}
+	} else {
+		pbm->num_pbm_intmap = 0;
+		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+	}
+
+	err = prom_getproperty(prom_node, "bus-range",
+			       (char *)&busrange[0],
+			       sizeof(busrange));
+	if (err == 0 || err == -1) {
+		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
+		prom_halt();
+	}
+	pbm->pci_first_busno = busrange[0];
+	pbm->pci_last_busno = busrange[1];
+
+	pci_sun4v_iommu_init(pbm);
+}
+
 void sun4v_pci_init(int node, char *model_name)
 {
+	struct pci_controller_info *p;
+	struct pci_iommu *iommu;
+
+	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
+	if (!p) {
+		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(p, 0, sizeof(*p));
+
+	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+	if (!iommu) {
+		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(iommu, 0, sizeof(*iommu));
+	p->pbm_A.iommu = iommu;
+
+	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+	if (!iommu) {
+		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(iommu, 0, sizeof(*iommu));
+	p->pbm_B.iommu = iommu;
+
+	p->next = pci_controller_root;
+	pci_controller_root = p;
+
+	p->index = pci_num_controllers++;
+	p->pbms_same_domain = 0;
+
+	p->scan_bus = pci_sun4v_scan_bus;
+	p->irq_build = pci_sun4v_irq_build;
+	p->base_address_update = pci_sun4v_base_address_update;
+	p->resource_adjust = pci_sun4v_resource_adjust;
+	p->pci_ops = &pci_sun4v_ops;
+
+	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
+	 * for memory space.
+	 */
+	pci_memspace_mask = 0x7fffffffUL;
+
+	pci_sun4v_pbm_init(p, node);
+
 	prom_printf("sun4v_pci_init: Implement me.\n");
 	prom_halt();
 }
diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h
new file mode 100644
index 00000000000..d3ac7ece4b3
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sun4v.h
@@ -0,0 +1,20 @@
+/* pci_sun4v.h: SUN4V specific PCI controller support.
+ *
+ * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ */
+
+#ifndef _PCI_SUN4V_H
+#define _PCI_SUN4V_H
+
+extern unsigned long pci_sun4v_devino_to_sysino(unsigned long devhandle,
+						unsigned long devino);
+extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle,
+					 unsigned long tsbid,
+					 unsigned long num_ttes,
+					 unsigned long io_attributes,
+					 unsigned long io_page_list_pa);
+extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
+					   unsigned long tsbid,
+					   unsigned long num_ttes);
+
+#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S
new file mode 100644
index 00000000000..fd2fe0edf16
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sun4v_asm.S
@@ -0,0 +1,56 @@
+/* pci_sun4v_asm: Hypervisor calls for PCI support.
+ *
+ * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
+ */
+
+#include <asm/hypervisor.h>
+
+	/* %o0: devhandle
+	 * %o1:	devino
+	 *
+	 * returns %o0: sysino
+	 */
+	.globl	pci_sun4v_devino_to_sysino
+pci_sun4v_devino_to_sysino:
+	mov	%o1, %o2
+	mov	%o0, %o1
+	mov	HV_FAST_INTR_DEVINO2SYSINO, %o0
+	ta	HV_FAST_TRAP
+	retl
+	 mov	%o1, %o0
+
+	/* %o0: devhandle
+	 * %o1:	tsbid
+	 * %o2:	num ttes
+	 * %o3:	io_attributes
+	 * %o4:	io_page_list phys address
+	 *
+	 * returns %o0:	num ttes mapped
+	 */
+	.globl	pci_sun4v_iommu_map
+pci_sun4v_iommu_map:
+	mov	%o4, %o5
+	mov	%o3, %o4
+	mov	%o2, %o3
+	mov	%o1, %o2
+	mov	%o0, %o1
+	mov	HV_FAST_PCI_IOMMU_MAP, %o0
+	ta	HV_FAST_TRAP
+	retl
+	 mov	%o1, %o0
+
+	/* %o0: devhandle
+	 * %o1:	tsbid
+	 * %o2:	num ttes
+	 *
+	 * returns %o0:	num ttes demapped
+	 */
+	.globl	pci_sun4v_iommu_demap
+pci_sun4v_iommu_demap:
+	mov	%o2, %o3
+	mov	%o1, %o2
+	mov	%o0, %o1
+	mov	HV_FAST_PCI_IOMMU_DEMAP, %o0
+	ta	HV_FAST_TRAP
+	retl
+	 mov	%o1, %o0
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index dd35a2c7798..1396f110939 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -139,6 +139,9 @@ struct pci_pbm_info {
 	/* Opaque 32-bit system bus Port ID. */
 	u32				portid;
 
+	/* Opaque 32-bit handle used for hypervisor calls.  */
+	u32				devhandle;
+
 	/* Chipset version information. */
 	int				chip_type;
 #define PBM_CHIP_TYPE_SABRE		1
-- 
cgit v1.2.3-70-g09d2


From dedacf623283cb24933ec9f7d5bf539f19173cd4 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 22:26:34 -0800
Subject: [SPARC64]: Add HV_PCI_TSBID() macro.

For constructing hypervisor PCI TSB IDs.

Signed-off-by: David S. Miller <davem@davemloft.net>
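
The macro packs the tsbnum into bits 63:32 and the tsbindex into
bits 31:00, matching the documentation hunk below.  A tiny worked
example (entry numbers arbitrary):

	u64 tsbid = HV_PCI_TSBID(0, 42);	/* -> 0x000000000000002a */

	u32 tsbnum   = tsbid >> 32;		/* 0  */
	u32 tsbindex = tsbid & 0xffffffff;	/* 42 */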
---
 include/asm-sparc64/hypervisor.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index b4e0d52acd5..5d795ee5192 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -1300,6 +1300,9 @@ struct hv_trap_trace_entry {
  *			a tsbnum and a tsbindex.  Bits 63:32 contain the
  *			tsbnum and bits 31:00 contain the tsbindex.
  *
+ *			Use the HV_PCI_TSBID() macro to construct such
+ * 			values.
+ *
 *	io_attributes	IO attributes for IOMMU mappings.  One or more
 *			of the attribute bits are stored in a 64-bit
  *			value.  The values are defined below.
@@ -1354,6 +1357,9 @@ struct hv_trap_trace_entry {
 	 (((d) & 0x1f) << 11) | \
 	 (((f) & 0x07) <<  8))
 
+#define HV_PCI_TSBID(__tsb_num, __tsb_index) \
+	((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index)))
+
 #define HV_PCI_SYNC_FOR_DEVICE		0x01
 #define HV_PCI_SYNC_FOR_CPU		0x02
 
-- 
cgit v1.2.3-70-g09d2


From 12eaa328f9fb2d3fcb5afb682c762690d05a3cd8 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 10 Feb 2006 15:39:51 -0800
Subject: [SPARC64]: Use ASI_SCRATCHPAD address 0x0 properly.

This is where the virtual address of the fault status
area belongs.

To set it up we don't make a hypervisor call; instead,
we call OBP's SUNW,set-trap-table with the real address
of the fault status area as the second argument.  Right
before that call we write the virtual address into
ASI_SCRATCHPAD vaddr 0x0.

Signed-off-by: David S. Miller <davem@davemloft.net>
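
In C terms, the head.S and trampoline.S hunks perform roughly the
following per-cpu sequence (illustrative only: the real code must
run in assembly, and write_scratchpad_0() is a made-up stand-in
for the stxa instruction):

	unsigned long mmfsa_vaddr, mmfsa_paddr;

	/* Virtual and real addresses of this cpu's fault status area. */
	mmfsa_vaddr = (unsigned long)&trap_block[cpu] + TRAP_PER_CPU_FAULT_INFO;
	mmfsa_paddr = kern_base + (mmfsa_vaddr - KERNBASE);

	/* stxa %reg, [%g0] ASI_SCRATCHPAD -- hypothetical C wrapper. */
	write_scratchpad_0(mmfsa_vaddr);

	/* OBP takes the real address as the second argument. */
	prom_set_trap_table_sun4v((unsigned long)sparc64_ttable_tl0,
				  mmfsa_paddr);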
---
 arch/sparc64/kernel/head.S           |  29 ++++++-
 arch/sparc64/kernel/sun4v_ivec.S     |  28 ++----
 arch/sparc64/kernel/sun4v_tlb_miss.S | 162 ++++++++++-------------------------
 arch/sparc64/kernel/trampoline.S     |  29 ++++++-
 arch/sparc64/mm/init.c               |  22 +----
 arch/sparc64/prom/misc.c             |   5 ++
 include/asm-sparc64/cpudata.h        |  26 +++---
 include/asm-sparc64/oplib.h          |   1 +
 include/asm-sparc64/ttable.h         |  34 ++++----
 9 files changed, 144 insertions(+), 192 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index d048f0dfd42..f581f0e917f 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -521,11 +521,36 @@ setup_trap_table:
 	wrpr	%g0, 15, %pil
 
 	/* Make the firmware call to jump over to the Linux trap table.  */
-	call	prom_set_trap_table
+	sethi	%hi(is_sun4v), %o0
+	lduw	[%o0 + %lo(is_sun4v)], %o0
+	brz,pt	%o0, 1f
+	 nop
+
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+	stxa	%g2, [%g0] ASI_SCRATCHPAD
+
+	/* Compute physical address:
+	 *
+	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
+	 */
+	sethi	%hi(KERNBASE), %g3
+	sub	%g2, %g3, %g2
+	sethi	%hi(kern_base), %g3
+	ldx	[%g3 + %lo(kern_base)], %g3
+	add	%g2, %g3, %o1
+
+	call	prom_set_trap_table_sun4v
+	 sethi	%hi(sparc64_ttable_tl0), %o0
+
+	ba,pt	%xcc, 2f
+	 nop
+
+1:	call	prom_set_trap_table
 	 sethi	%hi(sparc64_ttable_tl0), %o0
 
 	/* Start using proper page size encodings in ctx register.  */
-	sethi	%hi(sparc64_kern_pri_context), %g3
+2:	sethi	%hi(sparc64_kern_pri_context), %g3
 	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
 
 	mov		PRIMARY_CONTEXT, %g1
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index d9d442017d3..c0367ef7e09 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -22,11 +22,8 @@ sun4v_cpu_mondo:
 	 nop
 
 	/* Get &trap_block[smp_processor_id()] into %g3.  */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 
 	/* Get CPU mondo queue base phys address into %g7.  */
 	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
@@ -74,11 +71,8 @@ sun4v_dev_mondo:
 	 nop
 
 	/* Get &trap_block[smp_processor_id()] into %g3.  */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 
 	/* Get DEV mondo queue base phys address into %g5.  */
 	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
@@ -143,11 +137,8 @@ sun4v_res_mondo:
 	 nop
 
 	/* Get &trap_block[smp_processor_id()] into %g3.  */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 
 	/* Get RES mondo queue base phys address into %g5.  */
 	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
@@ -251,11 +242,8 @@ sun4v_nonres_mondo:
 	 nop
 
 	/* Get &trap_block[smp_processor_id()] into %g3.  */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 
 	/* Get RES mondo queue base phys address into %g5.  */
 	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index c408b05a5f0..f6222623de3 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -7,26 +7,20 @@
 	.align	32
 
 sun4v_itlb_miss:
-	/* Load CPU ID into %g3.  */
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	/* Load MMU Miss base into %g2.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
 	
 	/* Load UTSB reg into %g1.  */
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
-
-	/* Load &trap_block[smp_processor_id()] into %g2.  */
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
+	mov	SCRATCHPAD_UTSBREG1, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g1
 
 	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
 	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
 	 * code wants the missing virtual address in %g4, so that value
 	 * cannot be modified through the entirety of this handler.
 	 */
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
 	srlx	%g4, 22, %g3
 	sllx	%g5, 48, %g6
 	or	%g6, %g3, %g6
@@ -90,26 +84,20 @@ sun4v_itlb_load:
 	retry
 
 sun4v_dtlb_miss:
-	/* Load CPU ID into %g3.  */
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	/* Load MMU Miss base into %g2.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
 	
 	/* Load UTSB reg into %g1.  */
+	mov	SCRATCHPAD_UTSBREG1, %g1
 	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
 
-	/* Load &trap_block[smp_processor_id()] into %g2.  */
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-
 	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
 	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
 	 * code wants the missing virtual address in %g4, so that value
 	 * cannot be modified through the entirety of this handler.
 	 */
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	srlx	%g4, 22, %g3
 	sllx	%g5, 48, %g6
 	or	%g6, %g3, %g6
@@ -169,17 +157,10 @@ sun4v_dtlb_load:
 	retry
 
 sun4v_dtlb_prot:
-	/* Load CPU ID into %g3.  */
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	/* Load MMU Miss base into %g2.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
 	
-	/* Load &trap_block[smp_processor_id()] into %g2.  */
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g5
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
 	rdpr	%tl, %g1
 	cmp	%g1, 1
 	bgu,pn		%xcc, winfix_trampoline
@@ -187,35 +168,17 @@ sun4v_dtlb_prot:
 	ba,pt		%xcc, sparc64_realfault_common
 	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
 
-	/* Called from trap table with &trap_block[smp_processor_id()] in
-	 * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1.
+	/* Called from trap table with TAG TARGET placed into
+	 * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1.
 	 */
 sun4v_itsb_miss:
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
-
-	srlx	%g4, 22, %g7
-	sllx	%g5, 48, %g6
-	or	%g6, %g7, %g6
-	brz,pn	%g5, kvmap_itlb_4v
-	 nop
-
 	ba,pt	%xcc, sun4v_tsb_miss_common
 	 mov	FAULT_CODE_ITLB, %g3
 
-	/* Called from trap table with &trap_block[smp_processor_id()] in
-	 * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1.
+	/* Called from trap table with TAG TARGET placed into
+	 * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1.
 	 */
 sun4v_dtsb_miss:
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
-
-	srlx	%g4, 22, %g7
-	sllx	%g5, 48, %g6
-	or	%g6, %g7, %g6
-	brz,pn	%g5, kvmap_dtlb_4v
-	 nop
-
 	mov	FAULT_CODE_DTLB, %g3
 
 	/* Create TSB pointer into %g1.  This is something like:
@@ -239,15 +202,10 @@ sun4v_tsb_miss_common:
 
 	/* Instruction Access Exception, tl0. */
 sun4v_iacc:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
@@ -260,15 +218,10 @@ sun4v_iacc:
 
 	/* Instruction Access Exception, tl1. */
 sun4v_iacc_tl1:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etraptl1
@@ -281,15 +234,10 @@ sun4v_iacc_tl1:
 
 	/* Data Access Exception, tl0. */
 sun4v_dacc:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
@@ -302,15 +250,10 @@ sun4v_dacc:
 
 	/* Data Access Exception, tl1. */
 sun4v_dacc_tl1:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etraptl1
@@ -323,15 +266,10 @@ sun4v_dacc_tl1:
 
 	/* Memory Address Unaligned.  */
 sun4v_mna:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
 	mov	HV_FAULT_TYPE_UNALIGNED, %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 
@@ -359,15 +297,10 @@ sun4v_privact:
 
 	/* Unaligned ldd float, tl0. */
 sun4v_lddfmna:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
@@ -380,15 +313,10 @@ sun4v_lddfmna:
 
 	/* Unaligned std float, tl0. */
 sun4v_stdfmna:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index c476f5b321f..88382200c7b 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -389,10 +389,35 @@ after_lock_tlb:
 	or		%o1, PSTATE_IE, %o1
 	wrpr		%o1, 0, %pstate
 
-	call		prom_set_trap_table
+	sethi		%hi(is_sun4v), %o0
+	lduw		[%o0 + %lo(is_sun4v)], %o0
+	brz,pt		%o0, 1f
+	 nop
+
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+	add		%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+	stxa		%g2, [%g0] ASI_SCRATCHPAD
+
+	/* Compute physical address:
+	 *
+	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
+	 */
+	sethi		%hi(KERNBASE), %g3
+	sub		%g2, %g3, %g2
+	sethi		%hi(kern_base), %g3
+	ldx		[%g3 + %lo(kern_base)], %g3
+	add		%g2, %g3, %o1
+
+	call		prom_set_trap_table_sun4v
+	 sethi		%hi(sparc64_ttable_tl0), %o0
+
+	ba,pt		%xcc, 2f
+	 nop
+
+1:	call		prom_set_trap_table
 	 sethi		%hi(sparc64_ttable_tl0), %o0
 
-	call		smp_callin
+2:	call		smp_callin
 	 nop
 	call		cpu_idle
 	 mov		0, %o0
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 7faba33202a..88eb6f6be56 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1109,24 +1109,6 @@ static void __init tsb_phys_patch(void)
 	}
 }
 
-/* Register this cpu's fault status area with the hypervisor.  */
-void __cpuinit sun4v_register_fault_status(void)
-{
-	register unsigned long func asm("%o5");
-	register unsigned long arg0 asm("%o0");
-	int cpu = hard_smp_processor_id();
-	struct trap_per_cpu *tb = &trap_block[cpu];
-	unsigned long pa;
-
-	pa = kern_base + ((unsigned long) tb - KERNBASE);
-	func = HV_FAST_MMU_FAULT_AREA_CONF;
-	arg0 = pa;
-	__asm__ __volatile__("ta	%4"
-			     : "=&r" (func), "=&r" (arg0)
-			     : "0" (func), "1" (arg0),
-			       "i" (HV_FAST_TRAP));
-}
-
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
@@ -1147,10 +1129,8 @@ void __init paging_init(void)
 	    tlb_type == hypervisor)
 		tsb_phys_patch();
 
-	if (tlb_type == hypervisor) {
+	if (tlb_type == hypervisor)
 		sun4v_patch_tlb_handlers();
-		sun4v_register_fault_status();
-	}
 
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 87f5cfce23b..713cbac5f9b 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -136,6 +136,11 @@ void prom_set_trap_table(unsigned long tba)
 	p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
 }
 
+void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa)
+{
+	p1275_cmd("SUNW,set-trap-table", P1275_INOUT(2, 0), tba, mmfsa);
+}
+
 int prom_get_mmu_ihandle(void)
 {
 	int node, ret;
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 338b0ca5b51..5a970f5ed9b 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -156,13 +156,16 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	nop;						\
 	.previous;
 
-/* Clobbers TMP, current address space PGD phys address into DEST.  */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
 	__GET_CPUID(TMP)			\
 	sethi	%hi(trap_block), DEST;		\
 	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
 	or	DEST, %lo(trap_block), DEST;	\
 	add	DEST, TMP, DEST;		\
+
+/* Clobbers TMP, current address space PGD phys address into DEST.  */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
 /* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
@@ -175,11 +178,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 
 /* Clobbers TMP, loads DEST with current thread info pointer.  */
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	__GET_CPUID(TMP)			\
-	sethi	%hi(trap_block), DEST;		\
-	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
-	or	DEST, %lo(trap_block), DEST;	\
-	ldx	[DEST + TMP], DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
 
 /* Given the current thread info pointer in THR, load the per-cpu
  * area base of the current processor into DEST.  REG1, REG2, and REG3 are
@@ -201,13 +201,13 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 
 #else
 
-#define __GET_CPUID(REG)				\
-	mov	0, REG;
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	or	DEST, %lo(trap_block), DEST;	\
 
 /* Uniprocessor versions, we know the cpuid is zero.  */
 #define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
-	sethi	%hi(trap_block), DEST;		\
-	or	DEST, %lo(trap_block), DEST;	\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
 #define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
@@ -215,8 +215,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	or	DEST, %lo(__irq_work), DEST;
 
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	sethi	%hi(trap_block), DEST;		\
-	ldx	[DEST + %lo(trap_block)], DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
 
 /* No per-cpu areas on uniprocessor, so no need to load DEST.  */
 #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 2ea545b931b..ce5066ef2dd 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -338,6 +338,7 @@ int cpu_find_by_mid(int mid, int *prom_node);
 
 /* Client interface level routines. */
 extern void prom_set_trap_table(unsigned long tba);
+extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa);
 
 extern long p1275_cmd(const char *, long, ...);
 				   
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index 972f913709a..6bb86a7a5b4 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -180,25 +180,25 @@
 #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
 #endif
 
-#define SUN4V_ITSB_MISS				\
-	mov	SCRATCHPAD_CPUID, %g1;		\
-	ldxa	[%g1] ASI_SCRATCHPAD, %g2;	\
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1;\
-	sethi	%hi(trap_block), %g5;		\
-	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2;	\
-	or	%g5, %lo(trap_block), %g5;	\
-	ba,pt	%xcc, sun4v_itsb_miss;		\
-	 add	%g5, %g2, %g5;
+#define SUN4V_ITSB_MISS					\
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2;		\
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4;	\
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5;	\
+	srlx	%g4, 22, %g7;				\
+	sllx	%g5, 48, %g6;				\
+	brz,pn	%g5, kvmap_itlb_4v;			\
+	 or	%g6, %g7, %g6;				\
+	ba,a,pt	%xcc, sun4v_itsb_miss;
 
 #define SUN4V_DTSB_MISS				\
-	mov	SCRATCHPAD_CPUID, %g1;		\
-	ldxa	[%g1] ASI_SCRATCHPAD, %g2;	\
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1;\
-	sethi	%hi(trap_block), %g5;		\
-	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2;	\
-	or	%g5, %lo(trap_block), %g5;	\
-	ba,pt	%xcc, sun4v_dtsb_miss;		\
-	 add	%g5, %g2, %g5;
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2;		\
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4;	\
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5;	\
+	srlx	%g4, 22, %g7;				\
+	sllx	%g5, 48, %g6;				\
+	brz,pn	%g5, kvmap_dtlb_4v;			\
+	 or	%g6, %g7, %g6;				\
+	ba,a,pt	%xcc, sun4v_dtsb_miss;
 
 /* Before touching these macros, you owe it to yourself to go and
  * see how arch/sparc64/kernel/winfixup.S works... -DaveM
-- 
cgit v1.2.3-70-g09d2


From 459b6e621e0e15315c25bac47fa7113e5818d45d Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 11 Feb 2006 12:21:20 -0800
Subject: [SPARC64]: Fix some SUN4V TLB miss bugs.

Code patching did not sign extend negative branch
offsets correctly.

Kernel TLB miss path needs patching and %g4 register
preservation in order to handle SUN4V correctly.

Signed-off-by: David S. Miller <davem@davemloft.net>
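
The sign-extension bug is easiest to see in C.  A "ba,pt"
displacement field is only 19 bits wide, so logically shifting a
negative byte offset right by two leaves stray high bits that
corrupt the opcode; the fixed sll-by-11 / srl-by-13 pair simply
masks the word offset down to the field width (sketch, assuming
the usual 19-bit BPcc displacement):

	u32 off = (u32)(new_addr - old_addr);	/* may be negative */

	/* Old: high bits of a negative offset leak into the opcode. */
	u32 bad  = BRANCH_ALWAYS | (off >> 2);

	/* New: keep only the 19-bit word displacement. */
	u32 good = BRANCH_ALWAYS | ((off >> 2) & 0x7ffff);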
---
 arch/sparc64/kernel/ktlb.S           | 67 +++++++++++++++++++++++++++++++-----
 arch/sparc64/kernel/sun4v_tlb_miss.S | 14 +++++---
 include/asm-sparc64/ttable.h         | 10 +++---
 3 files changed, 73 insertions(+), 18 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index f6bb2e08964..2d333ab4b91 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -48,7 +48,7 @@ kvmap_itlb_tsb_miss:
 kvmap_itlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
@@ -60,8 +60,29 @@ kvmap_itlb_vmalloc_addr:
 	/* fallthrough to TLB load */
 
 kvmap_itlb_load:
-	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Reload TLB
+
+661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
 	retry
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
+	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
+	 * instruction get nop'd out and we get here to branch
+	 * to the sun4v tlb load code.  The registers are setup
+	 * as follows:
+	 *
+	 * %g4: vaddr
+	 * %g5: PTE
+	 * %g6:	TAG
+	 *
+	 * The sun4v TLB load wants the PTE in %g3 so we fix that
+	 * up here.
+	 */
+	ba,pt		%xcc, sun4v_itlb_load
+	 mov		%g5, %g3
 
 kvmap_itlb_longpath:
 
@@ -80,7 +101,7 @@ kvmap_itlb_longpath:
 kvmap_itlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
 
 	KTSB_WRITE(%g1, %g5, %g6)
 
@@ -90,7 +111,7 @@ kvmap_itlb_obp:
 kvmap_dtlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
 
 	KTSB_WRITE(%g1, %g5, %g6)
 
@@ -129,7 +150,7 @@ kvmap_linear_patch:
 kvmap_dtlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
@@ -141,8 +162,29 @@ kvmap_dtlb_vmalloc_addr:
 	/* fallthrough to TLB load */
 
 kvmap_dtlb_load:
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+
+661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
 	retry
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
+	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
+	 * instruction get nop'd out and we get here to branch
+	 * to the sun4v tlb load code.  The registers are setup
+	 * as follows:
+	 *
+	 * %g4: vaddr
+	 * %g5: PTE
+	 * %g6:	TAG
+	 *
+	 * The sun4v TLB load wants the PTE in %g3 so we fix that
+	 * up here.
+	 */
+	ba,pt		%xcc, sun4v_dtlb_load
+	 mov		%g5, %g3
 
 kvmap_dtlb_nonlinear:
 	/* Catch kernel NULL pointer derefs.  */
@@ -185,10 +227,17 @@ kvmap_dtlb_longpath:
 	nop
 	.previous
 
-	rdpr	%tl, %g4
-	cmp	%g4, 1
-	mov	TLB_TAG_ACCESS, %g4
+	rdpr	%tl, %g3
+	cmp	%g3, 1
+
+661:	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_DMMU, %g5
+	.section .sun4v_2insn_patch, "ax"
+	.word	661b
+	mov	%g4, %g5
+	nop
+	.previous
+
 	be,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_DTLB, %g4
 	ba,pt	%xcc, winfix_trampoline
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index f7129137f9a..597359ced23 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -96,7 +96,7 @@ sun4v_dtlb_miss:
 	
 	/* Load UTSB reg into %g1.  */
 	mov	SCRATCHPAD_UTSBREG1, %g1
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g1
 
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v)
@@ -149,14 +149,19 @@ sun4v_dtlb_prot:
 	 * SCRATCHPAD_MMU_MISS contents in %g2.
 	 */
 sun4v_itsb_miss:
-	ba,pt	%xcc, sun4v_tsb_miss_common
+	mov	SCRATCHPAD_UTSBREG1, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g1
+	brz,pn	%g5, kvmap_itlb_4v
 	 mov	FAULT_CODE_ITLB, %g3
 
 	/* Called from trap table with TAG TARGET placed into
 	 * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1.
 	 */
 sun4v_dtsb_miss:
-	mov	FAULT_CODE_DTLB, %g3
+	mov	SCRATCHPAD_UTSBREG1, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g1
+	brz,pn	%g5, kvmap_dtlb_4v
+	 mov	FAULT_CODE_DTLB, %g3
 
 	/* Create TSB pointer into %g1.  This is something like:
 	 *
@@ -312,7 +317,8 @@ sun4v_stdfmna:
 	or	%g2, %lo(OLD), %g2; \
 	sub	%g1, %g2, %g1; \
 	sethi	%hi(BRANCH_ALWAYS), %g3; \
-	srl	%g1, 2, %g1; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
 	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
 	or	%g3, %g1, %g3; \
 	stw	%g3, [%g2]; \
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index 6bb86a7a5b4..9e28b240f3a 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -186,19 +186,19 @@
 	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5;	\
 	srlx	%g4, 22, %g7;				\
 	sllx	%g5, 48, %g6;				\
-	brz,pn	%g5, kvmap_itlb_4v;			\
+	ba,pt	%xcc, sun4v_itsb_miss;			\
 	 or	%g6, %g7, %g6;				\
-	ba,a,pt	%xcc, sun4v_itsb_miss;
+	nop;
 
-#define SUN4V_DTSB_MISS				\
+#define SUN4V_DTSB_MISS					\
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2;		\
 	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4;	\
 	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5;	\
 	srlx	%g4, 22, %g7;				\
 	sllx	%g5, 48, %g6;				\
-	brz,pn	%g5, kvmap_dtlb_4v;			\
+	ba,pt	%xcc, sun4v_dtsb_miss;			\
 	 or	%g6, %g7, %g6;				\
-	ba,a,pt	%xcc, sun4v_dtsb_miss;
+	nop;
 
 /* Before touching these macros, you owe it to yourself to go and
  * see how arch/sparc64/kernel/winfixup.S works... -DaveM
-- 
cgit v1.2.3-70-g09d2


From 490384e752a43aa281ed533e9de2da36df25c337 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 11 Feb 2006 14:41:18 -0800
Subject: [SPARC64]: Register kernel TSB with hypervisor.

We do this right after we take over the trap table from OBP.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/smp.c     |  4 ++-
 arch/sparc64/mm/init.c        | 70 ++++++++++++++++++++++++++++++++++++++++++-
 include/asm-sparc64/pgtable.h |  1 +
 3 files changed, 73 insertions(+), 2 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index f553264588d..7d7e02ba297 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -122,8 +122,10 @@ void __init smp_callin(void)
 
 	__local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-	if (tlb_type == hypervisor)
+	if (tlb_type == hypervisor) {
 		sun4v_register_fault_status();
+		sun4v_ktsb_register();
+	}
 
 	__flush_tlb_all();
 
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 88eb6f6be56..92756da273b 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1109,6 +1109,69 @@ static void __init tsb_phys_patch(void)
 	}
 }
 
+/* Don't mark as init, we give this to the Hypervisor.  */
+static struct hv_tsb_descr ktsb_descr[2];
+extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+
+static void __init sun4v_ktsb_init(void)
+{
+	unsigned long ktsb_pa;
+
+	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
+
+	switch (PAGE_SIZE) {
+	case 8 * 1024:
+	default:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
+		break;
+
+	case 64 * 1024:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
+		break;
+
+	case 512 * 1024:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
+		break;
+
+	case 4 * 1024 * 1024:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
+		break;
+	};
+
+	ktsb_descr[0].assoc = 0;
+	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
+	ktsb_descr[0].ctx_idx = 0;
+	ktsb_descr[0].tsb_base = ktsb_pa;
+	ktsb_descr[0].resv = 0;
+
+	/* XXX When we have a kernel large page size TSB, describe
+	 * XXX it in ktsb_descr[1] here.
+	 */
+}
+
+void __cpuinit sun4v_ktsb_register(void)
+{
+	register unsigned long func asm("%o5");
+	register unsigned long arg0 asm("%o0");
+	register unsigned long arg1 asm("%o1");
+	unsigned long pa;
+
+	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
+
+	func = HV_FAST_MMU_TSB_CTX0;
+	/* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */
+	arg0 = 1;
+	arg1 = pa;
+	__asm__ __volatile__("ta	%6"
+			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
+			     : "0" (func), "1" (arg0), "2" (arg1),
+			       "i" (HV_FAST_TRAP));
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
@@ -1129,8 +1192,10 @@ void __init paging_init(void)
 	    tlb_type == hypervisor)
 		tsb_phys_patch();
 
-	if (tlb_type == hypervisor)
+	if (tlb_type == hypervisor) {
 		sun4v_patch_tlb_handlers();
+		sun4v_ktsb_init();
+	}
 
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
@@ -1171,6 +1236,9 @@ void __init paging_init(void)
 
 	__flush_tlb_all();
 
+	if (tlb_type == hypervisor)
+		sun4v_ktsb_register();
+
 	/* Setup bootmem... */
 	pages_avail = 0;
 	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index c42c06a37d1..a480007f0a9 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -438,6 +438,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
 
 extern void pgtable_cache_init(void);
 extern void sun4v_register_fault_status(void);
+extern void sun4v_ktsb_register(void);
 
 #endif /* !(__ASSEMBLY__) */
 
-- 
cgit v1.2.3-70-g09d2


From c4bce90ea2069e5a87beac806de3090ab32128d5 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 11 Feb 2006 21:57:54 -0800
Subject: [SPARC64]: Deal with PTE layout differences in SUN4V.

Yes, you heard it right, they changed the PTE layout for
SUN4V.  Ho hum...

This is the simple and inefficient way to support this.
It'll get optimized, don't worry.

Signed-off-by: David S. Miller <davem@davemloft.net>
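
The ktlb.S hunk replaces compile-time PTE constants with a runtime
variable, kern_linear_pte_xor, because sun4v puts these bits in
different positions.  The underlying trick, sketched in C using the
sun4u values from the removed KERN_HIGHBITS/KERN_LOWBITS lines (the
xor itself sits in the patched instruction, which this hunk does not
show):

	/* Linear kernel addresses look like 0xfffff800xxxxxxxx.
	 * XOR-ing with a value whose top bits are also 0xfffff8...
	 * strips the virtual offset and merges in the PTE control
	 * bits at the same time, so one xor turns a page-aligned
	 * vaddr into a valid 4MB PTE.
	 */
	unsigned long pte_xor = ((_PAGE_VALID | _PAGE_SZ4MB) ^
				 0xfffff80000000000UL) |
				_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;

	vaddr &= ~((4UL * 1024 * 1024) - 1);	/* 4MB page base */
	pte = vaddr ^ pte_xor;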
---
 arch/sparc64/kernel/itlb_miss.S      |   4 +-
 arch/sparc64/kernel/ktlb.S           |  12 +-
 arch/sparc64/kernel/setup.c          | 274 --------------
 arch/sparc64/kernel/sun4v_tlb_miss.S |   3 +-
 arch/sparc64/kernel/tsb.S            |   9 +-
 arch/sparc64/lib/clear_page.S        |   8 +-
 arch/sparc64/lib/copy_page.S         |   7 +-
 arch/sparc64/mm/fault.c              |   2 +-
 arch/sparc64/mm/generic.c            |  40 +-
 arch/sparc64/mm/init.c               | 703 ++++++++++++++++++++++++++++++-----
 arch/sparc64/mm/tsb.c                |  12 +-
 include/asm-sparc64/pgtable.h        | 266 ++++---------
 12 files changed, 717 insertions(+), 623 deletions(-)

(limited to 'include/asm-sparc64')
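
The core of this patch: PTE bit masks that were compile-time constants
become values chosen at boot from the detected tlb_type.  C code
branches on tlb_type directly; the assembly miss handlers instead load
variables such as PAGE_EXEC that sun4{u,v}_pgprot_init() fills in.  A
simplified sketch of the boot-time selection (hypothetical function
name; the real initializers appear in mm/init.c below):

void pgprot_runtime_init(void)
{
	if (tlb_type == hypervisor)
		PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
	else
		PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
}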

diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 97facce27aa..730caa4a150 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -6,9 +6,10 @@
 	 nop					! Delay slot (fill me)
 	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	cmp	%g4, %g6			! Compare TAG
-	sethi	%hi(_PAGE_EXEC), %g4		! Setup exec check
+	sethi	%hi(PAGE_EXEC), %g4		! Setup exec check
 
 /* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
+	ldx	[%g4 + %lo(PAGE_EXEC)], %g4
 	bne,pn	%xcc, tsb_miss_itlb		! Miss
 	 mov	FAULT_CODE_ITLB, %g3
 	andcc	%g5, %g4, %g0			! Executable?
@@ -16,7 +17,6 @@
 	 nop					! Delay slot, fill me
 	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
 	retry					! Trap done
-	nop
 
 /* ITLB ** ICACHE line 3: 				*/
 	nop
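
The exec-permission mask is no longer an assemble-time immediate; the
sethi/ldx pair added above fetches it from the PAGE_EXEC variable at
the cost of one extra load per ITLB miss.  In C terms the handler's
test is (sketch only; the real check runs in assembly with the PTE in
%g5):

static inline int tte_executable(unsigned long tte)
{
	return (tte & pgprot_val(PAGE_EXEC)) != 0;
}
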
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2d333ab4b91..47dfd45971e 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -131,16 +131,8 @@ kvmap_dtlb_4v:
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
-#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
-#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-
-	sethi		%uhi(KERN_HIGHBITS), %g2
-	or		%g2, %ulo(KERN_HIGHBITS), %g2
-	sllx		%g2, 32, %g2
-	or		%g2, KERN_LOWBITS, %g2
-
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
+	sethi		%hi(kern_linear_pte_xor), %g2
+	ldx		[%g2 + %lo(kern_linear_pte_xor)], %g2
 
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
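
kern_linear_pte_xor collapses the old KERN_HIGHBITS/KERN_LOWBITS
construction into one value.  Every linear-map virtual address has its
upper bits equal to 0xfffff80000000000, so XOR-ing the miss address
with the boot-computed constant clears those bits down to the physical
address while simultaneously setting _PAGE_VALID, the 4MB size field,
and the cache/priv/write low bits.  A sketch, assuming the low 13 bits
of vaddr are already clear (as they are for a kernel-context miss tag):

static inline unsigned long kvmap_linear_tte(unsigned long vaddr)
{
	extern unsigned long kern_linear_pte_xor;

	return vaddr ^ kern_linear_pte_xor;
}
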
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index f36b257b2e4..ca75f3b26a3 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -64,12 +64,6 @@ struct screen_info screen_info = {
 	16                      /* orig-video-points */
 };
 
-/* Typing sync at the prom prompt calls the function pointed to by
- * the sync callback which I set to the following function.
- * This should sync all filesystems and return, for now it just
- * prints out pretty messages and returns.
- */
-
 void (*prom_palette)(int);
 void (*prom_keyboard)(void);
 
@@ -79,263 +73,6 @@ prom_console_write(struct console *con, const char *s, unsigned n)
 	prom_write(s, n);
 }
 
-static struct console prom_console = {
-	.name =		"prom",
-	.write =	prom_console_write,
-	.flags =	CON_CONSDEV | CON_ENABLED,
-	.index =	-1,
-};
-
-#define PROM_TRUE	-1
-#define PROM_FALSE	0
-
-/* Pretty sick eh? */
-int prom_callback(long *args)
-{
-	struct console *cons, *saved_console = NULL;
-	unsigned long flags;
-	char *cmd;
-	extern spinlock_t prom_entry_lock;
-
-	if (!args)
-		return -1;
-	if (!(cmd = (char *)args[0]))
-		return -1;
-
-	/*
-	 * The callback can be invoked on the cpu that first dropped 
-	 * into prom_cmdline after taking the serial interrupt, or on 
-	 * a slave processor that was smp_captured() if the 
-	 * administrator has done a switch-cpu inside obp. In either 
-	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
-	 */
-	irq_exit();
-
-	/* XXX Revisit the locking here someday.  This is a debugging
-	 * XXX feature so it isnt all that critical.  -DaveM
-	 */
-	local_irq_save(flags);
-
-	spin_unlock(&prom_entry_lock);
-	cons = console_drivers;
-	while (cons) {
-		unregister_console(cons);
-		cons->flags &= ~(CON_PRINTBUFFER);
-		cons->next = saved_console;
-		saved_console = cons;
-		cons = console_drivers;
-	}
-	register_console(&prom_console);
-	if (!strcmp(cmd, "sync")) {
-		prom_printf("PROM `%s' command...\n", cmd);
-		show_free_areas();
-		if (current->pid != 0) {
-			local_irq_enable();
-			sys_sync();
-			local_irq_disable();
-		}
-		args[2] = 0;
-		args[args[1] + 3] = -1;
-		prom_printf("Returning to PROM\n");
-	} else if (!strcmp(cmd, "va>tte-data")) {
-		unsigned long ctx, va;
-		unsigned long tte = 0;
-		long res = PROM_FALSE;
-
-		ctx = args[3];
-		va = args[4];
-		if (ctx) {
-			/*
-			 * Find process owning ctx, lookup mapping.
-			 */
-			struct task_struct *p;
-			struct mm_struct *mm = NULL;
-			pgd_t *pgdp;
-			pud_t *pudp;
-			pmd_t *pmdp;
-			pte_t *ptep;
-			pte_t pte;
-
-			for_each_process(p) {
-				mm = p->mm;
-				if (CTX_NRBITS(mm->context) == ctx)
-					break;
-			}
-			if (!mm ||
-			    CTX_NRBITS(mm->context) != ctx)
-				goto done;
-
-			pgdp = pgd_offset(mm, va);
-			if (pgd_none(*pgdp))
-				goto done;
-			pudp = pud_offset(pgdp, va);
-			if (pud_none(*pudp))
-				goto done;
-			pmdp = pmd_offset(pudp, va);
-			if (pmd_none(*pmdp))
-				goto done;
-
-			/* Preemption implicitly disabled by virtue of
-			 * being called from inside OBP.
-			 */
-			ptep = pte_offset_map(pmdp, va);
-			pte = *ptep;
-			if (pte_present(pte)) {
-				tte = pte_val(pte);
-				res = PROM_TRUE;
-			}
-			pte_unmap(ptep);
-			goto done;
-		}
-
-		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
-			if (tlb_type == spitfire) {
-				extern unsigned long sparc64_kern_pri_context;
-
-				/* Spitfire Errata #32 workaround */
-				__asm__ __volatile__(
-					"stxa 	%0, [%1] %2\n\t"
-					"flush	%%g6"
-					: /* No outputs */
-					: "r" (sparc64_kern_pri_context),
-					  "r" (PRIMARY_CONTEXT),
-					  "i" (ASI_DMMU));
-			}
-
-			/*
-			 * Locked down tlb entry.
-			 */
-
-			if (tlb_type == spitfire) {
-				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
-				res = PROM_TRUE;
-			} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
-				res = PROM_TRUE;
-			}
-			goto done;
-		}
-
-		if (va < PGDIR_SIZE) {
-			/*
-			 * vmalloc or prom_inherited mapping.
-			 */
-			pgd_t *pgdp;
-			pud_t *pudp;
-			pmd_t *pmdp;
-			pte_t *ptep;
-			pte_t pte;
-			int error;
-
-			if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
-				tte = prom_virt_to_phys(va, &error);
-				if (!error)
-					res = PROM_TRUE;
-				goto done;
-			}
-			pgdp = pgd_offset_k(va);
-			if (pgd_none(*pgdp))
-				goto done;
-			pudp = pud_offset(pgdp, va);
-			if (pud_none(*pudp))
-				goto done;
-			pmdp = pmd_offset(pudp, va);
-			if (pmd_none(*pmdp))
-				goto done;
-
-			/* Preemption implicitly disabled by virtue of
-			 * being called from inside OBP.
-			 */
-			ptep = pte_offset_kernel(pmdp, va);
-			pte = *ptep;
-			if (pte_present(pte)) {
-				tte = pte_val(pte);
-				res = PROM_TRUE;
-			}
-			goto done;
-		}
-
-		if (va < PAGE_OFFSET) {
-			/*
-			 * No mappings here.
-			 */
-			goto done;
-		}
-
-		if (va & (1UL << 40)) {
-			/*
-			 * I/O page.
-			 */
-
-			tte = (__pa(va) & _PAGE_PADDR) |
-			      _PAGE_VALID | _PAGE_SZ4MB |
-			      _PAGE_E | _PAGE_P | _PAGE_W;
-			res = PROM_TRUE;
-			goto done;
-		}
-
-		/*
-		 * Normal page.
-		 */
-		tte = (__pa(va) & _PAGE_PADDR) |
-		      _PAGE_VALID | _PAGE_SZ4MB |
-		      _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
-		res = PROM_TRUE;
-
-	done:
-		if (res == PROM_TRUE) {
-			args[2] = 3;
-			args[args[1] + 3] = 0;
-			args[args[1] + 4] = res;
-			args[args[1] + 5] = tte;
-		} else {
-			args[2] = 2;
-			args[args[1] + 3] = 0;
-			args[args[1] + 4] = res;
-		}
-	} else if (!strcmp(cmd, ".soft1")) {
-		unsigned long tte;
-
-		tte = args[3];
-		prom_printf("%lx:\"%s%s%s%s%s\" ",
-			    (tte & _PAGE_SOFT) >> 7,
-			    tte & _PAGE_MODIFIED ? "M" : "-",
-			    tte & _PAGE_ACCESSED ? "A" : "-",
-			    tte & _PAGE_READ     ? "W" : "-",
-			    tte & _PAGE_WRITE    ? "R" : "-",
-			    tte & _PAGE_PRESENT  ? "P" : "-");
-
-		args[2] = 2;
-		args[args[1] + 3] = 0;
-		args[args[1] + 4] = PROM_TRUE;
-	} else if (!strcmp(cmd, ".soft2")) {
-		unsigned long tte;
-
-		tte = args[3];
-		prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
-
-		args[2] = 2;
-		args[args[1] + 3] = 0;
-		args[args[1] + 4] = PROM_TRUE;
-	} else {
-		prom_printf("unknown PROM `%s' command...\n", cmd);
-	}
-	unregister_console(&prom_console);
-	while (saved_console) {
-		cons = saved_console;
-		saved_console = cons->next;
-		register_console(cons);
-	}
-	spin_lock(&prom_entry_lock);
-	local_irq_restore(flags);
-
-	/*
-	 * Restore in-interrupt status for a resume from obp.
-	 */
-	irq_enter();
-	return 0;
-}
-
 unsigned int boot_flags = 0;
 #define BOOTME_DEBUG  0x1
 #define BOOTME_SINGLE 0x2
@@ -483,17 +220,6 @@ char reboot_command[COMMAND_LINE_SIZE];
 
 static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
 
-void register_prom_callbacks(void)
-{
-	prom_setcallback(prom_callback);
-	prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
-		   "' linux-va>tte-data to va>tte-data");
-	prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
-		   "' linux-.soft1 to .soft1");
-	prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
-		   "' linux-.soft2 to .soft2");
-}
-
 static void __init per_cpu_patch(void)
 {
 #ifdef CONFIG_SMP
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index 597359ced23..950ca74b4a5 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -59,7 +59,8 @@ sun4v_itlb_miss:
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
 	cmp	%g2, %g6
-	sethi	%hi(_PAGE_EXEC), %g7
+	sethi	%hi(PAGE_EXEC), %g7
+	ldx	[%g7 + %lo(PAGE_EXEC)], %g7
 	bne,a,pn %xcc, tsb_miss_page_table_walk
 	 mov	FAULT_CODE_ITLB, %g3
 	andcc	%g3, %g7, %g0
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 667dcb077be..be8f0892d72 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -56,10 +56,11 @@ tsb_reload:
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
 	 */
-	srlx		%g5, 32, %g2
-	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g7
-	and		%g2, %g7, %g2
-	sethi		%hi(_PAGE_SZBITS >> 32), %g7
+	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
+	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
+	and		%g5, %g7, %g2
+	sethi		%hi(_PAGE_SZBITS), %g7
+	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
 	cmp		%g2, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
 	 TSB_STORE(%g1, %g0)
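
tsb_reload now reads both page-size masks from memory rather than
encoding them as immediates, so one handler body serves both PTE
layouts.  The equivalent C test (a sketch; only base-page-size TTEs
are inserted into the TSB, larger pages go straight to the TLB):

static inline int tte_is_base_size(unsigned long tte)
{
	return (tte & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS;
}
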
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
index cdc634bceba..77e531f6c2a 100644
--- a/arch/sparc64/lib/clear_page.S
+++ b/arch/sparc64/lib/clear_page.S
@@ -23,9 +23,6 @@
 	 * disable preemption during the clear.
 	 */
 
-#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-
 	.text
 
 	.globl		_clear_page
@@ -44,12 +41,11 @@ clear_user_page:	/* %o0=dest, %o1=vaddr */
 	sethi		%hi(PAGE_SIZE), %o4
 
 	sllx		%g2, 32, %g2
-	sethi		%uhi(TTE_BITS_TOP), %g3
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
 
-	sllx		%g3, 32, %g3
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
 	sub		%o0, %g2, %g1		! paddr
 
-	or		%g3, TTE_BITS_BOTTOM, %g3
 	and		%o1, %o4, %o0		! vaddr D-cache alias bit
 
 	or		%g1, %g3, %g1		! TTE data
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
index feebb14fd27..37460666a5c 100644
--- a/arch/sparc64/lib/copy_page.S
+++ b/arch/sparc64/lib/copy_page.S
@@ -23,8 +23,6 @@
 	 * disable preemption during the clear.
 	 */
 
-#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
 #define	DCACHE_SIZE	(PAGE_SIZE * 2)
 
 #if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
@@ -52,13 +50,12 @@ copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
 	sethi		%hi(PAGE_SIZE), %o3
 
 	sllx		%g2, 32, %g2
-	sethi		%uhi(TTE_BITS_TOP), %g3
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
 
-	sllx		%g3, 32, %g3
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
 	sub		%o0, %g2, %g1		! dest paddr
 
 	sub		%o1, %g2, %g2		! src paddr
-	or		%g3, TTE_BITS_BOTTOM, %g3
 
 	and		%o2, %o3, %o0		! vaddr D-cache alias bit
 	or		%g1, %g3, %g1		! dest TTE data
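
clear_user_page and copy_user_page now build their temporary locked
TTE the same way: OR the physical address into the boot-selected
PAGE_KERNEL_LOCKED protection bits instead of the deleted
TTE_BITS_TOP/TTE_BITS_BOTTOM constants.  Sketch (hypothetical helper;
the real code does this inline in assembly):

static inline unsigned long alias_window_tte(unsigned long paddr)
{
	return paddr | pgprot_val(PAGE_KERNEL_LOCKED);
}
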
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6f0539aa44d..439a53c1e56 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -137,7 +137,7 @@ static unsigned int get_user_insn(unsigned long tpc)
 	if (!pte_present(pte))
 		goto out;
 
-	pa  = (pte_val(pte) & _PAGE_PADDR);
+	pa  = (pte_pfn(pte) << PAGE_SHIFT);
 	pa += (tpc & ~PAGE_MASK);
 
 	/* Use phys bypass so we don't pollute dtlb/dcache. */
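
Masking with _PAGE_PADDR only worked for the SUN4U layout; going
through pte_pfn() applies whichever physical-address mask the running
machine needs.  The equivalent, format-independent computation as a
sketch:

static inline unsigned long pte_to_paddr(pte_t pte)
{
	return pte_pfn(pte) << PAGE_SHIFT;
}
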
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 580b63da836..5fc5c579e35 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -15,15 +15,6 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 
-static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
-{
-	pte_t pte;
-	pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
-			~(unsigned long)_PAGE_CACHE);
-	pte_val(pte) |= (((unsigned long)space) << 32);
-	return pte;
-}
-
 /* Remap IO memory, the same way as remap_pfn_range(), but use
  * the obio memory space.
  *
@@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
 		pte_t entry;
 		unsigned long curend = address + PAGE_SIZE;
 		
-		entry = mk_pte_io(offset, prot, space);
+		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
 		if (!(address & 0xffff)) {
-			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
-						  space);
+			if (PAGE_SIZE < (4 * 1024 * 1024) &&
+			    !(address & 0x3fffff) &&
+			    !(offset & 0x3ffffe) &&
+			    end >= address + 0x400000) {
+				entry = mk_pte_io(offset, prot, space,
+						  4 * 1024 * 1024);
 				curend = address + 0x400000;
 				offset += 0x400000;
-			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
-						  space);
+			} else if (PAGE_SIZE < (512 * 1024) &&
+				   !(address & 0x7ffff) &&
+				   !(offset & 0x7fffe) &&
+				   end >= address + 0x80000) {
+				entry = mk_pte_io(offset, prot, space,
+						  512 * 1024);
 				curend = address + 0x80000;
 				offset += 0x80000;
-			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
-						  space);
+			} else if (PAGE_SIZE < (64 * 1024) &&
+				   !(offset & 0xfffe) &&
+				   end >= address + 0x10000) {
+				entry = mk_pte_io(offset, prot, space,
+						  64 * 1024);
 				curend = address + 0x10000;
 				offset += 0x10000;
 			} else
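
mk_pte_io() now takes the mapping size explicitly and derives the TTE
size bits through pte_sz_bits(), so callers no longer OR _PAGE_SZ*
values into the pgprot themselves.  A hedged usage sketch (io_map_one()
is a hypothetical caller, not code from this patch):

static void io_map_one(pte_t *ptep, unsigned long offset,
		       pgprot_t prot, int space)
{
	/* Map one naturally aligned 64K chunk of obio space. */
	pte_t entry = mk_pte_io(offset, prot, space, 64 * 1024);

	set_pte(ptep, entry);	/* caller handles any TLB flushing */
}
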
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 92756da273b..9c2fc239f3e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -6,6 +6,7 @@
  */
  
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/string.h>
@@ -118,6 +119,7 @@ unsigned long phys_base __read_mostly;
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
 unsigned long pfn_base __read_mostly;
+unsigned long kern_linear_pte_xor __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -256,6 +258,9 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long
 	__tsb_insert(tsb_addr, tag, pte);
 }
 
+unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
+unsigned long _PAGE_SZBITS __read_mostly;
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
@@ -398,39 +403,9 @@ struct linux_prom_translation {
 struct linux_prom_translation prom_trans[512] __read_mostly;
 unsigned int prom_trans_ents __read_mostly;
 
-extern unsigned long prom_boot_page;
-extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
-extern int prom_get_mmu_ihandle(void);
-extern void register_prom_callbacks(void);
-
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-unsigned long prom_virt_to_phys(unsigned long promva, int *error)
-{
-	int i;
-
-	for (i = 0; i < prom_trans_ents; i++) {
-		struct linux_prom_translation *p = &prom_trans[i];
-
-		if (promva >= p->virt &&
-		    promva < (p->virt + p->size)) {
-			unsigned long base = p->data & _PAGE_PADDR;
-
-			if (error)
-				*error = 0;
-			return base + (promva & (8192 - 1));
-		}
-	}
-	if (error)
-		*error = 1;
-	return 0UL;
-}
-
 /* The obp translations are saved based on 8k pagesize, since obp can
  * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
  * HI_OBP_ADDRESS range are handled in ktlb.S.
@@ -537,6 +512,8 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
 			       "3" (arg2), "4" (arg3));
 }
 
+static unsigned long kern_large_tte(unsigned long paddr);
+
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
@@ -544,9 +521,7 @@ static void __init remap_kernel(void)
 
 	tte_vaddr = (unsigned long) KERNBASE;
 	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
-	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
-				 _PAGE_CP | _PAGE_CV | _PAGE_P |
-				 _PAGE_L | _PAGE_W));
+	tte_data = kern_large_tte(phys_page);
 
 	kern_locked_tte_data = tte_data;
 
@@ -591,10 +566,6 @@ static void __init inherit_prom_mappings(void)
 	prom_printf("Remapping the kernel... ");
 	remap_kernel();
 	prom_printf("done.\n");
-
-	prom_printf("Registering callbacks... ");
-	register_prom_callbacks();
-	prom_printf("done.\n");
 }
 
 void prom_world(int enter)
@@ -631,63 +602,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
 }
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
-/* If not locked, zap it. */
-void __flush_tlb_all(void)
-{
-	unsigned long pstate;
-	int i;
-
-	__asm__ __volatile__("flushw\n\t"
-			     "rdpr	%%pstate, %0\n\t"
-			     "wrpr	%0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-	if (tlb_type == spitfire) {
-		for (i = 0; i < 64; i++) {
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-					     "flush	%%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
-				spitfire_put_dtlb_data(i, 0x0UL);
-			}
-
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-					     "flush	%%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
-				spitfire_put_itlb_data(i, 0x0UL);
-			}
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		cheetah_flush_dtlb_all();
-		cheetah_flush_itlb_all();
-	}
-	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
-			     : : "r" (pstate));
-}
-
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
  *
@@ -1180,6 +1094,9 @@ extern void sun4v_patch_tlb_handlers(void);
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
 
+static void sun4u_pgprot_init(void);
+static void sun4v_pgprot_init(void);
+
 void __init paging_init(void)
 {
 	unsigned long end_pfn, pages_avail, shift;
@@ -1188,6 +1105,11 @@ void __init paging_init(void)
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+	if (tlb_type == hypervisor)
+		sun4v_pgprot_init();
+	else
+		sun4u_pgprot_init();
+
 	if (tlb_type == cheetah_plus ||
 	    tlb_type == hypervisor)
 		tsb_phys_patch();
@@ -1411,3 +1333,596 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
+
+/* SUN4U pte bits... */
+#define _PAGE_SZ4MB_4U	  0x6000000000000000 /* 4MB Page               */
+#define _PAGE_SZ512K_4U	  0x4000000000000000 /* 512K Page              */
+#define _PAGE_SZ64K_4U	  0x2000000000000000 /* 64K Page               */
+#define _PAGE_SZ8K_4U	  0x0000000000000000 /* 8K Page                */
+#define _PAGE_NFO_4U	  0x1000000000000000 /* No Fault Only          */
+#define _PAGE_IE_4U	  0x0800000000000000 /* Invert Endianness      */
+#define _PAGE_SOFT2_4U	  0x07FC000000000000 /* Software bits, set 2   */
+#define _PAGE_RES1_4U	  0x0002000000000000 /* Reserved               */
+#define _PAGE_SZ32MB_4U	  0x0001000000000000 /* (Panther) 32MB page    */
+#define _PAGE_SZ256MB_4U  0x2001000000000000 /* (Panther) 256MB page   */
+#define _PAGE_SN_4U	  0x0000800000000000 /* (Cheetah) Snoop        */
+#define _PAGE_RES2_4U	  0x0000780000000000 /* Reserved               */
+#define _PAGE_PADDR_4U	  0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */
+#define _PAGE_SOFT_4U	  0x0000000000001F80 /* Software bits:         */
+#define _PAGE_EXEC_4U	  0x0000000000001000 /* Executable SW bit      */
+#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty)       */
+#define _PAGE_FILE_4U	  0x0000000000000800 /* Pagecache page         */
+#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd)       */
+#define _PAGE_READ_4U	  0x0000000000000200 /* Readable SW Bit        */
+#define _PAGE_WRITE_4U	  0x0000000000000100 /* Writable SW Bit        */
+#define _PAGE_PRESENT_4U  0x0000000000000080 /* Present                */
+#define _PAGE_L_4U	  0x0000000000000040 /* Locked TTE             */
+#define _PAGE_CP_4U	  0x0000000000000020 /* Cacheable in P-Cache   */
+#define _PAGE_CV_4U	  0x0000000000000010 /* Cacheable in V-Cache   */
+#define _PAGE_E_4U	  0x0000000000000008 /* side-Effect            */
+#define _PAGE_P_4U	  0x0000000000000004 /* Privileged Page        */
+#define _PAGE_W_4U	  0x0000000000000002 /* Writable               */
+
+/* SUN4V pte bits... */
+#define _PAGE_NFO_4V	  0x4000000000000000 /* No Fault Only          */
+#define _PAGE_SOFT2_4V	  0x3F00000000000000 /* Software bits, set 2   */
+#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty)       */
+#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd)       */
+#define _PAGE_READ_4V	  0x0800000000000000 /* Readable SW Bit        */
+#define _PAGE_WRITE_4V	  0x0400000000000000 /* Writable SW Bit        */
+#define _PAGE_PADDR_4V	  0x00FFFFFFFFFFE000 /* paddr[55:13]           */
+#define _PAGE_IE_4V	  0x0000000000001000 /* Invert Endianness      */
+#define _PAGE_E_4V	  0x0000000000000800 /* side-Effect            */
+#define _PAGE_CP_4V	  0x0000000000000400 /* Cacheable in P-Cache   */
+#define _PAGE_CV_4V	  0x0000000000000200 /* Cacheable in V-Cache   */
+#define _PAGE_P_4V	  0x0000000000000100 /* Privileged Page        */
+#define _PAGE_EXEC_4V	  0x0000000000000080 /* Executable Page        */
+#define _PAGE_W_4V	  0x0000000000000040 /* Writable               */
+#define _PAGE_SOFT_4V	  0x0000000000000030 /* Software bits          */
+#define _PAGE_FILE_4V	  0x0000000000000020 /* Pagecache page         */
+#define _PAGE_PRESENT_4V  0x0000000000000010 /* Present                */
+#define _PAGE_RESV_4V	  0x0000000000000008 /* Reserved               */
+#define _PAGE_SZ16GB_4V	  0x0000000000000007 /* 16GB Page              */
+#define _PAGE_SZ2GB_4V	  0x0000000000000006 /* 2GB Page               */
+#define _PAGE_SZ256MB_4V  0x0000000000000005 /* 256MB Page             */
+#define _PAGE_SZ32MB_4V	  0x0000000000000004 /* 32MB Page              */
+#define _PAGE_SZ4MB_4V	  0x0000000000000003 /* 4MB Page               */
+#define _PAGE_SZ512K_4V	  0x0000000000000002 /* 512K Page              */
+#define _PAGE_SZ64K_4V	  0x0000000000000001 /* 64K Page               */
+#define _PAGE_SZ8K_4V	  0x0000000000000000 /* 8K Page                */
+
+#if PAGE_SHIFT == 13
+#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
+#elif PAGE_SHIFT == 16
+#define _PAGE_SZBITS_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ64K_4V
+#elif PAGE_SHIFT == 19
+#define _PAGE_SZBITS_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ512K_4V
+#elif PAGE_SHIFT == 22
+#define _PAGE_SZBITS_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ4MB_4V
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ512K_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ64K_4V
+#endif
+
+#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
+pgprot_t PAGE_KERNEL __read_mostly;
+EXPORT_SYMBOL(PAGE_KERNEL);
+
+pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
+pgprot_t PAGE_COPY __read_mostly;
+pgprot_t PAGE_EXEC __read_mostly;
+unsigned long pg_iobits __read_mostly;
+
+unsigned long _PAGE_IE __read_mostly;
+unsigned long _PAGE_E __read_mostly;
+unsigned long _PAGE_CACHE __read_mostly;
+
+static void prot_init_common(unsigned long page_none,
+			     unsigned long page_shared,
+			     unsigned long page_copy,
+			     unsigned long page_readonly,
+			     unsigned long page_exec_bit)
+{
+	PAGE_COPY = __pgprot(page_copy);
+
+	protection_map[0x0] = __pgprot(page_none);
+	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x4] = __pgprot(page_readonly);
+	protection_map[0x5] = __pgprot(page_readonly);
+	protection_map[0x6] = __pgprot(page_copy);
+	protection_map[0x7] = __pgprot(page_copy);
+	protection_map[0x8] = __pgprot(page_none);
+	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xc] = __pgprot(page_readonly);
+	protection_map[0xd] = __pgprot(page_readonly);
+	protection_map[0xe] = __pgprot(page_shared);
+	protection_map[0xf] = __pgprot(page_shared);
+}
+
+static void __init sun4u_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				_PAGE_CACHE_4U | _PAGE_P_4U |
+				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				_PAGE_EXEC_4U);
+	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				       _PAGE_CACHE_4U | _PAGE_P_4U |
+				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				       _PAGE_EXEC_4U | _PAGE_L_4U);
+	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
+
+	_PAGE_IE = _PAGE_IE_4U;
+	_PAGE_E = _PAGE_E_4U;
+	_PAGE_CACHE = _PAGE_CACHE_4U;
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
+		     __ACCESS_BITS_4U | _PAGE_E_4U);
+
+	kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
+		0xfffff80000000000;
+	kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U |
+				_PAGE_P_4U | _PAGE_W_4U);
+
+	_PAGE_SZBITS = _PAGE_SZBITS_4U;
+	_PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
+			      _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
+			      _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
+
+
+	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
+	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+
+	page_exec_bit = _PAGE_EXEC_4U;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+static void __init sun4v_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
+				_PAGE_CACHE_4V | _PAGE_P_4V |
+				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
+				_PAGE_EXEC_4V);
+	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
+	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
+
+	_PAGE_IE = _PAGE_IE_4V;
+	_PAGE_E = _PAGE_E_4V;
+	_PAGE_CACHE = _PAGE_CACHE_4V;
+
+	kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
+		0xfffff80000000000;
+	kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V |
+				_PAGE_P_4V | _PAGE_W_4V);
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
+		     __ACCESS_BITS_4V | _PAGE_E_4V);
+
+	_PAGE_SZBITS = _PAGE_SZBITS_4V;
+	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
+			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
+			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
+			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
+
+	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
+	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+
+	page_exec_bit = _PAGE_EXEC_4V;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+unsigned long pte_sz_bits(unsigned long sz)
+{
+	if (tlb_type == hypervisor) {
+		switch (sz) {
+		case 8 * 1024:
+		default:
+			return _PAGE_SZ8K_4V;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4V;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4V;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4V;
+		}
+	} else {
+		switch (sz) {
+		case 8 * 1024:
+		default:
+			return _PAGE_SZ8K_4U;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4U;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4U;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4U;
+		}
+	}
+}
+
+pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
+{
+	pte_t pte;
+	if (tlb_type == hypervisor) {
+		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) &
+				~(unsigned long)_PAGE_CACHE_4V);
+	} else {
+		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) &
+				~(unsigned long)_PAGE_CACHE_4U);
+	}
+	pte_val(pte) |= (((unsigned long)space) << 32);
+	pte_val(pte) |= pte_sz_bits(page_size);
+	return pte;
+}
+
+unsigned long pte_present(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_PRESENT_4V : _PAGE_PRESENT_4U));
+}
+
+unsigned long pte_file(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_FILE_4V : _PAGE_FILE_4U));
+}
+
+unsigned long pte_read(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_READ_4V : _PAGE_READ_4U));
+}
+
+unsigned long pte_exec(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_EXEC_4V : _PAGE_EXEC_4U));
+}
+
+unsigned long pte_write(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_WRITE_4V : _PAGE_WRITE_4U));
+}
+
+unsigned long pte_dirty(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U));
+}
+
+unsigned long pte_young(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U));
+}
+
+pte_t pte_wrprotect(pte_t pte)
+{
+	unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_WRITE_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_rdprotect(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_READ_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_READ_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkclean(pte_t pte)
+{
+	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkold(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_ACCESSED_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkyoung(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_ACCESSED_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkwrite(pte_t pte)
+{
+	unsigned long mask = _PAGE_WRITE_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_WRITE_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkdirty(pte_t pte)
+{
+	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkhuge(pte_t pte)
+{
+	unsigned long mask = _PAGE_SZHUGE_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_SZHUGE_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pgoff_to_pte(unsigned long off)
+{
+	unsigned long bit = _PAGE_FILE_4U;
+
+	if (tlb_type == hypervisor)
+		bit = _PAGE_FILE_4V;
+
+	return __pte((off << PAGE_SHIFT) | bit);
+}
+
+pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	unsigned long val = pgprot_val(prot);
+	unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U;
+	unsigned long on = _PAGE_E_4U;
+
+	if (tlb_type == hypervisor) {
+		off = _PAGE_CP_4V | _PAGE_CV_4V;
+		on = _PAGE_E_4V;
+	}
+
+	return __pgprot((val & ~off) | on);
+}
+
+pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	unsigned long sz_bits = _PAGE_SZBITS_4U;
+
+	if (tlb_type == hypervisor)
+		sz_bits = _PAGE_SZBITS_4V;
+
+	return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits);
+}
+
+unsigned long pte_pfn(pte_t pte)
+{
+	unsigned long mask = _PAGE_PADDR_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	return (pte_val(pte) & mask) >> PAGE_SHIFT;
+}
+
+pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
+{
+	unsigned long preserve_mask;
+	unsigned long val;
+
+	preserve_mask = (_PAGE_PADDR_4U |
+			 _PAGE_MODIFIED_4U |
+			 _PAGE_ACCESSED_4U |
+			 _PAGE_CP_4U |
+			 _PAGE_CV_4U |
+			 _PAGE_E_4U |
+			 _PAGE_PRESENT_4U |
+			 _PAGE_SZBITS_4U);
+	if (tlb_type == hypervisor)
+		preserve_mask = (_PAGE_PADDR_4V |
+				 _PAGE_MODIFIED_4V |
+				 _PAGE_ACCESSED_4V |
+				 _PAGE_CP_4V |
+				 _PAGE_CV_4V |
+				 _PAGE_E_4V |
+				 _PAGE_PRESENT_4V |
+				 _PAGE_SZBITS_4V);
+
+	val = (pte_val(orig_pte) & preserve_mask);
+
+	return __pte(val | (pgprot_val(new_prot) & ~preserve_mask));
+}
+
+static unsigned long kern_large_tte(unsigned long paddr)
+{
+	unsigned long val;
+
+	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
+	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
+		       _PAGE_EXEC_4V | _PAGE_W_4V);
+
+	return val | paddr;
+}
+
+/*
+ * Translate a PROM mapping captured at boot time into a physical address.
+ * The second parameter, when non-NULL, is set to 0 on success, 1 on failure.
+ */
+unsigned long prom_virt_to_phys(unsigned long promva, int *error)
+{
+	unsigned long mask;
+	int i;
+
+	mask = _PAGE_PADDR_4U;
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	for (i = 0; i < prom_trans_ents; i++) {
+		struct linux_prom_translation *p = &prom_trans[i];
+
+		if (promva >= p->virt &&
+		    promva < (p->virt + p->size)) {
+			unsigned long base = p->data & mask;
+
+			if (error)
+				*error = 0;
+			return base + (promva & (8192 - 1));
+		}
+	}
+	if (error)
+		*error = 1;
+	return 0UL;
+}
+
+/* XXX We should kill off this ugly thing at some point. XXX */
+unsigned long sun4u_get_pte(unsigned long addr)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long mask = _PAGE_PADDR_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	if (addr >= PAGE_OFFSET)
+		return addr & mask;
+
+	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
+		return prom_virt_to_phys(addr, NULL);
+
+	pgdp = pgd_offset_k(addr);
+	pudp = pud_offset(pgdp, addr);
+	pmdp = pmd_offset(pudp, addr);
+	ptep = pte_offset_kernel(pmdp, addr);
+
+	return pte_val(*ptep) & mask;
+}
+
+/* If not locked, zap it. */
+void __flush_tlb_all(void)
+{
+	unsigned long pstate;
+	int i;
+
+	__asm__ __volatile__("flushw\n\t"
+			     "rdpr	%%pstate, %0\n\t"
+			     "wrpr	%0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+	if (tlb_type == spitfire) {
+		for (i = 0; i < 64; i++) {
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				spitfire_put_dtlb_data(i, 0x0UL);
+			}
+
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+				spitfire_put_itlb_data(i, 0x0UL);
+			}
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		cheetah_flush_dtlb_all();
+		cheetah_flush_itlb_all();
+	}
+	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
+			     : : "r" (pstate));
+}
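
Every accessor above follows the same two-mask shape.  A condensed
sketch of the idiom (the PTE_PICK macro is illustrative, not part of
the patch):

#define PTE_PICK(bit) \
	((tlb_type == hypervisor) ? bit##_4V : bit##_4U)

/* pte_dirty() then reduces to: */
static inline unsigned long sketch_pte_dirty(pte_t pte)
{
	return pte_val(pte) & PTE_PICK(_PAGE_MODIFIED);
}

As the changelog says, this is the simple and inefficient form; the
per-call branch is a candidate for later removal by boot-time code
patching.
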
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index c5dc4b0cc1c..975242ab88e 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -85,8 +85,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
-	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
-	       _PAGE_CV    | _PAGE_P | _PAGE_W);
+	tte = pgprot_val(PAGE_KERNEL_LOCKED);
 	tsb_paddr = __pa(mm->context.tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
@@ -99,55 +98,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 #ifdef DCACHE_ALIASING_POSSIBLE
 		base += (tsb_paddr & 8192);
 #endif
-		tte |= _PAGE_SZ8K;
 		page_sz = 8192;
 		break;
 
 	case 8192 << 1:
 		tsb_reg = 0x1UL;
-		tte |= _PAGE_SZ64K;
 		page_sz = 64 * 1024;
 		break;
 
 	case 8192 << 2:
 		tsb_reg = 0x2UL;
-		tte |= _PAGE_SZ64K;
 		page_sz = 64 * 1024;
 		break;
 
 	case 8192 << 3:
 		tsb_reg = 0x3UL;
-		tte |= _PAGE_SZ64K;
 		page_sz = 64 * 1024;
 		break;
 
 	case 8192 << 4:
 		tsb_reg = 0x4UL;
-		tte |= _PAGE_SZ512K;
 		page_sz = 512 * 1024;
 		break;
 
 	case 8192 << 5:
 		tsb_reg = 0x5UL;
-		tte |= _PAGE_SZ512K;
 		page_sz = 512 * 1024;
 		break;
 
 	case 8192 << 6:
 		tsb_reg = 0x6UL;
-		tte |= _PAGE_SZ512K;
 		page_sz = 512 * 1024;
 		break;
 
 	case 8192 << 7:
 		tsb_reg = 0x7UL;
-		tte |= _PAGE_SZ4MB;
 		page_sz = 4 * 1024 * 1024;
 		break;
 
 	default:
 		BUG();
 	};
+	tte |= pte_sz_bits(page_sz);
 
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
 		/* Physical mapping, no locked TLB entry for TSB.  */
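
With the size bits folded into pte_sz_bits(), the TTE used to map the
per-process TSB is just the boot-selected protection bits plus the
size bits.  Sketch (hypothetical helper):

static inline unsigned long tsb_map_tte(unsigned long page_sz)
{
	return pgprot_val(PAGE_KERNEL_LOCKED) | pte_sz_bits(page_sz);
}
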
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index a480007f0a9..bd8bce704a9 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -90,134 +90,48 @@
 
 #endif /* !(__ASSEMBLY__) */
 
-/* Spitfire/Cheetah TTE bits. */
-#define _PAGE_VALID	_AC(0x8000000000000000,UL) /* Valid TTE              */
-#define _PAGE_R		_AC(0x8000000000000000,UL) /* Keep ref bit up to date*/
-#define _PAGE_SZ4MB	_AC(0x6000000000000000,UL) /* 4MB Page               */
-#define _PAGE_SZ512K	_AC(0x4000000000000000,UL) /* 512K Page              */
-#define _PAGE_SZ64K	_AC(0x2000000000000000,UL) /* 64K Page               */
-#define _PAGE_SZ8K	_AC(0x0000000000000000,UL) /* 8K Page                */
-#define _PAGE_NFO	_AC(0x1000000000000000,UL) /* No Fault Only          */
-#define _PAGE_IE	_AC(0x0800000000000000,UL) /* Invert Endianness      */
-#define _PAGE_SOFT2	_AC(0x07FC000000000000,UL) /* Software bits, set 2   */
-#define _PAGE_RES1	_AC(0x0002000000000000,UL) /* Reserved               */
-#define _PAGE_SZ32MB	_AC(0x0001000000000000,UL) /* (Panther) 32MB page    */
-#define _PAGE_SZ256MB	_AC(0x2001000000000000,UL) /* (Panther) 256MB page   */
-#define _PAGE_SN	_AC(0x0000800000000000,UL) /* (Cheetah) Snoop        */
-#define _PAGE_RES2	_AC(0x0000780000000000,UL) /* Reserved               */
-#define _PAGE_PADDR_SF	_AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/
-#define _PAGE_PADDR	_AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */
-#define _PAGE_SOFT	_AC(0x0000000000001F80,UL) /* Software bits          */
-#define _PAGE_L		_AC(0x0000000000000040,UL) /* Locked TTE             */
-#define _PAGE_CP	_AC(0x0000000000000020,UL) /* Cacheable in P-Cache   */
-#define _PAGE_CV	_AC(0x0000000000000010,UL) /* Cacheable in V-Cache   */
-#define _PAGE_E		_AC(0x0000000000000008,UL) /* side-Effect            */
-#define _PAGE_P		_AC(0x0000000000000004,UL) /* Privileged Page        */
-#define _PAGE_W		_AC(0x0000000000000002,UL) /* Writable               */
-#define _PAGE_G		_AC(0x0000000000000001,UL) /* Global                 */
-
-#define _PAGE_ALL_SZ_BITS	\
-	(_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \
-	 _PAGE_SZ8K  | _PAGE_SZ32MB | _PAGE_SZ256MB)
-
-/* Here are the SpitFire software bits we use in the TTE's.
- *
- * WARNING: If you are going to try and start using some
- *          of the soft2 bits, you will need to make
- *          modifications to the swap entry implementation.
- *	    For example, one thing that could happen is that
- *          swp_entry_to_pte() would BUG_ON() if you tried
- *          to use one of the soft2 bits for _PAGE_FILE.
- *
- * Like other architectures, I have aliased _PAGE_FILE with
- * _PAGE_MODIFIED.  This works because _PAGE_FILE is never
- * interpreted that way unless _PAGE_PRESENT is clear.
- */
-#define _PAGE_EXEC	_AC(0x0000000000001000,UL)	/* Executable SW bit */
-#define _PAGE_MODIFIED	_AC(0x0000000000000800,UL)	/* Modified (dirty)  */
-#define _PAGE_FILE	_AC(0x0000000000000800,UL)	/* Pagecache page    */
-#define _PAGE_ACCESSED	_AC(0x0000000000000400,UL)	/* Accessed (ref'd)  */
-#define _PAGE_READ	_AC(0x0000000000000200,UL)	/* Readable SW Bit   */
-#define _PAGE_WRITE	_AC(0x0000000000000100,UL)	/* Writable SW Bit   */
-#define _PAGE_PRESENT	_AC(0x0000000000000080,UL)	/* Present           */
-
-#if PAGE_SHIFT == 13
-#define _PAGE_SZBITS	_PAGE_SZ8K
-#elif PAGE_SHIFT == 16
-#define _PAGE_SZBITS	_PAGE_SZ64K
-#elif PAGE_SHIFT == 19
-#define _PAGE_SZBITS	_PAGE_SZ512K
-#elif PAGE_SHIFT == 22
-#define _PAGE_SZBITS	_PAGE_SZ4MB
-#else
-#error Wrong PAGE_SHIFT specified
-#endif
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define _PAGE_SZHUGE	_PAGE_SZ4MB
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define _PAGE_SZHUGE	_PAGE_SZ512K
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE	_PAGE_SZ64K
-#endif
-
-#define _PAGE_CACHE	(_PAGE_CP | _PAGE_CV)
-
-#define __DIRTY_BITS	(_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
-#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
-#define __PRIV_BITS	_PAGE_P
-
-#define PAGE_NONE	__pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE)
-
-/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
-#define PAGE_SHARED	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
-
-#define PAGE_COPY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS | _PAGE_EXEC)
-
-#define PAGE_READONLY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS | _PAGE_EXEC)
-
-#define PAGE_KERNEL	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __PRIV_BITS | \
-				  __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC)
-
-#define PAGE_SHARED_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
-					  _PAGE_CACHE | \
-					  __ACCESS_BITS | _PAGE_WRITE)
-
-#define PAGE_COPY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
-					  _PAGE_CACHE | __ACCESS_BITS)
-
-#define PAGE_READONLY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
-					  _PAGE_CACHE | __ACCESS_BITS)
-
-#define _PFN_MASK	_PAGE_PADDR
-
-#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \
-		   __ACCESS_BITS | _PAGE_E)
-
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY_NOEXEC
-#define __P010	PAGE_COPY_NOEXEC
-#define __P011	PAGE_COPY_NOEXEC
-#define __P100	PAGE_READONLY
-#define __P101	PAGE_READONLY
-#define __P110	PAGE_COPY
-#define __P111	PAGE_COPY
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY_NOEXEC
-#define __S010	PAGE_SHARED_NOEXEC
-#define __S011	PAGE_SHARED_NOEXEC
-#define __S100	PAGE_READONLY
-#define __S101	PAGE_READONLY
-#define __S110	PAGE_SHARED
-#define __S111	PAGE_SHARED
+/* PTE bits which are the same in SUN4U and SUN4V format.  */
+#define _PAGE_VALID	0x8000000000000000 /* Valid TTE              */
+#define _PAGE_R	  	0x8000000000000000 /* Keep ref bit up to date*/
+
+/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+#define __P000	__pgprot(0)
+#define __P001	__pgprot(0)
+#define __P010	__pgprot(0)
+#define __P011	__pgprot(0)
+#define __P100	__pgprot(0)
+#define __P101	__pgprot(0)
+#define __P110	__pgprot(0)
+#define __P111	__pgprot(0)
+
+#define __S000	__pgprot(0)
+#define __S001	__pgprot(0)
+#define __S010	__pgprot(0)
+#define __S011	__pgprot(0)
+#define __S100	__pgprot(0)
+#define __S101	__pgprot(0)
+#define __S110	__pgprot(0)
+#define __S111	__pgprot(0)
 
 #ifndef __ASSEMBLY__
 
+extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
+
+extern unsigned long pte_sz_bits(unsigned long size);
+
+extern pgprot_t PAGE_KERNEL;
+extern pgprot_t PAGE_KERNEL_LOCKED;
+extern pgprot_t PAGE_COPY;
+
+/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
+extern unsigned long _PAGE_IE;
+extern unsigned long _PAGE_E;
+extern unsigned long _PAGE_CACHE;
+
+extern unsigned long pg_iobits;
+extern unsigned long _PAGE_ALL_SZ_BITS;
+extern unsigned long _PAGE_SZBITS;
+
 extern unsigned long phys_base;
 extern unsigned long pfn_base;
 
@@ -229,27 +143,12 @@ extern struct page *mem_map_zero;
  * the first physical page in the machine is at some huge physical address,
  * such as 4GB.   This is common on a partitioned E10000, for example.
  */
-
-#define pfn_pte(pfn, prot)	\
-	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS)
+extern pte_t pfn_pte(unsigned long, pgprot_t);
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+extern unsigned long pte_pfn(pte_t);
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+extern pte_t pte_modify(pte_t, pgprot_t);
 
-#define pte_pfn(x)		((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT)
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
-
-static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
-{
-	pte_t __pte;
-	const unsigned long preserve_mask = (_PFN_MASK |
-					     _PAGE_MODIFIED | _PAGE_ACCESSED |
-					     _PAGE_CACHE | _PAGE_E |
-					     _PAGE_PRESENT | _PAGE_SZBITS);
-
-	pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) |
-		(pgprot_val(new_prot) & ~preserve_mask);
-
-	return __pte;
-}
 #define pmd_set(pmdp, ptep)	\
 	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
 #define pud_set(pudp, pmdp)	\
@@ -259,8 +158,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 #define pmd_page(pmd) 			virt_to_page((void *)__pmd_page(pmd))
 #define pud_page(pud)		\
 	((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
-#define pte_none(pte) 			(!pte_val(pte))
-#define pte_present(pte)		(pte_val(pte) & _PAGE_PRESENT)
 #define pmd_none(pmd)			(!pmd_val(pmd))
 #define pmd_bad(pmd)			(0)
 #define pmd_present(pmd)		(pmd_val(pmd) != 0U)
@@ -270,30 +167,29 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 #define pud_present(pud)		(pud_val(pud) != 0U)
 #define pud_clear(pudp)			(pud_val(*(pudp)) = 0U)
 
+/* Same in both SUN4V and SUN4U.  */
+#define pte_none(pte) 			(!pte_val(pte))
+
+extern unsigned long pte_present(pte_t);
+
 /* The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
-#define pte_exec(pte)		(pte_val(pte) & _PAGE_EXEC)
-#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
-#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
-#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
-#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
-#define pte_rdprotect(pte)	\
-	(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
-#define pte_mkclean(pte)	\
-	(__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
-#define pte_mkold(pte)		\
-	(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
-
-/* Permanent address of a page. */
-#define __page_address(page)	page_address(page)
+extern unsigned long pte_read(pte_t);
+extern unsigned long pte_exec(pte_t);
+extern unsigned long pte_write(pte_t);
+extern unsigned long pte_dirty(pte_t);
+extern unsigned long pte_young(pte_t);
+extern pte_t pte_wrprotect(pte_t);
+extern pte_t pte_rdprotect(pte_t);
+extern pte_t pte_mkclean(pte_t);
+extern pte_t pte_mkold(pte_t);
 
 /* Be very careful when you change these three, they are delicate. */
-#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
-#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_WRITE))
-#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))
-#define pte_mkhuge(pte)		(__pte(pte_val(pte) | _PAGE_SZHUGE))
+extern pte_t pte_mkyoung(pte_t);
+extern pte_t pte_mkwrite(pte_t);
+extern pte_t pte_mkdirty(pte_t);
+extern pte_t pte_mkhuge(pte_t);
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -328,6 +224,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 
 	/* It is more efficient to let flush_tlb_kernel_range()
 	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 *             and SUN4V pte layout, so this inline test is fine.
 	 */
 	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
 		tlb_batch_add(mm, addr, ptep, orig);
@@ -362,42 +261,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
 /* File offset in PTE support. */
-#define pte_file(pte)		(pte_val(pte) & _PAGE_FILE)
+extern unsigned long pte_file(pte_t);
 #define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
-#define pgoff_to_pte(off)	(__pte(((off) << PAGE_SHIFT) | _PAGE_FILE))
+extern pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)
 
 extern unsigned long prom_virt_to_phys(unsigned long, int *);
 
-static __inline__ unsigned long
-sun4u_get_pte (unsigned long addr)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	if (addr >= PAGE_OFFSET)
-		return addr & _PAGE_PADDR;
-	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
-		return prom_virt_to_phys(addr, NULL);
-	pgdp = pgd_offset_k(addr);
-	pudp = pud_offset(pgdp, addr);
-	pmdp = pmd_offset(pudp, addr);
-	ptep = pte_offset_kernel(pmdp, addr);
-	return pte_val(*ptep) & _PAGE_PADDR;
-}
+extern unsigned long sun4u_get_pte(unsigned long);
 
-static __inline__ unsigned long
-__get_phys (unsigned long addr)
+static inline unsigned long __get_phys(unsigned long addr)
 {
-	return sun4u_get_pte (addr);
+	return sun4u_get_pte(addr);
 }
 
-static __inline__ int
-__get_iospace (unsigned long addr)
+static inline int __get_iospace(unsigned long addr)
 {
-	return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
+	return ((sun4u_get_pte(addr) & 0xf0000000) >> 28);
 }
 
 extern unsigned long *sparc64_valid_addr_bitmap;
@@ -411,9 +291,7 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 			       unsigned long size, pgprot_t prot);
 
 /* Clear virtual and physical cachability, set side-effect bit.  */
-#define pgprot_noncached(prot) \
-	(__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \
-	 _PAGE_E))
+extern pgprot_t pgprot_noncached(pgprot_t);
 
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
-- 
cgit v1.2.3-70-g09d2


From 1a7a242c898dd131f2df005c492e9b44fb8900e0 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 11 Feb 2006 23:24:30 -0800
Subject: [SPARC64]: Recognize "virtual-console" as input and output console
 device.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/setup.c | 5 +++++
 arch/sparc64/prom/console.c | 6 ++++++
 include/asm-sparc/oplib.h   | 2 ++
 include/asm-sparc64/oplib.h | 2 ++
 4 files changed, 15 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index ca75f3b26a3..4f253a0755b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -405,6 +405,11 @@ static int __init set_preferred_console(void)
 		serial_console = 2;
 	} else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) {
 		serial_console = 3;
+	} else if (idev == PROMDEV_IVCONS && odev == PROMDEV_OVCONS) {
+		/* sunhv_console_init() doesn't check the serial_console
+		 * value anyway...
+		 */
+		serial_console = 4;
 	} else {
 		prom_printf("Inconsistent console: "
 			    "input %d, output %d\n",
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index ac6d035dd15..7c25c54cefd 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -102,6 +102,9 @@ prom_query_input_device(void)
 	if (!strncmp (propb, "rsc", 3))
 		return PROMDEV_IRSC;
 
+	if (!strncmp (propb, "virtual-console", 3))
+		return PROMDEV_IVCONS;
+
 	if (strncmp (propb, "tty", 3) || !propb[3])
 		return PROMDEV_I_UNK;
 
@@ -143,6 +146,9 @@ prom_query_output_device(void)
 	if (!strncmp (propb, "rsc", 3))
 		return PROMDEV_ORSC;
 
+	if (!strncmp (propb, "virtual-console", 3))
+		return PROMDEV_OVCONS;
+
 	if (strncmp (propb, "tty", 3) || !propb[3])
 		return PROMDEV_O_UNK;
 
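Note that, like the "rsc" tests above, only the first three characters
are compared, so any property value beginning with "vir" selects the
virtual console.  A stricter match would look like this sketch
(hypothetical; the patch keeps the 3-character compare for consistency):

	if (!strncmp(propb, "virtual-console",
		     sizeof("virtual-console") - 1))
		return PROMDEV_OVCONS;
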
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index d0d76b30eb4..f283f8aaf6a 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -165,6 +165,7 @@ enum prom_input_device {
 	PROMDEV_ITTYA,			/* input from ttya */
 	PROMDEV_ITTYB,			/* input from ttyb */
 	PROMDEV_IRSC,			/* input from rsc */
+	PROMDEV_IVCONS,			/* input from virtual-console */
 	PROMDEV_I_UNK,
 };
 
@@ -177,6 +178,7 @@ enum prom_output_device {
 	PROMDEV_OTTYA,			/* to ttya */
 	PROMDEV_OTTYB,			/* to ttyb */
 	PROMDEV_ORSC,			/* to rsc */
+	PROMDEV_OVCONS,			/* to virtual-console */
 	PROMDEV_O_UNK,
 };
 
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index ce5066ef2dd..0631d13475f 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -167,6 +167,7 @@ enum prom_input_device {
 	PROMDEV_ITTYA,			/* input from ttya */
 	PROMDEV_ITTYB,			/* input from ttyb */
 	PROMDEV_IRSC,			/* input from rsc */
+	PROMDEV_IVCONS,			/* input from virtual-console */
 	PROMDEV_I_UNK,
 };
 
@@ -179,6 +180,7 @@ enum prom_output_device {
 	PROMDEV_OTTYA,			/* to ttya */
 	PROMDEV_OTTYB,			/* to ttyb */
 	PROMDEV_ORSC,			/* to rsc */
+	PROMDEV_OVCONS,			/* to virtual-console */
 	PROMDEV_O_UNK,
 };
 
-- 
cgit v1.2.3-70-g09d2


From ff02e0d26f139ad95ec3a7e94f88faccaa180dff Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sun, 12 Feb 2006 17:07:51 -0800
Subject: [SPARC64]: Move PTE field definitions back into asm/pgtable.h

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/mm/init.c        | 84 -----------------------------------------
 include/asm-sparc64/pgtable.h | 88 ++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 86 insertions(+), 86 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 9c2fc239f3e..81f9f4bffaf 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1334,90 +1334,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-/* SUN4U pte bits... */
-#define _PAGE_SZ4MB_4U	  0x6000000000000000 /* 4MB Page               */
-#define _PAGE_SZ512K_4U	  0x4000000000000000 /* 512K Page              */
-#define _PAGE_SZ64K_4U	  0x2000000000000000 /* 64K Page               */
-#define _PAGE_SZ8K_4U	  0x0000000000000000 /* 8K Page                */
-#define _PAGE_NFO_4U	  0x1000000000000000 /* No Fault Only          */
-#define _PAGE_IE_4U	  0x0800000000000000 /* Invert Endianness      */
-#define _PAGE_SOFT2_4U	  0x07FC000000000000 /* Software bits, set 2   */
-#define _PAGE_RES1_4U	  0x0002000000000000 /* Reserved               */
-#define _PAGE_SZ32MB_4U	  0x0001000000000000 /* (Panther) 32MB page    */
-#define _PAGE_SZ256MB_4U  0x2001000000000000 /* (Panther) 256MB page   */
-#define _PAGE_SN_4U	  0x0000800000000000 /* (Cheetah) Snoop        */
-#define _PAGE_RES2_4U	  0x0000780000000000 /* Reserved               */
-#define _PAGE_PADDR_4U	  0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */
-#define _PAGE_SOFT_4U	  0x0000000000001F80 /* Software bits:         */
-#define _PAGE_EXEC_4U	  0x0000000000001000 /* Executable SW bit      */
-#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty)       */
-#define _PAGE_FILE_4U	  0x0000000000000800 /* Pagecache page         */
-#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd)       */
-#define _PAGE_READ_4U	  0x0000000000000200 /* Readable SW Bit        */
-#define _PAGE_WRITE_4U	  0x0000000000000100 /* Writable SW Bit        */
-#define _PAGE_PRESENT_4U  0x0000000000000080 /* Present                */
-#define _PAGE_L_4U	  0x0000000000000040 /* Locked TTE             */
-#define _PAGE_CP_4U	  0x0000000000000020 /* Cacheable in P-Cache   */
-#define _PAGE_CV_4U	  0x0000000000000010 /* Cacheable in V-Cache   */
-#define _PAGE_E_4U	  0x0000000000000008 /* side-Effect            */
-#define _PAGE_P_4U	  0x0000000000000004 /* Privileged Page        */
-#define _PAGE_W_4U	  0x0000000000000002 /* Writable               */
-
-/* SUN4V pte bits... */
-#define _PAGE_NFO_4V	  0x4000000000000000 /* No Fault Only          */
-#define _PAGE_SOFT2_4V	  0x3F00000000000000 /* Software bits, set 2   */
-#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty)       */
-#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd)       */
-#define _PAGE_READ_4V	  0x0800000000000000 /* Readable SW Bit        */
-#define _PAGE_WRITE_4V	  0x0400000000000000 /* Writable SW Bit        */
-#define _PAGE_PADDR_4V	  0x00FFFFFFFFFFE000 /* paddr[55:13]           */
-#define _PAGE_IE_4V	  0x0000000000001000 /* Invert Endianness      */
-#define _PAGE_E_4V	  0x0000000000000800 /* side-Effect            */
-#define _PAGE_CP_4V	  0x0000000000000400 /* Cacheable in P-Cache   */
-#define _PAGE_CV_4V	  0x0000000000000200 /* Cacheable in V-Cache   */
-#define _PAGE_P_4V	  0x0000000000000100 /* Privileged Page        */
-#define _PAGE_EXEC_4V	  0x0000000000000080 /* Executable Page        */
-#define _PAGE_W_4V	  0x0000000000000040 /* Writable               */
-#define _PAGE_SOFT_4V	  0x0000000000000030 /* Software bits          */
-#define _PAGE_FILE_4V	  0x0000000000000020 /* Pagecache page         */
-#define _PAGE_PRESENT_4V  0x0000000000000010 /* Present                */
-#define _PAGE_RESV_4V	  0x0000000000000008 /* Reserved               */
-#define _PAGE_SZ16GB_4V	  0x0000000000000007 /* 16GB Page              */
-#define _PAGE_SZ2GB_4V	  0x0000000000000006 /* 2GB Page               */
-#define _PAGE_SZ256MB_4V  0x0000000000000005 /* 256MB Page             */
-#define _PAGE_SZ32MB_4V	  0x0000000000000004 /* 32MB Page              */
-#define _PAGE_SZ4MB_4V	  0x0000000000000003 /* 4MB Page               */
-#define _PAGE_SZ512K_4V	  0x0000000000000002 /* 512K Page              */
-#define _PAGE_SZ64K_4V	  0x0000000000000001 /* 64K Page               */
-#define _PAGE_SZ8K_4V	  0x0000000000000000 /* 8K Page                */
-
-#if PAGE_SHIFT == 13
-#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
-#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
-#elif PAGE_SHIFT == 16
-#define _PAGE_SZBITS_4U	_PAGE_SZ64K_4U
-#define _PAGE_SZBITS_4V	_PAGE_SZ64K_4V
-#elif PAGE_SHIFT == 19
-#define _PAGE_SZBITS_4U	_PAGE_SZ512K_4U
-#define _PAGE_SZBITS_4V	_PAGE_SZ512K_4V
-#elif PAGE_SHIFT == 22
-#define _PAGE_SZBITS_4U	_PAGE_SZ4MB_4U
-#define _PAGE_SZBITS_4V	_PAGE_SZ4MB_4V
-#else
-#error Wrong PAGE_SHIFT specified
-#endif
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
-#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define _PAGE_SZHUGE_4U	_PAGE_SZ512K_4U
-#define _PAGE_SZHUGE_4V	_PAGE_SZ512K_4V
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE_4U	_PAGE_SZ64K_4U
-#define _PAGE_SZHUGE_4V	_PAGE_SZ64K_4V
-#endif
-
 #define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
 #define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
 #define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index bd8bce704a9..3c02d5d9a53 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -91,8 +91,92 @@
 #endif /* !(__ASSEMBLY__) */
 
 /* PTE bits which are the same in SUN4U and SUN4V format.  */
-#define _PAGE_VALID	0x8000000000000000 /* Valid TTE              */
-#define _PAGE_R	  	0x8000000000000000 /* Keep ref bit up to date*/
+#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
+#define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
+
+/* SUN4U pte bits... */
+#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
+#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
+#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
+#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
+#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
+#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
+#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
+#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
+#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
+#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
+#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
+#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
+#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
+#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
+#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
+#define _PAGE_FILE_4U	  _AC(0x0000000000000800,UL) /* Pagecache page       */
+#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
+#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
+#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
+#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
+#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
+#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
+#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
+#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
+#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
+#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */
+
+/* SUN4V pte bits... */
+#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
+#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
+#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
+#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
+#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
+#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
+#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
+#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
+#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
+#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
+#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
+#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
+#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
+#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
+#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
+#define _PAGE_FILE_4V	  _AC(0x0000000000000020,UL) /* Pagecache page       */
+#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
+#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
+#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
+#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
+#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
+#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
+#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
+#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
+#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
+#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
+
+#if PAGE_SHIFT == 13
+#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
+#elif PAGE_SHIFT == 16
+#define _PAGE_SZBITS_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ64K_4V
+#elif PAGE_SHIFT == 19
+#define _PAGE_SZBITS_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ512K_4V
+#elif PAGE_SHIFT == 22
+#define _PAGE_SZBITS_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ4MB_4V
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ512K_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ64K_4V
+#endif
 
 /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
 #define __P000	__pgprot(0)
-- 
cgit v1.2.3-70-g09d2


From cf627156c450cd5a0741b31f55181db3400d4887 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sun, 12 Feb 2006 21:10:07 -0800
Subject: [SPARC64]: Use inline patching for critical PTE operations.

This handles the SUN4U vs SUN4V PTE layout differences
with near-zero performance cost.
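
Each .sun4v_1insn_patch/.sun4v_2insn_patch entry below records the
address of the instruction(s) at the local label 661 plus the sun4v
replacement instructions.  Early in boot, once we know we are on a
hypervisor platform, those entries are walked and the sun4u
instructions are overwritten in place.  A minimal sketch of the
two-instruction patcher (struct layout and section symbol names are
assumptions based on how the entries are emitted):

	struct sun4v_2insn_patch_entry {
		unsigned int	addr;		/* address of insn at 661: */
		unsigned int	insns[2];	/* sun4v replacements */
	};

	/* Hypothetical section bounds from the linker script. */
	extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
					      __sun4v_2insn_patch_end;

	static void __init sun4v_patch_2insn(void)
	{
		struct sun4v_2insn_patch_entry *p = &__sun4v_2insn_patch;

		while (p < &__sun4v_2insn_patch_end) {
			unsigned long addr = p->addr;

			/* Overwrite both instructions, flushing the
			 * I-cache as we go.
			 */
			*(unsigned int *) (addr + 0x0) = p->insns[0];
			wmb();
			__asm__ __volatile__("flush	%0" : : "r" (addr + 0x0));

			*(unsigned int *) (addr + 0x4) = p->insns[1];
			wmb();
			__asm__ __volatile__("flush	%0" : : "r" (addr + 0x4));

			p++;
		}
	}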

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/mm/init.c        | 211 +-----------------
 include/asm-sparc64/pgtable.h | 488 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 488 insertions(+), 211 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 81f9f4bffaf..6f860c39db8 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1502,217 +1502,12 @@ unsigned long pte_sz_bits(unsigned long sz)
 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
 {
 	pte_t pte;
-	if (tlb_type == hypervisor) {
-		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) &
-				~(unsigned long)_PAGE_CACHE_4V);
-	} else {
-		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) &
-				~(unsigned long)_PAGE_CACHE_4U);
-	}
+
+	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
 	pte_val(pte) |= (((unsigned long)space) << 32);
 	pte_val(pte) |= pte_sz_bits(page_size);
-	return pte;
-}
-
-unsigned long pte_present(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_PRESENT_4V : _PAGE_PRESENT_4U));
-}
-
-unsigned long pte_file(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_FILE_4V : _PAGE_FILE_4U));
-}
-
-unsigned long pte_read(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_READ_4V : _PAGE_READ_4U));
-}
-
-unsigned long pte_exec(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_EXEC_4V : _PAGE_EXEC_4U));
-}
-
-unsigned long pte_write(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_WRITE_4V : _PAGE_WRITE_4U));
-}
-
-unsigned long pte_dirty(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U));
-}
-
-unsigned long pte_young(pte_t pte)
-{
-	return (pte_val(pte) &
-		((tlb_type == hypervisor) ?
-		 _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U));
-}
 
-pte_t pte_wrprotect(pte_t pte)
-{
-	unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_WRITE_4V | _PAGE_W_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_rdprotect(pte_t pte)
-{
-	unsigned long mask = _PAGE_R | _PAGE_READ_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_R | _PAGE_READ_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_mkclean(pte_t pte)
-{
-	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_mkold(pte_t pte)
-{
-	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_R | _PAGE_ACCESSED_4V;
-
-	return __pte(pte_val(pte) & ~mask);
-}
-
-pte_t pte_mkyoung(pte_t pte)
-{
-	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_R | _PAGE_ACCESSED_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pte_mkwrite(pte_t pte)
-{
-	unsigned long mask = _PAGE_WRITE_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_WRITE_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pte_mkdirty(pte_t pte)
-{
-	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pte_mkhuge(pte_t pte)
-{
-	unsigned long mask = _PAGE_SZHUGE_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_SZHUGE_4V;
-
-	return __pte(pte_val(pte) | mask);
-}
-
-pte_t pgoff_to_pte(unsigned long off)
-{
-	unsigned long bit = _PAGE_FILE_4U;
-
-	if (tlb_type == hypervisor)
-		bit = _PAGE_FILE_4V;
-
-	return __pte((off << PAGE_SHIFT) | bit);
-}
-
-pgprot_t pgprot_noncached(pgprot_t prot)
-{
-	unsigned long val = pgprot_val(prot);
-	unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U;
-	unsigned long on = _PAGE_E_4U;
-
-	if (tlb_type == hypervisor) {
-		off = _PAGE_CP_4V | _PAGE_CV_4V;
-		on = _PAGE_E_4V;
-	}
-
-	return __pgprot((val & ~off) | on);
-}
-
-pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
-{
-	unsigned long sz_bits = _PAGE_SZBITS_4U;
-
-	if (tlb_type == hypervisor)
-		sz_bits = _PAGE_SZBITS_4V;
-
-	return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits);
-}
-
-unsigned long pte_pfn(pte_t pte)
-{
-	unsigned long mask = _PAGE_PADDR_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_PADDR_4V;
-
-	return (pte_val(pte) & mask) >> PAGE_SHIFT;
-}
-
-pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
-{
-	unsigned long preserve_mask;
-	unsigned long val;
-
-	preserve_mask = (_PAGE_PADDR_4U |
-			 _PAGE_MODIFIED_4U |
-			 _PAGE_ACCESSED_4U |
-			 _PAGE_CP_4U |
-			 _PAGE_CV_4U |
-			 _PAGE_E_4U |
-			 _PAGE_PRESENT_4U |
-			 _PAGE_SZBITS_4U);
-	if (tlb_type == hypervisor)
-		preserve_mask = (_PAGE_PADDR_4V |
-				 _PAGE_MODIFIED_4V |
-				 _PAGE_ACCESSED_4V |
-				 _PAGE_CP_4V |
-				 _PAGE_CV_4V |
-				 _PAGE_E_4V |
-				 _PAGE_PRESENT_4V |
-				 _PAGE_SZBITS_4V);
-
-	val = (pte_val(orig_pte) & preserve_mask);
-
-	return __pte(val | (pgprot_val(new_prot) & ~preserve_mask));
+	return pte;
 }
 
 static unsigned long kern_large_tte(unsigned long paddr)
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 3c02d5d9a53..00eecbb52f9 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -227,11 +227,493 @@ extern struct page *mem_map_zero;
  * the first physical page in the machine is at some huge physical address,
  * such as 4GB.   This is common on a partitioned E10000, for example.
  */
-extern pte_t pfn_pte(unsigned long, pgprot_t);
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	unsigned long sz_bits;
+
+	BUILD_BUG_ON(!__builtin_constant_p(_PAGE_SZBITS_4U) ||
+		     !__builtin_constant_p(_PAGE_SZBITS_4V));
+
+	sz_bits = 0UL;
+	if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
+		BUILD_BUG_ON((_PAGE_SZBITS_4U & ~(0xfffffc0000000000UL)) ||
+			     (_PAGE_SZBITS_4V & ~(0x0000000000000fffUL)));
+		__asm__ __volatile__(
+		"\n661:	sethi		%%uhi(%1), %0\n"
+		"	sllx		%0, 32, %0\n"
+		"	.section	.sun4v_2insn_patch, \"ax\"\n"
+		"	.word		661b\n"
+		"	mov		%2, %0\n"
+		"	nop\n"
+		"	.previous\n"
+		: "=r" (sz_bits)
+		: "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
+	}
+	return __pte(paddr | sz_bits | pgprot_val(prot));
+}
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-extern unsigned long pte_pfn(pte_t);
+
+/* This one can be done with two shifts.  */
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	const unsigned long pte_paddr_shl_sun4u = 21;
+	const unsigned long pte_paddr_shr_sun4u = 21 + PAGE_SHIFT;
+	const unsigned long pte_paddr_shl_sun4v =  8;
+	const unsigned long pte_paddr_shr_sun4v =  8 + PAGE_SHIFT;
+	unsigned long ret;
+
+	__asm__ __volatile__(
+	"\n661:	sllx		%1, %2, %0\n"
+	"	srlx		%0, %3, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sllx		%1, %4, %0\n"
+	"	srlx		%0, %5, %0\n"
+	"	.previous\n"
+	: "=r" (ret)
+	: "r" (pte_val(pte)),
+	  "i" (pte_paddr_shl_sun4u), "i" (pte_paddr_shr_sun4u),
+	  "i" (pte_paddr_shl_sun4v), "i" (pte_paddr_shr_sun4v));
+
+	return ret;
+}
 #define pte_page(x) pfn_to_page(pte_pfn(x))
-extern pte_t pte_modify(pte_t, pgprot_t);
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+{
+	const unsigned long preserve_mask_sun4u = (_PAGE_PADDR_4U |
+						   _PAGE_MODIFIED_4U |
+						   _PAGE_ACCESSED_4U |
+						   _PAGE_CP_4U |
+						   _PAGE_CV_4U |
+						   _PAGE_E_4U |
+						   _PAGE_PRESENT_4U |
+						   _PAGE_SZBITS_4U);
+	const unsigned long preserve_mask_sun4v = (_PAGE_PADDR_4V |
+						   _PAGE_MODIFIED_4V |
+						   _PAGE_ACCESSED_4V |
+						   _PAGE_CP_4V |
+						   _PAGE_CV_4V |
+						   _PAGE_E_4V |
+						   _PAGE_PRESENT_4V |
+						   _PAGE_SZBITS_4V);
+	unsigned long mask, tmp;
+
+	/* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
+	 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
+	 *
+	 * Even if we use negation tricks the result is still a 6
+	 * instruction sequence, so don't try to play fancy and just
+	 * do the most straightforward implementation.
+	 *
+	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
+	 */
+
+	__asm__ __volatile__(
+	"\n661:	sethi		%%uhi(%2), %1\n"
+	"	sethi		%%hi(%2), %0\n"
+	"\n662:	or		%1, %%ulo(%2), %1\n"
+	"	or		%0, %%lo(%2), %0\n"
+	"\n663:	sllx		%1, 32, %1\n"
+	"	or		%0, %1, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%3), %1\n"
+	"	sethi		%%hi(%3), %0\n"
+	"	.word		662b\n"
+	"	or		%1, %%ulo(%3), %1\n"
+	"	or		%0, %%lo(%3), %0\n"
+	"	.word		663b\n"
+	"	sllx		%1, 32, %1\n"
+	"	or		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (mask), "=r" (tmp)
+	: "i" (preserve_mask_sun4u), "i" (preserve_mask_sun4v));
+
+	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
+}
+
+static inline pte_t pgoff_to_pte(unsigned long off)
+{
+	off <<= PAGE_SHIFT;
+
+	BUILD_BUG_ON((_PAGE_FILE_4U & ~0xfffUL) ||
+		     (_PAGE_FILE_4V & ~0xfffUL));
+
+	__asm__ __volatile__(
+	"\n661:	or		%0, %2, %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	or		%0, %3, %0\n"
+	"	.previous\n"
+	: "=r" (off)
+	: "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
+
+	return __pte(off);
+}
+
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	unsigned long val = pgprot_val(prot);
+
+	BUILD_BUG_ON(((_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U) & ~(0xfffUL)) ||
+		     ((_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V) & ~(0xfffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	andn		%0, %2, %0\n"
+	"	or		%0, %3, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	andn		%0, %4, %0\n"
+	"	or		%0, %3, %0\n"
+	"	.previous\n"
+	: "=r" (val)
+	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
+	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
+
+	return __pgprot(val);
+}
+/* Various pieces of code check for platform support by ifdef testing
+ * on "pgprot_noncached".  That's broken and should be fixed, but for
+ * now...
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_SZHUGE_4U;
+	const unsigned long mask_4v = _PAGE_SZHUGE_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0xfffffc0000000000UL)) ||
+		     (mask_4v & ~(0xfffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	sethi		%%uhi(%1), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	mov		%2, %0\n"
+	"	nop\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(pte_val(pte) | mask);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+	const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+	unsigned long val = pte_val(pte), tmp;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	or		%0, %3, %0\n"
+	"	nop\n"
+	"\n662:	nop\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%4), %1\n"
+	"	sllx		%1, 32, %1\n"
+	"	.word		662b\n"
+	"	or		%1, %%lo(%4), %1\n"
+	"	or		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (val), "=r" (tmp)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+	const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+	unsigned long val = pte_val(pte), tmp;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	andn		%0, %3, %0\n"
+	"	nop\n"
+	"\n662:	nop\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%4), %1\n"
+	"	sllx		%1, 32, %1\n"
+	"	.word		662b\n"
+	"	or		%1, %%lo(%4), %1\n"
+	"	andn		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (val), "=r" (tmp)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_WRITE_4U;
+	const unsigned long mask_4v = _PAGE_WRITE_4V;
+	unsigned long val = pte_val(pte), mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val | mask);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_WRITE_4U | _PAGE_W_4U;
+	const unsigned long mask_4v = _PAGE_WRITE_4V | _PAGE_W_4V;
+	unsigned long val = pte_val(pte), tmp;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	andn		%0, %3, %0\n"
+	"	nop\n"
+	"\n662:	nop\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%4), %1\n"
+	"	sllx		%1, 32, %1\n"
+	"	.word		662b\n"
+	"	or		%1, %%lo(%4), %1\n"
+	"	andn		%0, %1, %0\n"
+	"	.previous\n"
+	: "=r" (val), "=r" (tmp)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return __pte(val);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
+	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	mask |= _PAGE_R;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
+	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	mask |= _PAGE_R;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+static inline unsigned long pte_young(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
+	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_dirty(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_MODIFIED_4U;
+	const unsigned long mask_4v = _PAGE_MODIFIED_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_write(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_WRITE_4U;
+	const unsigned long mask_4v = _PAGE_WRITE_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_exec(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_EXEC_4U;
+	const unsigned long mask_4v = _PAGE_EXEC_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x00000000fffffc00UL)) ||
+		     (mask_4v & ~(0x0000000000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	sethi		%%hi(%1), %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	mov		%2, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_read(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_READ_4U;
+	const unsigned long mask_4v = _PAGE_READ_4V;
+	unsigned long mask;
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0xfffffc0000000000UL)));
+
+	__asm__ __volatile__(
+	"\n661:	mov		%1, %0\n"
+	"	nop\n"
+	"	.section	.sun4v_2insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	sethi		%%uhi(%2), %0\n"
+	"	sllx		%0, 32, %0\n"
+	"	.previous\n"
+	: "=r" (mask)
+	: "i" (mask_4u), "i" (mask_4v));
+
+	return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_file(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_FILE_4U;
+	const unsigned long mask_4v = _PAGE_FILE_4V;
+	unsigned long val = pte_val(pte);
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0x0000000000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	and		%0, %2, %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	and		%0, %3, %0\n"
+	"	.previous\n"
+	: "=r" (val)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return val;
+}
+
+static inline unsigned long pte_present(pte_t pte)
+{
+	const unsigned long mask_4u = _PAGE_PRESENT_4U;
+	const unsigned long mask_4v = _PAGE_PRESENT_4V;
+	unsigned long val = pte_val(pte);
+
+	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
+		     (mask_4v & ~(0x0000000000000fffUL)));
+
+	__asm__ __volatile__(
+	"\n661:	and		%0, %2, %0\n"
+	"	.section	.sun4v_1insn_patch, \"ax\"\n"
+	"	.word		661b\n"
+	"	and		%0, %3, %0\n"
+	"	.previous\n"
+	: "=r" (val)
+	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+
+	return val;
+}
 
 #define pmd_set(pmdp, ptep)	\
 	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
-- 
cgit v1.2.3-70-g09d2


From 85dfa19ba92f88fa1c1482f655c7247119dfdcd5 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 13 Feb 2006 00:02:16 -0800
Subject: [SPARC64]: Move devino_to_sysino out of pci_sun4v_asm.S

It is not PCI specific; it is for all system interrupts.
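
All of these hypervisor "fast trap" wrappers share one calling
convention: the function number is loaded into %o5, "ta
HV_FAST_TRAP" enters the hypervisor, the status comes back in %o0
and the first return value in %o1 (hence the "mov %o1, %o0" in the
delay slot of the retl).  From C the wrapper is just a function
call; a sketch of a caller (devhandle and devino are placeholder
values a real caller gets from firmware properties):

	unsigned long sysino;

	/* Translate the device interrupt number into the system
	 * interrupt number used by the other intr_* hypervisor
	 * services.
	 */
	sysino = sun4v_devino_to_sysino(devhandle, devino);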

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S         | 12 ++++++++++++
 arch/sparc64/kernel/pci_sun4v.h     |  2 --
 arch/sparc64/kernel/pci_sun4v_asm.S | 12 ------------
 include/asm-sparc64/hypervisor.h    |  5 +++++
 4 files changed, 17 insertions(+), 14 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index f51b66a1687..fa185f22705 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1695,3 +1695,15 @@ hard_smp_processor_id:
 	retl
 	 nop
 #endif
+
+	/* %o0: devhandle
+	 * %o1:	devino
+	 *
+	 * returns %o0: sysino
+	 */
+	.globl	pci_sun4v_devino_to_sysino
+sun4v_devino_to_sysino:
+	mov	HV_FAST_INTR_DEVINO2SYSINO, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 mov	%o1, %o0
diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h
index 00322ed0cf8..88f199e11a7 100644
--- a/arch/sparc64/kernel/pci_sun4v.h
+++ b/arch/sparc64/kernel/pci_sun4v.h
@@ -6,8 +6,6 @@
 #ifndef _PCI_SUN4V_H
 #define _PCI_SUN4V_H
 
-extern unsigned long pci_sun4v_devino_to_sysino(unsigned long devhandle,
-						unsigned long deino);
 extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle,
 					 unsigned long tsbid,
 					 unsigned long num_ttes,
diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S
index 4a12341dd5d..424db652664 100644
--- a/arch/sparc64/kernel/pci_sun4v_asm.S
+++ b/arch/sparc64/kernel/pci_sun4v_asm.S
@@ -5,18 +5,6 @@
 
 #include <asm/hypervisor.h>
 
-	/* %o0: devhandle
-	 * %o1:	devino
-	 *
-	 * returns %o0: sysino
-	 */
-	.globl	pci_sun4v_devino_to_sysino
-pci_sun4v_devino_to_sysino:
-	mov	HV_FAST_INTR_DEVINO2SYSINO, %o5
-	ta	HV_FAST_TRAP
-	retl
-	 mov	%o1, %o0
-
 	/* %o0: devhandle
 	 * %o1:	tsbid
 	 * %o2:	num ttes
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index 5d795ee5192..16a40f48beb 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -1203,6 +1203,11 @@ struct hv_trap_trace_entry {
  */
 #define HV_FAST_INTR_DEVINO2SYSINO	0xa0
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
+					    unsigned long devino);
+#endif
+
 /* intr_getenabled()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_INTR_GETENABLED
-- 
cgit v1.2.3-70-g09d2


From 6c0f402f6cc62314ef83b975f3430350dcb6055f Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 13 Feb 2006 00:23:32 -0800
Subject: [SPARC64]: Implement rest of generic interrupt hypervisor calls.
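
With these in place, the usual sequence for bringing up a sun4v
device interrupt from C looks roughly like this (a sketch; checking
of the returned status codes is added later in this series):

	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);

	/* Route it to a cpu, clear any stale pending state,
	 * then unmask it.
	 */
	sun4v_intr_settarget(sysino, cpuid);
	sun4v_intr_setstate(sysino, HV_INTR_STATE_IDLE);
	sun4v_intr_setenabled(sysino, HV_INTR_ENABLED);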

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S      | 66 +++++++++++++++++++++++++++++++++++++++-
 include/asm-sparc64/hypervisor.h | 24 +++++++++++++++
 2 files changed, 89 insertions(+), 1 deletion(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index fa185f22705..a2842a72f8e 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1701,9 +1701,73 @@ hard_smp_processor_id:
 	 *
 	 * returns %o0: sysino
 	 */
-	.globl	pci_sun4v_devino_to_sysino
+	.globl	sun4v_devino_to_sysino
 sun4v_devino_to_sysino:
 	mov	HV_FAST_INTR_DEVINO2SYSINO, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 mov	%o1, %o0
+
+	/* %o0: sysino
+	 *
+	 * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
+	 */
+	.globl	sun4v_intr_getenabled
+sun4v_intr_getenabled:
+	mov	HV_FAST_INTR_GETENABLED, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 mov	%o1, %o0
+
+	/* %o0: sysino
+	 * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
+	 */
+	.globl	sun4v_intr_setenabled
+sun4v_intr_setenabled:
+	mov	HV_FAST_INTR_SETENABLED, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+
+	/* %o0: sysino
+	 *
+	 * returns %o0: intr_state (HV_INTR_STATE_*)
+	 */
+	.globl	sun4v_intr_getstate
+sun4v_intr_getstate:
+	mov	HV_FAST_INTR_GETSTATE, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 mov	%o1, %o0
+
+	/* %o0: sysino
+	 * %o1: intr_state (HV_INTR_STATE_*)
+	 */
+	.globl	sun4v_intr_setstate
+sun4v_intr_setstate:
+	mov	HV_FAST_INTR_SETSTATE, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+
+	/* %o0: sysino
+	 *
+	 * returns %o0: cpuid
+	 */
+	.globl	sun4v_intr_gettarget
+sun4v_intr_gettarget:
+	mov	HV_FAST_INTR_GETTARGET, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 mov	%o1, %o0
+
+	/* %o0: sysino
+	 * %o1: cpuid
+	 */
+	.globl	sun4v_intr_settarget
+sun4v_intr_settarget:
+	mov	HV_FAST_INTR_SETTARGET, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index 16a40f48beb..587a0f6a0a7 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -1221,6 +1221,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
  */
 #define HV_FAST_INTR_GETENABLED		0xa1
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
+#endif
+
 /* intr_setenabled()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_INTR_SETENABLED
@@ -1233,6 +1237,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
  */
 #define HV_FAST_INTR_SETENABLED		0xa2
 
+#ifndef __ASSEMBLY__
+extern void sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
+#endif
+
 /* intr_getstate()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_INTR_GETSTATE
@@ -1245,6 +1253,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
  */
 #define HV_FAST_INTR_GETSTATE		0xa3
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_intr_getstate(unsigned long sysino);
+#endif
+
 /* intr_setstate()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_INTR_SETSTATE
@@ -1261,6 +1273,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
  */
 #define HV_FAST_INTR_SETSTATE		0xa4
 
+#ifndef __ASSEMBLY__
+extern void sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
+#endif
+
 /* intr_gettarget()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_INTR_GETTARGET
@@ -1275,6 +1291,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
  */
 #define HV_FAST_INTR_GETTARGET		0xa5
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
+#endif
+
 /* intr_settarget()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_INTR_SETTARGET
@@ -1288,6 +1308,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
  */
 #define HV_FAST_INTR_SETTARGET		0xa6
 
+#ifndef __ASSEMBLY__
+extern void sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
+#endif
+
 /* PCI IO services.
  *
  * See the terminology descriptions in the device interrupt services
-- 
cgit v1.2.3-70-g09d2


From e3999574b48125c9bb0c95e3e9f1c696bf96c3e3 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 13 Feb 2006 18:16:10 -0800
Subject: [SPARC64]: Generic sun4v_build_irq().
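
This gives non-PCI code a way to build IRQs as well.  A
hypothetical non-PCI caller (the devhandle, devino, PIL and flags
values here are placeholders):

	unsigned int irq;

	/* devhandle/devino would come from firmware properties,
	 * e.g. a virtual-devices interrupt-map entry.
	 */
	irq = sun4v_build_irq(devhandle, devino, 4 /* PIL */, 0 /* flags */);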

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/irq.c       | 32 ++++++++++++++++++++++++++++++++
 arch/sparc64/kernel/pci_sun4v.c | 30 ++----------------------------
 include/asm-sparc64/irq.h       |  1 +
 3 files changed, 35 insertions(+), 28 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 51f65054bf1..bcc889a5332 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -303,6 +303,38 @@ out:
 	return __irq(bucket);
 }
 
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags)
+{
+	struct ino_bucket *bucket;
+	unsigned long sysino;
+
+	sysino = sun4v_devino_to_sysino(devhandle, devino);
+
+	printk(KERN_INFO "sun4v_irq: Mapping ( devh[%08x] devino[%08x] ) "
+	       "--> sysino[%016lx]\n", devhandle, devino, sysino);
+
+	bucket = &ivector_table[sysino];
+
+	/* Catch accidental accesses to these things.  IMAP/ICLR handling
+	 * is done by hypervisor calls on sun4v platforms, not by direct
+	 * register accesses.
+	 */
+	bucket->imap = ~0UL;
+	bucket->iclr = ~0UL;
+
+	bucket->pil = pil;
+	bucket->flags = flags;
+
+	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
+	if (!bucket->irq_info) {
+		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
+		prom_halt();
+	}
+	memset(bucket->irq_info, 0, sizeof(struct irq_desc));
+
+	return __irq(bucket);
+}
+
 static void atomic_bucket_insert(struct ino_bucket *bucket)
 {
 	unsigned long pstate;
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 5174346ce35..b8846b271f9 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -644,18 +644,11 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p)
 
 static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
 					struct pci_dev *pdev,
-					unsigned int ino)
+					unsigned int devino)
 {
-	struct ino_bucket *bucket;
-	unsigned long sysino;
 	u32 devhandle = pbm->devhandle;
 	int pil;
 
-	sysino = sun4v_devino_to_sysino(devhandle, ino);
-
-	printk(KERN_INFO "pci_irq_buld: Mapping ( devh[%08x] ino[%08x] ) "
-	       "--> sysino[%016lx]\n", devhandle, ino, sysino);
-
 	pil = 4;
 	if (pdev) {
 		switch ((pdev->class >> 16) & 0xff) {
@@ -685,26 +678,7 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
 	}
 	BUG_ON(PIL_RESERVED(pil));
 
-	bucket = &ivector_table[sysino];
-
-	/* Catch accidental accesses to these things.  IMAP/ICLR handling
-	 * is done by hypervisor calls on sun4v platforms, not by direct
-	 * register accesses.
-	 */
-	bucket->imap = ~0UL;
-	bucket->iclr = ~0UL;
-
-	bucket->pil = pil;
-	bucket->flags = IBF_PCI;
-
-	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
-	if (!bucket->irq_info) {
-		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
-		prom_halt();
-	}
-	memset(bucket->irq_info, 0, sizeof(struct irq_desc));
-
-	return __irq(bucket);
+	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
 }
 
 static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 8b70edcb80d..529a9df1ad4 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -111,6 +111,7 @@ extern void disable_irq(unsigned int);
 #define disable_irq_nosync disable_irq
 extern void enable_irq(unsigned int);
 extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap);
+extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags);
 extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
 
 static __inline__ void set_softint(unsigned long bits)
-- 
cgit v1.2.3-70-g09d2


From e77227eb4e17591a6a511b9c0ff6e8ad7350c575 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 13 Feb 2006 20:42:16 -0800
Subject: [SPARC64]: Probe virtual-devices root node on sun4v.

This is where we learn how to get the interrupts
for things like the hypervisor console device.
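
A consumer is expected to translate a child device's "interrupts"
value through this map in the standard OBP way: mask the unit
address and interrupt with interrupt-map-mask, find the matching
map entry, and take the parent's interrupt number from it.  A
hypothetical sketch of that lookup (field usage follows the
linux_prom_pci_intmap layout; the helper itself is not part of
this patch):

	static unsigned int vdev_intmap_match(unsigned int reg_hi,
					      unsigned int dev_irq)
	{
		int i;

		for (i = 0; i < sun4v_vdev_num_intmap; i++) {
			struct linux_prom_pci_intmap *im = &sun4v_vdev_intmap[i];

			if (im->phys_hi == (reg_hi & sun4v_vdev_intmask.phys_hi) &&
			    im->interrupt == (dev_irq & sun4v_vdev_intmask.interrupt))
				return im->cinterrupt;
		}
		return dev_irq;	/* no translation found */
	}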

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/devices.c | 58 +++++++++++++++++++++++++++++++++++++++++++
 include/asm-sparc64/vdev.h    | 18 ++++++++++++++
 2 files changed, 76 insertions(+)
 create mode 100644 include/asm-sparc64/vdev.h

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index ac11d872ef7..817132826e0 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
+#include <linux/bootmem.h>
 
 #include <asm/page.h>
 #include <asm/oplib.h>
@@ -20,6 +21,7 @@
 #include <asm/spitfire.h>
 #include <asm/timer.h>
 #include <asm/cpudata.h>
+#include <asm/vdev.h>
 
 /* Used to synchronize acceses to NatSemi SUPER I/O chip configure
  * operations in asm/ns87303.h
@@ -29,6 +31,61 @@ DEFINE_SPINLOCK(ns87303_lock);
 extern void cpu_probe(void);
 extern void central_probe(void);
 
+u32 sun4v_vdev_devhandle;
+int sun4v_vdev_root;
+struct linux_prom_pci_intmap *sun4v_vdev_intmap;
+int sun4v_vdev_num_intmap;
+struct linux_prom_pci_intmap sun4v_vdev_intmask;
+
+static void __init sun4v_virtual_device_probe(void)
+{
+	struct linux_prom64_registers regs;
+	struct linux_prom_pci_intmap *ip;
+	int node, sz, err;
+
+	if (tlb_type != hypervisor)
+		return;
+
+	node = prom_getchild(prom_root_node);
+	node = prom_searchsiblings(node, "virtual-devices");
+	if (!node) {
+		prom_printf("SUN4V: Fatal error, no virtual-devices node.\n");
+		prom_halt();
+	}
+
+	sun4v_vdev_root = node;
+
+	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
+	sun4v_vdev_devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;
+
+	sz = sizeof(*ip) * 64;
+	sun4v_vdev_intmap = ip = alloc_bootmem_low_pages(sz);
+	if (!sun4v_vdev_intmap) {
+		prom_printf("SUN4V: Error, cannot allocate vdev intmap.\n");
+		prom_halt();
+	}
+
+	err = prom_getproperty(node, "interrupt-map", (char *) ip, sz);
+	if (err == -1) {
+		prom_printf("SUN4V: Fatal error, no vdev interrupt-map.\n");
+		prom_halt();
+	}
+
+	sun4v_vdev_num_intmap = err / sizeof(*ip);
+
+	err = prom_getproperty(node, "interrupt-map-mask",
+			       (char *) &sun4v_vdev_intmask,
+			       sizeof(sun4v_vdev_intmask));
+	if (err == -1) {
+		prom_printf("SUN4V: Fatal error, no vdev "
+			    "interrupt-map-mask.\n");
+		prom_halt();
+	}
+
+	printk("SUN4V: virtual-devices devhandle[%x]\n",
+	       sun4v_vdev_devhandle);
+}
+
 static const char *cpu_mid_prop(void)
 {
 	if (tlb_type == spitfire)
@@ -177,6 +234,7 @@ void __init device_scan(void)
 	}
 #endif
 
+	sun4v_virtual_device_probe();
 	central_probe();
 
 	cpu_probe();
diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h
new file mode 100644
index 00000000000..ebaac41a6e1
--- /dev/null
+++ b/include/asm-sparc64/vdev.h
@@ -0,0 +1,18 @@
+/* vdev.h: SUN4V virtual device interfaces and defines.
+ *
+ * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
+ */
+
+#ifndef _SPARC64_VDEV_H
+#define _SPARC64_VDEV_H
+
+#include <linux/types.h>
+#include <asm/oplib.h>
+
+extern u32 sun4v_vdev_devhandle;
+extern int sun4v_vdev_root;
+extern struct linux_prom_pci_intmap *sun4v_vdev_intmap;
+extern int sun4v_vdev_num_intmap;
+extern struct linux_prom_pci_intmap sun4v_vdev_intmask;
+
+#endif /* !(_SPARC64_VDEV_H) */
-- 
cgit v1.2.3-70-g09d2


From 5259d5bfaf5b2953b130e9a500277a905bd37823 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 13 Feb 2006 21:15:44 -0800
Subject: [SPARC64]: Fix comment typo in asm/hypervisor.h

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/hypervisor.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index 587a0f6a0a7..76a9d0fa272 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -912,7 +912,7 @@ struct hv_fault_status {
  * ARG0:	character
  * RET0:	status
  * ERRORS:	EINVAL		Illegal character
- *		EWOULDBLOCK	Output buffer currentl full, would block
+ *		EWOULDBLOCK	Output buffer currently full, would block
  *
  * Send a character to the console device.  Only character values
  * between 0 and 255 may be used.  Values outside this range are
-- 
cgit v1.2.3-70-g09d2


From c4bea2883974a59ab7a0ac6c01d34f7ae0e8cd8e Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 13 Feb 2006 22:56:27 -0800
Subject: [SPARC64]: Make error codes available from sun4v_intr_set*().

And check for errors at call sites.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/irq.c        | 21 ++++++++++++++++++---
 include/asm-sparc64/hypervisor.h |  6 +++---
 2 files changed, 21 insertions(+), 6 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c57b1708ae8..0d3b0ea329c 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -154,9 +154,16 @@ void enable_irq(unsigned int irq)
 	if (tlb_type == hypervisor) {
 		unsigned int ino = __irq_ino(irq);
 		int cpu = hard_smp_processor_id();
+		int err;
 
-		sun4v_intr_settarget(ino, cpu);
+		err = sun4v_intr_settarget(ino, cpu);
+		if (err != HV_EOK)
+			printk("sun4v_intr_settarget(%x,%d): err(%d)\n",
+			       ino, cpu, err);
-		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
+		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
+		if (err != HV_EOK)
+			printk("sun4v_intr_setenabled(%x): err(%d)\n",
+			       ino, err);
 	} else {
 		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 			unsigned long ver;
@@ -216,8 +223,12 @@ void disable_irq(unsigned int irq)
 	if (imap != 0UL) {
 		if (tlb_type == hypervisor) {
 			unsigned int ino = __irq_ino(irq);
+			int err;
 
-			sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+			err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+			if (err != HV_EOK)
+				printk("sun4v_intr_setenabled(%x): "
+				       "err(%d)\n", ino, err);
 		} else {
 			u32 tmp;
 
@@ -647,8 +658,12 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
 	if (bp->pil != 0) {
 		if (tlb_type == hypervisor) {
 			unsigned int ino = __irq_ino(bp);
+			int err;
 
-			sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+			err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+			if (err != HV_EOK)
+				printk("sun4v_intr_setstate(%x): "
+				       "err(%d)\n", ino, err);
 		} else {
 			upa_writel(ICLR_IDLE, bp->iclr);
 			/* Test and add entropy */
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index 76a9d0fa272..f14992ab7fe 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -1238,7 +1238,7 @@ extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
 #define HV_FAST_INTR_SETENABLED		0xa2
 
 #ifndef __ASSEMBLY__
-extern void sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
+extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
 #endif
 
 /* intr_getstate()
@@ -1274,7 +1274,7 @@ extern unsigned long sun4v_intr_getstate(unsigned long sysino);
 #define HV_FAST_INTR_SETSTATE		0xa4
 
 #ifndef __ASSEMBLY__
-extern void sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
+extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
 #endif
 
 /* intr_gettarget()
@@ -1309,7 +1309,7 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
 #define HV_FAST_INTR_SETTARGET		0xa6
 
 #ifndef __ASSEMBLY__
-extern void sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
+extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
 #endif
 
 /* PCI IO services.
-- 
cgit v1.2.3-70-g09d2


From 50f4f23c3bd95afc82e931254a71e7b1b3699fd2 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 14 Feb 2006 01:32:29 -0800
Subject: [SPARC64]: Fix gcc-3.3.x warnings.

It doesn't like const variables being passed into
"i" constraint asm operands.  It's a compiler bug,
but there is nothing we can really do but work
around it.

Based upon a report from Andrew Morton.
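
A hypothetical illustration of the construct gcc-3.3.x trips over,
even though the value is a compile-time constant:

	static inline unsigned long or_example(unsigned long val)
	{
		const unsigned long mask = 0x100UL;

		/* gcc-3.3.x does not accept 'mask' as an
		 * immediate ("i") operand here...
		 */
		__asm__("or	%1, %2, %0"
			: "=r" (val)
			: "0" (val), "i" (mask));
		return val;
	}

	/* ...so the workaround below writes each constant
	 * expression directly in the "i" operand instead of
	 * going through a const local variable.
	 */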

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/pgtable.h | 143 ++++++++----------------------------------
 1 file changed, 25 insertions(+), 118 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 00eecbb52f9..bc446f302ea 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -232,13 +232,8 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 	unsigned long paddr = pfn << PAGE_SHIFT;
 	unsigned long sz_bits;
 
-	BUILD_BUG_ON(!__builtin_constant_p(_PAGE_SZBITS_4U) ||
-		     !__builtin_constant_p(_PAGE_SZBITS_4V));
-
 	sz_bits = 0UL;
 	if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
-		BUILD_BUG_ON((_PAGE_SZBITS_4U & ~(0xfffffc0000000000UL)) ||
-			     (_PAGE_SZBITS_4V & ~(0x0000000000000fffUL)));
 		__asm__ __volatile__(
 		"\n661:	sethi		%%uhi(%1), %0\n"
 		"	sllx		%0, 32, %0\n"
@@ -257,10 +252,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 /* This one can be done with two shifts.  */
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	const unsigned long pte_paddr_shl_sun4u = 21;
-	const unsigned long pte_paddr_shr_sun4u = 21 + PAGE_SHIFT;
-	const unsigned long pte_paddr_shl_sun4v =  8;
-	const unsigned long pte_paddr_shr_sun4v =  8 + PAGE_SHIFT;
 	unsigned long ret;
 
 	__asm__ __volatile__(
@@ -273,8 +264,8 @@ static inline unsigned long pte_pfn(pte_t pte)
 	"	.previous\n"
 	: "=r" (ret)
 	: "r" (pte_val(pte)),
-	  "i" (pte_paddr_shl_sun4u), "i" (pte_paddr_shr_sun4u),
-	  "i" (pte_paddr_shl_sun4v), "i" (pte_paddr_shr_sun4v));
+	  "i" (21), "i" (21 + PAGE_SHIFT),
+	  "i" (8), "i" (8 + PAGE_SHIFT));
 
 	return ret;
 }
@@ -282,22 +273,6 @@ static inline unsigned long pte_pfn(pte_t pte)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 {
-	const unsigned long preserve_mask_sun4u = (_PAGE_PADDR_4U |
-						   _PAGE_MODIFIED_4U |
-						   _PAGE_ACCESSED_4U |
-						   _PAGE_CP_4U |
-						   _PAGE_CV_4U |
-						   _PAGE_E_4U |
-						   _PAGE_PRESENT_4U |
-						   _PAGE_SZBITS_4U);
-	const unsigned long preserve_mask_sun4v = (_PAGE_PADDR_4V |
-						   _PAGE_MODIFIED_4V |
-						   _PAGE_ACCESSED_4V |
-						   _PAGE_CP_4V |
-						   _PAGE_CV_4V |
-						   _PAGE_E_4V |
-						   _PAGE_PRESENT_4V |
-						   _PAGE_SZBITS_4V);
 	unsigned long mask, tmp;
 
 	/* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
@@ -329,7 +304,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 	"	or		%0, %1, %0\n"
 	"	.previous\n"
 	: "=r" (mask), "=r" (tmp)
-	: "i" (preserve_mask_sun4u), "i" (preserve_mask_sun4v));
+	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
+	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
+	       _PAGE_SZBITS_4U),
+	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
+	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
+	       _PAGE_SZBITS_4V));
 
 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
 }
@@ -338,9 +318,6 @@ static inline pte_t pgoff_to_pte(unsigned long off)
 {
 	off <<= PAGE_SHIFT;
 
-	BUILD_BUG_ON((_PAGE_FILE_4U & ~0xfffUL) ||
-		     (_PAGE_FILE_4V & ~0xfffUL));
-
 	__asm__ __volatile__(
 	"\n661:	or		%0, %2, %0\n"
 	"	.section	.sun4v_1insn_patch, \"ax\"\n"
@@ -357,9 +334,6 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {
 	unsigned long val = pgprot_val(prot);
 
-	BUILD_BUG_ON(((_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U) & ~(0xfffUL)) ||
-		     ((_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V) & ~(0xfffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	andn		%0, %2, %0\n"
 	"	or		%0, %3, %0\n"
@@ -382,13 +356,8 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_SZHUGE_4U;
-	const unsigned long mask_4v = _PAGE_SZHUGE_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0xfffffc0000000000UL)) ||
-		     (mask_4v & ~(0xfffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	sethi		%%uhi(%1), %0\n"
 	"	sllx		%0, 32, %0\n"
@@ -398,20 +367,15 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	"	nop\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
 
 	return __pte(pte_val(pte) | mask);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U;
-	const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V;
 	unsigned long val = pte_val(pte), tmp;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000fffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	or		%0, %3, %0\n"
 	"	nop\n"
@@ -426,20 +390,16 @@ static inline pte_t pte_mkdirty(pte_t pte)
 	"	or		%0, %1, %0\n"
 	"	.previous\n"
 	: "=r" (val), "=r" (tmp)
-	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
+	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
 
 	return __pte(val);
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U;
-	const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V;
 	unsigned long val = pte_val(pte), tmp;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000fffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	andn		%0, %3, %0\n"
 	"	nop\n"
@@ -454,20 +414,16 @@ static inline pte_t pte_mkclean(pte_t pte)
 	"	andn		%0, %1, %0\n"
 	"	.previous\n"
 	: "=r" (val), "=r" (tmp)
-	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
+	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
 
 	return __pte(val);
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_WRITE_4U;
-	const unsigned long mask_4v = _PAGE_WRITE_4V;
 	unsigned long val = pte_val(pte), mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000000UL)));
-
 	__asm__ __volatile__(
 	"\n661:	mov		%1, %0\n"
 	"	nop\n"
@@ -477,20 +433,15 @@ static inline pte_t pte_mkwrite(pte_t pte)
 	"	sllx		%0, 32, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
 
 	return __pte(val | mask);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_WRITE_4U | _PAGE_W_4U;
-	const unsigned long mask_4v = _PAGE_WRITE_4V | _PAGE_W_4V;
 	unsigned long val = pte_val(pte), tmp;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000fffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	andn		%0, %3, %0\n"
 	"	nop\n"
@@ -505,20 +456,16 @@ static inline pte_t pte_wrprotect(pte_t pte)
 	"	andn		%0, %1, %0\n"
 	"	.previous\n"
 	: "=r" (val), "=r" (tmp)
-	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
+	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
 
 	return __pte(val);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
-	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000000UL)));
-
 	__asm__ __volatile__(
 	"\n661:	mov		%1, %0\n"
 	"	nop\n"
@@ -528,7 +475,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	"	sllx		%0, 32, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
 
 	mask |= _PAGE_R;
 
@@ -537,13 +484,8 @@ static inline pte_t pte_mkold(pte_t pte)
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
-	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000000UL)));
-
 	__asm__ __volatile__(
 	"\n661:	mov		%1, %0\n"
 	"	nop\n"
@@ -553,7 +495,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	"	sllx		%0, 32, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
 
 	mask |= _PAGE_R;
 
@@ -562,13 +504,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 static inline unsigned long pte_young(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_ACCESSED_4U;
-	const unsigned long mask_4v = _PAGE_ACCESSED_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000000UL)));
-
 	__asm__ __volatile__(
 	"\n661:	mov		%1, %0\n"
 	"	nop\n"
@@ -578,20 +515,15 @@ static inline unsigned long pte_young(pte_t pte)
 	"	sllx		%0, 32, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
 
 	return (pte_val(pte) & mask);
 }
 
 static inline unsigned long pte_dirty(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_MODIFIED_4U;
-	const unsigned long mask_4v = _PAGE_MODIFIED_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000000UL)));
-
 	__asm__ __volatile__(
 	"\n661:	mov		%1, %0\n"
 	"	nop\n"
@@ -601,20 +533,15 @@ static inline unsigned long pte_dirty(pte_t pte)
 	"	sllx		%0, 32, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
 
 	return (pte_val(pte) & mask);
 }
 
 static inline unsigned long pte_write(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_WRITE_4U;
-	const unsigned long mask_4v = _PAGE_WRITE_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000000UL)));
-
 	__asm__ __volatile__(
 	"\n661:	mov		%1, %0\n"
 	"	nop\n"
@@ -624,20 +551,15 @@ static inline unsigned long pte_write(pte_t pte)
 	"	sllx		%0, 32, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
 
 	return (pte_val(pte) & mask);
 }
 
 static inline unsigned long pte_exec(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_EXEC_4U;
-	const unsigned long mask_4v = _PAGE_EXEC_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x00000000fffffc00UL)) ||
-		     (mask_4v & ~(0x0000000000000fffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	sethi		%%hi(%1), %0\n"
 	"	.section	.sun4v_1insn_patch, \"ax\"\n"
@@ -645,20 +567,15 @@ static inline unsigned long pte_exec(pte_t pte)
 	"	mov		%2, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
 
 	return (pte_val(pte) & mask);
 }
 
 static inline unsigned long pte_read(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_READ_4U;
-	const unsigned long mask_4v = _PAGE_READ_4V;
 	unsigned long mask;
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0xfffffc0000000000UL)));
-
 	__asm__ __volatile__(
 	"\n661:	mov		%1, %0\n"
 	"	nop\n"
@@ -668,20 +585,15 @@ static inline unsigned long pte_read(pte_t pte)
 	"	sllx		%0, 32, %0\n"
 	"	.previous\n"
 	: "=r" (mask)
-	: "i" (mask_4u), "i" (mask_4v));
+	: "i" (_PAGE_READ_4U), "i" (_PAGE_READ_4V));
 
 	return (pte_val(pte) & mask);
 }
 
 static inline unsigned long pte_file(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_FILE_4U;
-	const unsigned long mask_4v = _PAGE_FILE_4V;
 	unsigned long val = pte_val(pte);
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0x0000000000000fffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	and		%0, %2, %0\n"
 	"	.section	.sun4v_1insn_patch, \"ax\"\n"
@@ -689,20 +601,15 @@ static inline unsigned long pte_file(pte_t pte)
 	"	and		%0, %3, %0\n"
 	"	.previous\n"
 	: "=r" (val)
-	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+	: "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
 
 	return val;
 }
 
 static inline unsigned long pte_present(pte_t pte)
 {
-	const unsigned long mask_4u = _PAGE_PRESENT_4U;
-	const unsigned long mask_4v = _PAGE_PRESENT_4V;
 	unsigned long val = pte_val(pte);
 
-	BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) ||
-		     (mask_4v & ~(0x0000000000000fffUL)));
-
 	__asm__ __volatile__(
 	"\n661:	and		%0, %2, %0\n"
 	"	.section	.sun4v_1insn_patch, \"ax\"\n"
@@ -710,7 +617,7 @@ static inline unsigned long pte_present(pte_t pte)
 	"	and		%0, %3, %0\n"
 	"	.previous\n"
 	: "=r" (val)
-	: "0" (val), "i" (mask_4u), "i" (mask_4v));
+	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
 
 	return val;
 }
-- 
cgit v1.2.3-70-g09d2


From 7c3514e4501565d76f9e4dec43e1fc17389f4811 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 15 Feb 2006 00:41:47 -0800
Subject: [SPARC64]: Fixup TSTATE layout diagram in asm/pstate.h

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/pstate.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h
index 1181d73315a..49a7924a89a 100644
--- a/include/asm-sparc64/pstate.h
+++ b/include/asm-sparc64/pstate.h
@@ -28,9 +28,9 @@
 
 /* The V9 TSTATE Register (with SpitFire and Linux extensions).
  *
- * ---------------------------------------------------------------
+ * ---------------------------------------------------------------------
  * |  Resv |  GL  |  CCR  |  ASI  |  %pil  |  PSTATE  |  Resv  |  CWP  |
- * ---------------------------------------------------------------
+ * ---------------------------------------------------------------------
  *  63   43 42  40 39   32 31   24 23    20 19       8 7      5 4     0
  */
 #define TSTATE_GL	_AC(0x0000070000000000,UL) /* Global reg level  */
-- 
cgit v1.2.3-70-g09d2


From 7890f794e0e6f7dce2a5f4a03ba64b0b3fe306bd Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 15 Feb 2006 02:26:54 -0800
Subject: [SPARC64]: Add prom_{start,stop}cpu_cpuid().

Use prom_startcpu_cpuid() on SUN4V instead of prom_startcpu().

We should really test for "SUNW,start-cpu-by-cpuid" presence
and use it if present even on SUN4U.
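
As a sketch, the probe could look like the following, assuming a
helper such as prom_service_exists() existed (it does not in this
series; the name is purely illustrative):

	/* Hypothetical: prefer the cpuid-based service whenever the
	 * firmware advertises it, on SUN4U as well as SUN4V.
	 */
	static void start_cpu(int cpu, int cpu_node,
			      unsigned long pc, unsigned long arg)
	{
		if (prom_service_exists("SUNW,start-cpu-by-cpuid"))
			prom_startcpu_cpuid(cpu, pc, arg);
		else
			prom_startcpu(cpu_node, pc, arg);
	}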

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/smp.c   | 12 +++++++++---
 arch/sparc64/prom/misc.c    | 16 ++++++++++++++--
 include/asm-sparc64/oplib.h | 14 +++++++++++---
 3 files changed, 34 insertions(+), 8 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 64046d37bbf..527dfd7ae21 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -331,15 +331,21 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 	unsigned long cookie =
 		(unsigned long)(&cpu_new_thread);
 	struct task_struct *p;
-	int timeout, ret, cpu_node;
+	int timeout, ret;
 
 	p = fork_idle(cpu);
 	callin_flag = 0;
 	cpu_new_thread = task_thread_info(p);
 	cpu_set(cpu, cpu_callout_map);
 
-	cpu_find_by_mid(cpu, &cpu_node);
-	prom_startcpu(cpu_node, entry, cookie);
+	if (tlb_type == hypervisor) {
+		prom_startcpu_cpuid(cpu, entry, cookie);
+	} else {
+		int cpu_node;
+
+		cpu_find_by_mid(cpu, &cpu_node);
+		prom_startcpu(cpu_node, entry, cookie);
+	}
 
 	for (timeout = 0; timeout < 5000000; timeout++) {
 		if (callin_flag)
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 713cbac5f9b..36d2b9c1622 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -308,9 +308,21 @@ int prom_wakeupsystem(void)
 }
 
 #ifdef CONFIG_SMP
-void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0)
+void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
 {
-	p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0);
+	p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg);
+}
+
+void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
+{
+	p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0),
+		  cpuid, pc, arg);
+}
+
+void prom_stopcpu_cpuid(int cpuid)
+{
+	p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0),
+		  cpuid);
 }
 
 void prom_stopself(void)
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 0631d13475f..84618f8a9e6 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -188,10 +188,18 @@ extern enum prom_output_device prom_query_output_device(void);
 
 /* Multiprocessor operations... */
 #ifdef CONFIG_SMP
-/* Start the CPU with the given device tree node, context table, and context
- * at the passed program counter.
+/* Start the CPU with the given device tree node at the passed program
+ * counter with the given arg passed in via register %o0.
  */
-extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0);
+extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
+
+/* Start the CPU with the given cpu ID at the passed program
+ * counter with the given arg passed in via register %o0.
+ */
+extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
+
+/* Stop the CPU with the given cpu ID.  */
+extern void prom_stopcpu_cpuid(int cpuid);
 
 /* Stop the current CPU. */
 extern void prom_stopself(void);
-- 
cgit v1.2.3-70-g09d2


From 9d29a3fafd06534ad73427fee3c968c094d05b9b Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 15 Feb 2006 19:48:54 -0800
Subject: [SPARC64]: Decode virtual-devices interrupts correctly.

Need to translate through the interrupt-map{,-mask} properties.
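
The translation is a mask-and-match against the interrupt-map entries.
A standalone model of the lookup this patch implements (the table
contents are invented for illustration; the real ones are read from
the OBP properties):

	#include <stdio.h>

	struct vdev_intmap { unsigned int phys, irq, cnode, cinterrupt; };
	struct vdev_intmask { unsigned int phys, interrupt; };

	static struct vdev_intmap intmap[] = { { 0x100, 1, 0, 0x21 } };
	static struct vdev_intmask intmask = { 0xfff, 0x3f };

	static unsigned int translate(unsigned int reg, unsigned int irq)
	{
		int i, n = sizeof(intmap) / sizeof(intmap[0]);

		for (i = 0; i < n; i++) {
			if (intmap[i].phys == (reg & intmask.phys) &&
			    intmap[i].irq == (irq & intmask.interrupt))
				return intmap[i].cinterrupt;
		}
		return 0;	/* no match */
	}

	int main(void)
	{
		printf("parent interrupt %#x\n", translate(0x100, 1));
		return 0;
	}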

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/devices.c | 98 +++++++++++++++++++++++++++++++++++++------
 drivers/serial/sunhv.c        | 14 ++-----
 include/asm-sparc64/vdev.h    |  6 +--
 3 files changed, 92 insertions(+), 26 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index 71eee392e14..1341b99ca7a 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -22,6 +22,7 @@
 #include <asm/timer.h>
 #include <asm/cpudata.h>
 #include <asm/vdev.h>
+#include <asm/irq.h>
 
 /* Used to synchronize accesses to NatSemi SUPER I/O chip configure
  * operations in asm/ns87303.h
@@ -33,14 +34,28 @@ extern void central_probe(void);
 
 u32 sun4v_vdev_devhandle;
 int sun4v_vdev_root;
-struct linux_prom_pci_intmap *sun4v_vdev_intmap;
-int sun4v_vdev_num_intmap;
-struct linux_prom_pci_intmap sun4v_vdev_intmask;
+
+struct vdev_intmap {
+	unsigned int phys;
+	unsigned int irq;
+	unsigned int cnode;
+	unsigned int cinterrupt;
+};
+
+struct vdev_intmask {
+	unsigned int phys;
+	unsigned int interrupt;
+	unsigned int __unused;
+};
+
+static struct vdev_intmap *vdev_intmap;
+static int vdev_num_intmap;
+static struct vdev_intmask vdev_intmask;
 
 static void __init sun4v_virtual_device_probe(void)
 {
 	struct linux_prom64_registers regs;
-	struct linux_prom_pci_intmap *ip;
+	struct vdev_intmap *ip;
 	int node, sz, err;
 
 	if (tlb_type != hypervisor)
@@ -58,10 +73,21 @@ static void __init sun4v_virtual_device_probe(void)
 	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
 	sun4v_vdev_devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;
 
-	sz = sizeof(*ip) * 64;
-	sun4v_vdev_intmap = ip = alloc_bootmem_low_pages(sz);
-	if (!sun4v_vdev_intmap) {
-		prom_printf("SUN4V: Error, cannot allocate vdev intmap.\n");
+	sz = prom_getproplen(node, "interrupt-map");
+	if (sz <= 0) {
+		prom_printf("SUN4V: Error, no vdev interrupt-map.\n");
+		prom_halt();
+	}
+
+	if ((sz % sizeof(*ip)) != 0) {
+		prom_printf("SUN4V: Bogus interrupt-map property size %d\n",
+			    sz);
+		prom_halt();
+	}
+
+	vdev_intmap = ip = alloc_bootmem_low_pages(sz);
+	if (!vdev_intmap) {
+		prom_printf("SUN4V: Error, cannot allocate vdev_intmap.\n");
 		prom_halt();
 	}
 
@@ -70,22 +96,70 @@ static void __init sun4v_virtual_device_probe(void)
 		prom_printf("SUN4V: Fatal error, no vdev interrupt-map.\n");
 		prom_halt();
 	}
+	if (err != sz) {
+		prom_printf("SUN4V: Inconsistent interrupt-map size, "
+			    "proplen(%d) vs getprop(%d).\n", sz, err);
+		prom_halt();
+	}
 
-	sun4v_vdev_num_intmap = err / sizeof(*ip);
+	vdev_num_intmap = err / sizeof(*ip);
 
 	err = prom_getproperty(node, "interrupt-map-mask",
-			       (char *) &sun4v_vdev_intmask,
-			       sizeof(sun4v_vdev_intmask));
-	if (err == -1) {
+			       (char *) &vdev_intmask,
+			       sizeof(vdev_intmask));
+	if (err <= 0) {
 		prom_printf("SUN4V: Fatal error, no vdev "
 			    "interrupt-map-mask.\n");
 		prom_halt();
 	}
+	if (err % sizeof(vdev_intmask)) {
+		prom_printf("SUN4V: Bogus interrupt-map-mask "
+			    "property size %d\n", err);
+		prom_halt();
+	}
 
 	printk("SUN4V: virtual-devices devhandle[%x]\n",
 	       sun4v_vdev_devhandle);
 }
 
+unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node)
+{
+	unsigned int irq, reg;
+	int err, i;
+
+	err = prom_getproperty(dev_node, "interrupts",
+			       (char *) &irq, sizeof(irq));
+	if (err <= 0) {
+		printk("VDEV: Cannot get \"interrupts\" "
+		       "property for OBP node %x\n", dev_node);
+		return 0;
+	}
+
+	err = prom_getproperty(dev_node, "reg",
+			       (char *) &reg, sizeof(reg));
+	if (err <= 0) {
+		printk("VDEV: Cannot get \"reg\" "
+		       "property for OBP node %x\n", dev_node);
+		return 0;
+	}
+
+	for (i = 0; i < vdev_num_intmap; i++) {
+		if (vdev_intmap[i].phys == (reg & vdev_intmask.phys) &&
+		    vdev_intmap[i].irq == (irq & vdev_intmask.interrupt)) {
+			irq = vdev_intmap[i].cinterrupt;
+			break;
+		}
+	}
+
+	if (i == vdev_num_intmap) {
+		printk("VDEV: No matching interrupt map entry "
+		       "for OBP node %x\n", dev_node);
+		return 0;
+	}
+
+	return sun4v_build_irq(sun4v_vdev_devhandle, irq, 4, 0);
+}
+
 static const char *cpu_mid_prop(void)
 {
 	if (tlb_type == spitfire)
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index 71c70d7a998..cc46206a106 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -21,6 +21,7 @@
 #include <asm/hypervisor.h>
 #include <asm/spitfire.h>
 #include <asm/vdev.h>
+#include <asm/oplib.h>
 #include <asm/irq.h>
 
 #if defined(CONFIG_MAGIC_SYSRQ)
@@ -427,7 +428,6 @@ static unsigned int __init get_interrupt(void)
 	const char *cons_str = "console";
 	const char *compat_str = "compatible";
 	int node = prom_getchild(sun4v_vdev_root);
-	unsigned int irq;
 	char buf[64];
 	int err, len;
 
@@ -449,12 +449,7 @@ static unsigned int __init get_interrupt(void)
 	/* Ok, this is the OBP node for the sun4v hypervisor
 	 * console device.  Decode the interrupt.
 	 */
-	err = prom_getproperty(node, "interrupts",
-			       (char *) &irq, sizeof(irq));
-	if (err == -1)
-		return 0;
-
-	return sun4v_build_irq(sun4v_vdev_devhandle, irq, 4, 0);
+	return sun4v_vdev_device_interrupt(node);
 }
 
 static u32 sunhv_irq;
@@ -487,8 +482,8 @@ static int __init sunhv_init(void)
 		return -ENODEV;
 	}
 
-	printk("SUNHV: SUN4V virtual console, IRQ[%08x]\n",
-	       sunhv_irq);
+	printk("SUNHV: SUN4V virtual console, IRQ %s\n",
+	       __irq_itoa(sunhv_irq));
 
 	sunhv_reg.minor = sunserial_current_minor;
 	sunhv_reg.nr = 1;
@@ -520,7 +515,6 @@ static void __exit sunhv_exit(void)
 
 	uart_remove_one_port(&sunhv_reg, port);
 	free_irq(sunhv_irq, port);
-
 	sunserial_current_minor -= 1;
 
 	uart_unregister_driver(&sunhv_reg);
diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h
index ebaac41a6e1..996e6be7b97 100644
--- a/include/asm-sparc64/vdev.h
+++ b/include/asm-sparc64/vdev.h
@@ -7,12 +7,10 @@
 #define _SPARC64_VDEV_H
 
 #include <linux/types.h>
-#include <asm/oplib.h>
 
 extern u32 sun4v_vdev_devhandle;
 extern int sun4v_vdev_root;
-extern struct linux_prom_pci_intmap *sun4v_vdev_intmap;
-extern int sun4v_vdev_num_intmap;
-extern struct linux_prom_pci_intmap sun4v_vdev_intmask;
+
+extern unsigned int sun4v_vdev_device_interrupt(unsigned int);
 
 #endif /* !(_SPARC64_VDEV_H) */
-- 
cgit v1.2.3-70-g09d2


From bc45e32e0fbf661d0c5c5b9c981bc0fe5da4901f Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sun, 5 Mar 2006 16:46:58 -0800
Subject: [SPARC]: Kill off these __put_user_ret things.

They are bogus and haven't been referenced in years.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc/uaccess.h   | 47 -------------------------------------------
 include/asm-sparc64/uaccess.h | 37 ----------------------------------
 2 files changed, 84 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h
index f8f1ec1f06e..3cf132e1aa2 100644
--- a/include/asm-sparc/uaccess.h
+++ b/include/asm-sparc/uaccess.h
@@ -120,17 +120,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
 default: __pu_ret = __put_user_bad(); break; \
 } } else { __pu_ret = -EFAULT; } __pu_ret; })
 
-#define __put_user_check_ret(x,addr,size,retval) ({ \
-register int __foo __asm__ ("l1"); \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
-case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
-case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
-case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
-default: if (__put_user_bad()) return retval; break; \
-} } else return retval; })
-
 #define __put_user_nocheck(x,addr,size) ({ \
 register int __pu_ret; \
 switch (size) { \
@@ -141,16 +130,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
 default: __pu_ret = __put_user_bad(); break; \
 } __pu_ret; })
 
-#define __put_user_nocheck_ret(x,addr,size,retval) ({ \
-register int __foo __asm__ ("l1"); \
-switch (size) { \
-case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
-case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
-case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
-case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
-default: if (__put_user_bad()) return retval; break; \
-} })
-
 #define __put_user_asm(x,size,addr,ret)					\
 __asm__ __volatile__(							\
 	"/* Put user asm, inline. */\n"					\
@@ -170,32 +149,6 @@ __asm__ __volatile__(							\
        : "=&r" (ret) : "r" (x), "m" (*__m(addr)),			\
 	 "i" (-EFAULT))
 
-#define __put_user_asm_ret(x,size,addr,ret,foo)				\
-if (__builtin_constant_p(ret) && ret == -EFAULT)			\
-__asm__ __volatile__(							\
-	"/* Put user asm ret, inline. */\n"				\
-"1:\t"	"st"#size " %1, %2\n\n\t"					\
-	".section __ex_table,#alloc\n\t"				\
-	".align	4\n\t"							\
-	".word	1b, __ret_efault\n\n\t"					\
-	".previous\n\n\t"						\
-       : "=r" (foo) : "r" (x), "m" (*__m(addr)));			\
-else									\
-__asm__ __volatile(							\
-	"/* Put user asm ret, inline. */\n"				\
-"1:\t"	"st"#size " %1, %2\n\n\t"					\
-	".section .fixup,#alloc,#execinstr\n\t"				\
-	".align	4\n"							\
-"3:\n\t"								\
-	"ret\n\t"							\
-	" restore %%g0, %3, %%o0\n\t"					\
-	".previous\n\n\t"						\
-	".section __ex_table,#alloc\n\t"				\
-	".align	4\n\t"							\
-	".word	1b, 3b\n\n\t"						\
-	".previous\n\n\t"						\
-       : "=r" (foo) : "r" (x), "m" (*__m(addr)), "i" (ret))
-
 extern int __put_user_bad(void);
 
 #define __get_user_check(x,addr,size,type) ({ \
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index c91d1e38eac..0c375a989be 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -114,16 +114,6 @@ case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
 default: __pu_ret = __put_user_bad(); break; \
 } __pu_ret; })
 
-#define __put_user_nocheck_ret(data,addr,size,retval) ({ \
-register int __foo __asm__ ("l1"); \
-switch (size) { \
-case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \
-case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \
-case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \
-case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \
-default: if (__put_user_bad()) return retval; break; \
-} })
-
 #define __put_user_asm(x,size,addr,ret)					\
 __asm__ __volatile__(							\
 	"/* Put user asm, inline. */\n"					\
@@ -143,33 +133,6 @@ __asm__ __volatile__(							\
        : "=r" (ret) : "r" (x), "r" (__m(addr)),				\
 	 "i" (-EFAULT))
 
-#define __put_user_asm_ret(x,size,addr,ret,foo)				\
-if (__builtin_constant_p(ret) && ret == -EFAULT)			\
-__asm__ __volatile__(							\
-	"/* Put user asm ret, inline. */\n"				\
-"1:\t"	"st"#size "a %1, [%2] %%asi\n\n\t"				\
-	".section __ex_table,\"a\"\n\t"					\
-	".align	4\n\t"							\
-	".word	1b, __ret_efault\n\n\t"					\
-	".previous\n\n\t"						\
-       : "=r" (foo) : "r" (x), "r" (__m(addr)));			\
-else									\
-__asm__ __volatile__(							\
-	"/* Put user asm ret, inline. */\n"				\
-"1:\t"	"st"#size "a %1, [%2] %%asi\n\n\t"				\
-	".section .fixup,#alloc,#execinstr\n\t"				\
-	".align	4\n"							\
-"3:\n\t"								\
-	"ret\n\t"							\
-	" restore %%g0, %3, %%o0\n\n\t"					\
-	".previous\n\t"							\
-	".section __ex_table,\"a\"\n\t"					\
-	".align	4\n\t"							\
-	".word	1b, 3b\n\n\t"						\
-	".previous\n\n\t"						\
-       : "=r" (foo) : "r" (x), "r" (__m(addr)),				\
-         "i" (ret))
-
 extern int __put_user_bad(void);
 
 #define __get_user_nocheck(data,addr,size,type) ({ \
-- 
cgit v1.2.3-70-g09d2


From 94f8762db9a80ed34252e9fe5fa38be87bb7826b Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 16 Feb 2006 14:26:53 -0800
Subject: [SPARC64]: Add sun4v_cpu_qconf() hypervisor call.

Call it from register_one_mondo().
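
Usage then reduces to a plain call plus an error check, e.g. for the
cpu mondo queue (queue-type constants live in hypervisor.h; 128
entries is the size implied by the Niagara queue offsets):

	unsigned long status;

	status = sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, paddr, 128);
	if (status != HV_EOK)
		prom_printf("cpu_qconf failed, err %lu\n", status);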

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S      | 12 ++++++++++++
 arch/sparc64/kernel/irq.c        | 26 +++++++-------------------
 include/asm-sparc64/hypervisor.h |  6 ++++++
 3 files changed, 25 insertions(+), 19 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index bf40b065bcc..f5c8a293979 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1772,3 +1772,15 @@ sun4v_intr_settarget:
 	retl
 	 nop
 
+	/* %o0:	type
+	 * %o1:	queue paddr
+	 * %o2:	num queue entries
+	 *
+	 * returns %o0:	status
+	 */
+	.globl	sun4v_cpu_qconf
+sun4v_cpu_qconf:
+	mov	HV_FAST_CPU_QCONF, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 4d9931d124a..e1729e5189a 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -939,25 +939,13 @@ void init_irqwork_curcpu(void)
 
 static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
 {
-	register unsigned long func __asm__("%o5");
-	register unsigned long arg0 __asm__("%o0");
-	register unsigned long arg1 __asm__("%o1");
-	register unsigned long arg2 __asm__("%o2");
-
-	func = HV_FAST_CPU_QCONF;
-	arg0 = type;
-	arg1 = paddr;
-	arg2 = 128; /* XXX Implied by Niagara queue offsets. XXX */
-	__asm__ __volatile__("ta	%8"
-			     : "=&r" (func), "=&r" (arg0),
-			       "=&r" (arg1), "=&r" (arg2)
-			     : "0" (func), "1" (arg0),
-			       "2" (arg1), "3" (arg2),
-			       "i" (HV_FAST_TRAP));
-
-	if (arg0 != HV_EOK) {
-		prom_printf("SUN4V: cpu_qconf(%lu) failed with error %lu\n",
-			    type, func);
+	unsigned long num_entries = 128;
+	unsigned long status;
+
+	status = sun4v_cpu_qconf(type, paddr, num_entries);
+	if (status != HV_EOK) {
+		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
+			    "err %lu\n", type, paddr, num_entries, status);
 		prom_halt();
 	}
 }
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index f14992ab7fe..cd5fbcd9556 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -301,6 +301,12 @@
 #define  HV_CPU_QUEUE_RES_ERROR		 0x3e
 #define  HV_CPU_QUEUE_NONRES_ERROR	 0x3f
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_qconf(unsigned long type,
+				     unsigned long queue_paddr,
+				     unsigned long num_queue_entries);
+#endif
+
 /* cpu_qinfo()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_CPU_QINFO
-- 
cgit v1.2.3-70-g09d2


From 4ff7ac417d4b628c23df3ae8301d17e29e6e8f16 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 16 Feb 2006 16:22:26 -0800
Subject: [SPARC64]: Add GET_GL_GLOBAL() macro for SUN4V.

So we can read the %gl register for debugging.
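
The constant is just a hand-assembled rdpr: op=2, rd=N, op3=0x2a
(rdpr), rs1=16, with privileged register 16 assumed to be %gl per the
sun4v architecture.  A quick host-side check of the encodings:

	#include <stdio.h>

	int main(void)
	{
		unsigned int n;

		/* 0x81540000 | (N << 25) encodes "rdpr %gl, %gN" */
		for (n = 1; n < 8; n++)
			printf("rdpr %%gl, %%g%u -> .word 0x%08x\n",
			       n, 0x81540000u | (n << 25));
		return 0;
	}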

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/head.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index c4ac3e87aa5..67960a751f4 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -8,6 +8,10 @@
 #define SET_GL(val)	\
 	.word	0xa1902000 | val
 
+	/* rdpr %gl, %gN */
+#define GET_GL_GLOBAL(N)	\
+	.word	0x81540000 | (N << 25)
+
 #define KERNBASE	0x400000
 
 #define	PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
-- 
cgit v1.2.3-70-g09d2


From 72aff53f1fe74153eccef303ab2f79de888d248c Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Feb 2006 01:29:17 -0800
Subject: [SPARC64]: Get SUN4V SMP working.

The sibling cpu bringup is extremely fragile.  We can only
perform the most basic calls until we take over the trap
table from the firmware/hypervisor on the new cpu.

This means no accesses to %g4, %g5, %g6 since those can't be
TLB translated without our trap handlers.

In order to achieve this:

1) Change sun4v_init_mondo_queues() so that it can operate in
   several modes.

   It can allocate the queues, or install them in the current
   processor, or both.

   The boot cpu does both in its call early on.

   Later, the boot cpu allocates the sibling cpu queue, starts
   the sibling cpu, then the sibling cpu loads them in.

2) init_cur_cpu_trap() is changed to take the current_thread_info()
   as an argument instead of reading %g6 directly on the current
   cpu.

3) Create a trampoline stack for the sibling cpus.  We do our basic
   kernel calls using this stack, which is locked into the kernel
   image, then go to our proper thread stack after taking over the
   trap table.

4) While we are in this delicate startup state, we put 0xdeadbeef
   into %g4/%g5/%g6 in order to catch accidental accesses.

5) On the final prom_set_trap_table*() call, we put &init_thread_union
   into %g6.  This is a hack to make prom_world(0) work.  All it
   wants to do is restore the %asi register using
   get_thread_current_ds().

Longer term we should just do the OBP calls to set the trap table by
hand just like we do for everything else.  This would avoid that silly
prom_world(0) issue, and then we can remove the init_thread_union hack.
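
In C terms the new sun4v_init_mondo_queues() convention splits across
three call sites, roughly as follows (mirroring the irq.c, smp.c and
trampoline.S hunks below):

	/* Boot cpu, early: allocate its own queues and load them. */
	sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

	/* Boot cpu, per sibling: allocate only; the sibling cannot
	 * yet safely register queues on its own behalf.
	 */
	sun4v_init_mondo_queues(0, cpu, 1, 0);

	/* Sibling, from the trampoline: load what was allocated. */
	sun4v_init_mondo_queues(0, hard_smp_processor_id(), 0, 1);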

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/irq.c        | 30 ++++++++-----
 arch/sparc64/kernel/setup.c      |  2 +-
 arch/sparc64/kernel/smp.c        |  6 +++
 arch/sparc64/kernel/trampoline.S | 92 ++++++++++++++++++++++++----------------
 arch/sparc64/kernel/traps.c      |  4 +-
 include/asm-sparc64/cpudata.h    |  2 +-
 6 files changed, 85 insertions(+), 51 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 6eb44ca5dba..bb0bb34555d 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -1018,21 +1018,29 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_
 }
 
 /* Allocate and register the mondo and error queues for this cpu.  */
-void __cpuinit sun4v_init_mondo_queues(int use_bootmem)
+void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
 {
-	int cpu = hard_smp_processor_id();
 	struct trap_per_cpu *tb = &trap_block[cpu];
 
-	alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
-	alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
-	alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
-	alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
-	alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
-	alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
+	if (alloc) {
+		alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
+		alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
+		alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
+		alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
+		alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
+		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
 
-	init_cpu_send_mondo_info(tb, use_bootmem);
+		init_cpu_send_mondo_info(tb, use_bootmem);
+	}
 
-	sun4v_register_mondo_queues(cpu);
+	if (load) {
+		if (cpu != hard_smp_processor_id()) {
+			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
+				    cpu, hard_smp_processor_id());
+			prom_halt();
+		}
+		sun4v_register_mondo_queues(cpu);
+	}
 }
 
 /* Only invoked on boot processor. */
@@ -1043,7 +1051,7 @@ void __init init_IRQ(void)
 	memset(&ivector_table[0], 0, sizeof(ivector_table));
 
 	if (tlb_type == hypervisor)
-		sun4v_init_mondo_queues(1);
+		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
 
 	/* We need to clear any IRQ's pending in the soft interrupt
 	 * registers, a spurious one could be left around from the
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 06807cf95ee..9b0c409d5b6 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -384,7 +384,7 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 
 	/* Get boot processor trap_block[] setup.  */
-	init_cur_cpu_trap();
+	init_cur_cpu_trap(current_thread_info());
 }
 
 static int __init set_preferred_console(void)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 527dfd7ae21..b586345fe3b 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -316,6 +316,8 @@ static void smp_synchronize_one_tick(int cpu)
 	spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
+extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
+
 extern unsigned long sparc64_cpu_startup;
 
 /* The OBP cpu startup callback truncates the 3rd arg cookie to
@@ -339,6 +341,9 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 	cpu_set(cpu, cpu_callout_map);
 
 	if (tlb_type == hypervisor) {
+		/* Alloc the mondo queues, cpu will load them.  */
+		sun4v_init_mondo_queues(0, cpu, 1, 0);
+
 		prom_startcpu_cpuid(cpu, entry, cookie);
 	} else {
 		int cpu_node;
@@ -352,6 +357,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 			break;
 		udelay(100);
 	}
+
 	if (callin_flag) {
 		ret = 0;
 	} else {
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index b9c9f54b0a0..a4dc01a3d23 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -30,12 +30,16 @@ itlb_load:
 dtlb_load:
 	.asciz	"SUNW,dtlb-load"
 
+	/* XXX __cpuinit this thing XXX */
+#define TRAMP_STACK_SIZE	1024
+	.align	16
+tramp_stack:
+	.skip	TRAMP_STACK_SIZE
+
 	.text
 	.align		8
 	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
 sparc64_cpu_startup:
-	flushw
-
 	BRANCH_IF_SUN4V(g1, niagara_startup)
 	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
 	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)
@@ -58,6 +62,7 @@ cheetah_startup:
 	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
 	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
 	membar	#Sync
+	/* fallthru */
 
 cheetah_generic_startup:
 	mov	TSB_EXTENSION_P, %g3
@@ -90,19 +95,17 @@ spitfire_startup:
 	membar		#Sync
 
 startup_continue:
-	wrpr		%g0, 15, %pil
-
 	sethi		%hi(0x80000000), %g2
 	sllx		%g2, 32, %g2
 	wr		%g2, 0, %tick_cmpr
 
+	mov		%o0, %l0
+
 	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)
 
 	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
 	 * We lock 2 consecutive entries if we are 'bigkernel'.
 	 */
-	mov		%o0, %l0
-
 	sethi		%hi(prom_entry_lock), %g2
 1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
 	membar		#StoreLoad | #StoreStore
@@ -112,7 +115,6 @@ startup_continue:
 	sethi		%hi(p1275buf), %g2
 	or		%g2, %lo(p1275buf), %g2
 	ldx		[%g2 + 0x10], %l2
-	mov		%sp, %l1
 	add		%l2, -(192 + 128), %sp
 	flushw
 
@@ -308,18 +310,9 @@ niagara_lock_tlb:
 	ta		HV_FAST_TRAP
 
 after_lock_tlb:
-	mov		%l1, %sp
-	flushw
-
-	mov		%l0, %o0
-
 	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
 	wr		%g0, 0, %fprs
 
-	/* XXX Buggy PROM... */
-	srl		%o0, 0, %o0
-	ldx		[%o0], %g6
-
 	wr		%g0, ASI_P, %asi
 
 	mov		PRIMARY_CONTEXT, %g7
@@ -341,22 +334,25 @@ after_lock_tlb:
 
 	membar		#Sync
 
-	mov		1, %g5
-	sllx		%g5, THREAD_SHIFT, %g5
-	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
-	add		%g6, %g5, %sp
+	/* Everything we do here, until we properly take over the
+	 * trap table, must be done with extreme care.  We cannot
+	 * make any references to %g6 (current thread pointer),
+	 * %g4 (current task pointer), or %g5 (base of current cpu's
+	 * per-cpu area) until we properly take over the trap table
+	 * from the firmware and hypervisor.
+	 *
+	 * Get onto temporary stack which is in the locked kernel image.
+	 */
+	sethi		%hi(tramp_stack), %g1
+	or		%g1, %lo(tramp_stack), %g1
+	add		%g1, TRAMP_STACK_SIZE, %g1
+	sub		%g1, STACKFRAME_SZ + STACK_BIAS, %sp
 	mov		0, %fp
 
-	wrpr		%g0, 0, %wstate
-	wrpr		%g0, 0, %tl
-
-	/* Load TBA, then we can resurface. */
-	sethi		%hi(sparc64_ttable_tl0), %g5
-	wrpr		%g5, %tba
-
-	ldx		[%g6 + TI_TASK], %g4
-
-	wrpr		%g0, 0, %wstate
+	/* Put garbage in these registers to trap any access to them.  */
+	set		0xdeadbeef, %g4
+	set		0xdeadbeef, %g5
+	set		0xdeadbeef, %g6
 
 	call		init_irqwork_curcpu
 	 nop
@@ -367,11 +363,17 @@ after_lock_tlb:
 	bne,pt		%icc, 1f
 	 nop
 
+	call		hard_smp_processor_id
+	 nop
+	
+	mov		%o0, %o1
+	mov		0, %o0
+	mov		0, %o2
 	call		sun4v_init_mondo_queues
-	 mov		0, %o0
+	 mov		1, %o3
 
 1:	call		init_cur_cpu_trap
-	 nop
+	 ldx		[%l0], %o0
 
 	/* Start using proper page size encodings in ctx register.  */
 	sethi		%hi(sparc64_kern_pri_context), %g3
@@ -386,9 +388,14 @@ after_lock_tlb:
 
 	membar		#Sync
 
-	rdpr		%pstate, %o1
-	or		%o1, PSTATE_IE, %o1
-	wrpr		%o1, 0, %pstate
+	wrpr		%g0, 0, %wstate
+
+	/* As a hack, put &init_thread_union into %g6.
+	 * prom_world() loads from here to restore the %asi
+	 * register.
+	 */
+	sethi		%hi(init_thread_union), %g6
+	or		%g6, %lo(init_thread_union), %g6
 
 	sethi		%hi(is_sun4v), %o0
 	lduw		[%o0 + %lo(is_sun4v)], %o0
@@ -418,7 +425,20 @@ after_lock_tlb:
 1:	call		prom_set_trap_table
 	 sethi		%hi(sparc64_ttable_tl0), %o0
 
-2:	call		smp_callin
+2:	ldx		[%l0], %g6
+	ldx		[%g6 + TI_TASK], %g4
+
+	mov		1, %g5
+	sllx		%g5, THREAD_SHIFT, %g5
+	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+	add		%g6, %g5, %sp
+	mov		0, %fp
+
+	rdpr		%pstate, %o1
+	or		%o1, PSTATE_IE, %o1
+	wrpr		%o1, 0, %pstate
+
+	call		smp_callin
 	 nop
 	call		cpu_idle
 	 mov		0, %o0
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 5956d0a9400..c9484ae5bb8 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2413,12 +2413,12 @@ struct trap_per_cpu trap_block[NR_CPUS];
 /* This can get invoked before sched_init() so play it super safe
  * and use hard_smp_processor_id().
  */
-void init_cur_cpu_trap(void)
+void init_cur_cpu_trap(struct thread_info *t)
 {
 	int cpu = hard_smp_processor_id();
 	struct trap_per_cpu *p = &trap_block[cpu];
 
-	p->thread = current_thread_info();
+	p->thread = t;
 	p->pgd_paddr = 0;
 }
 
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 5a970f5ed9b..771aa94dfd9 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -77,7 +77,7 @@ struct trap_per_cpu {
 	unsigned long		__pad2[4];
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(void);
+extern void init_cur_cpu_trap(struct thread_info *);
 extern void setup_tba(void);
 
 #ifdef CONFIG_SMP
-- 
cgit v1.2.3-70-g09d2


From ebd8c56c5ae154e2c6cfb7453a76a4e7265b2377 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Feb 2006 08:38:06 -0800
Subject: [SPARC64]: Fix uniprocessor IRQ targeting on SUN4V.

We need to use the real hardware processor ID when
targeting interrupts, not the "define to 0" thing
the uniprocessor build gives us.

Also, fill in the Node-ID and Agent-ID fields properly
on sun4u/Safari.
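
For Safari the target ID packs a 5-bit Agent-ID and a 5-bit Node-ID
derived from the cpuid.  A standalone model of the new Safari branch
of sun4u_compute_tid() (shift and mask values from the irq.h hunk):

	#include <stdio.h>

	#define IMAP_AID_SHIFT	26
	#define IMAP_NID_SHIFT	21
	#define IMAP_AID_SAFARI	0x7c000000
	#define IMAP_NID_SAFARI	0x03e00000

	static unsigned int safari_tid(unsigned long cpuid)
	{
		unsigned int a = cpuid & 0x1f;		/* Agent-ID */
		unsigned int n = (cpuid >> 5) & 0x1f;	/* Node-ID */

		return ((a << IMAP_AID_SHIFT) | (n << IMAP_NID_SHIFT)) &
			(IMAP_AID_SAFARI | IMAP_NID_SAFARI);
	}

	int main(void)
	{
		printf("cpuid 33 -> tid %#x\n", safari_tid(33));
		return 0;
	}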

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S   |  4 +-
 arch/sparc64/kernel/irq.c     | 98 +++++++++++++++++++++----------------------
 arch/sparc64/kernel/setup.c   |  6 +--
 include/asm-sparc64/cpudata.h |  6 +--
 include/asm-sparc64/irq.h     |  3 ++
 5 files changed, 58 insertions(+), 59 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index f5c8a293979..bd332e41532 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1692,10 +1692,12 @@ __flushw_user:
 #ifdef CONFIG_SMP
 	.globl		hard_smp_processor_id
 hard_smp_processor_id:
+#endif
+	.globl		real_hard_smp_processor_id
+real_hard_smp_processor_id:
 	__GET_CPUID(%o0)
 	retl
 	 nop
-#endif
 
 	/* %o0: devhandle
 	 * %o1:	devino
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index bb0bb34555d..712b16cdd5f 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -138,11 +138,48 @@ out_unlock:
 	return 0;
 }
 
+extern unsigned long real_hard_smp_processor_id(void);
+
+static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
+{
+	unsigned int tid;
+
+	if (this_is_starfire) {
+		tid = starfire_translate(imap, cpuid);
+		tid <<= IMAP_TID_SHIFT;
+		tid &= IMAP_TID_UPA;
+	} else {
+		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+			unsigned long ver;
+
+			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+			if ((ver >> 32UL) == __JALAPENO_ID ||
+			    (ver >> 32UL) == __SERRANO_ID) {
+				tid = cpuid << IMAP_TID_SHIFT;
+				tid &= IMAP_TID_JBUS;
+			} else {
+				unsigned int a = cpuid & 0x1f;
+				unsigned int n = (cpuid >> 5) & 0x1f;
+
+				tid = ((a << IMAP_AID_SHIFT) |
+				       (n << IMAP_NID_SHIFT));
+				tid &= (IMAP_AID_SAFARI |
+					IMAP_NID_SAFARI);
+			}
+		} else {
+			tid = cpuid << IMAP_TID_SHIFT;
+			tid &= IMAP_TID_UPA;
+		}
+	}
+
+	return tid;
+}
+
 /* Now these are always passed a true fully specified sun4u INO. */
 void enable_irq(unsigned int irq)
 {
 	struct ino_bucket *bucket = __bucket(irq);
-	unsigned long imap;
+	unsigned long imap, cpuid;
 
 	imap = bucket->imap;
 	if (imap == 0UL)
@@ -150,54 +187,25 @@ void enable_irq(unsigned int irq)
 
 	preempt_disable();
 
+	/* This gets the physical processor ID, even on uniprocessor,
+	 * so we can always program the interrupt target correctly.
+	 */
+	cpuid = real_hard_smp_processor_id();
+
 	if (tlb_type == hypervisor) {
 		unsigned int ino = __irq_ino(irq);
-		int cpu = hard_smp_processor_id();
 		int err;
 
-		err = sun4v_intr_settarget(ino, cpu);
+		err = sun4v_intr_settarget(ino, cpuid);
 		if (err != HV_EOK)
-			printk("sun4v_intr_settarget(%x,%d): err(%d)\n",
-			       ino, cpu, err);
+			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
+			       ino, cpuid, err);
 		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
 		if (err != HV_EOK)
 			printk("sun4v_intr_setenabled(%x): err(%d)\n",
 			       ino, err);
 	} else {
-		unsigned long tid;
-
-		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-			unsigned long ver;
-
-			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-			if ((ver >> 32) == __JALAPENO_ID ||
-			    (ver >> 32) == __SERRANO_ID) {
-				/* We set it to our JBUS ID. */
-				__asm__ __volatile__("ldxa [%%g0] %1, %0"
-						     : "=r" (tid)
-						     : "i" (ASI_JBUS_CONFIG));
-				tid = ((tid & (0x1fUL<<17)) << 9);
-				tid &= IMAP_TID_JBUS;
-			} else {
-				/* We set it to our Safari AID. */
-				__asm__ __volatile__("ldxa [%%g0] %1, %0"
-						     : "=r" (tid)
-						     : "i"(ASI_SAFARI_CONFIG));
-				tid = ((tid & (0x3ffUL<<17)) << 9);
-				tid &= IMAP_AID_SAFARI;
-			}
-		} else if (this_is_starfire == 0) {
-			/* We set it to our UPA MID. */
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (tid)
-					     : "i" (ASI_UPA_CONFIG));
-			tid = ((tid & UPA_CONFIG_MID) << 9);
-			tid &= IMAP_TID_UPA;
-		} else {
-			tid = (starfire_translate(imap,
-						  smp_processor_id()) << 26);
-			tid &= IMAP_TID_UPA;
-		}
+		unsigned int tid = sun4u_compute_tid(imap, cpuid);
 
 		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
 		 * of this SYSIO's preconfigured IGN in the SYSIO Control
@@ -817,18 +825,8 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
 		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
 	} else {
 		unsigned long imap = bucket->imap;
-		unsigned int tid;
+		unsigned int tid = sun4u_compute_tid(imap, goal_cpu);
 
-		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-			tid = goal_cpu << 26;
-			tid &= IMAP_AID_SAFARI;
-		} else if (this_is_starfire == 0) {
-			tid = goal_cpu << 26;
-			tid &= IMAP_TID_UPA;
-		} else {
-			tid = (starfire_translate(imap, goal_cpu) << 26);
-			tid &= IMAP_TID_UPA;
-		}
 		upa_writel(tid | IMAP_VALID, imap);
 	}
 
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 9b0c409d5b6..77066f1bbe2 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -222,7 +222,6 @@ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
 
 static void __init per_cpu_patch(void)
 {
-#ifdef CONFIG_SMP
 	struct cpuid_patch_entry *p;
 	unsigned long ver;
 	int is_jbus;
@@ -233,8 +232,8 @@ static void __init per_cpu_patch(void)
 	is_jbus = 0;
 	if (tlb_type != hypervisor) {
 		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-		is_jbus = ((ver >> 32) == __JALAPENO_ID ||
-			   (ver >> 32) == __SERRANO_ID);
+		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
+			   (ver >> 32UL) == __SERRANO_ID);
 	}
 
 	p = &__cpuid_patch;
@@ -279,7 +278,6 @@ static void __init per_cpu_patch(void)
 
 		p++;
 	}
-#endif
 }
 
 static void __init sun4v_patch(void)
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 771aa94dfd9..84656f1895c 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -80,7 +80,6 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(struct thread_info *);
 extern void setup_tba(void);
 
-#ifdef CONFIG_SMP
 struct cpuid_patch_entry {
 	unsigned int	addr;
 	unsigned int	cheetah_safari[4];
@@ -89,7 +88,6 @@ struct cpuid_patch_entry {
 	unsigned int	sun4v[4];
 };
 extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-#endif
 
 struct sun4v_1insn_patch_entry {
 	unsigned int	addr;
@@ -123,8 +121,6 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 
 #include <asm/scratchpad.h>
 
-#ifdef CONFIG_SMP
-
 #define __GET_CPUID(REG)				\
 	/* Spitfire implementation (default). */	\
 661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
@@ -156,6 +152,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	nop;						\
 	.previous;
 
+#ifdef CONFIG_SMP
+
 #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
 	__GET_CPUID(TMP)			\
 	sethi	%hi(trap_block), DEST;		\
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 529a9df1ad4..de33d6e1afb 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -72,8 +72,11 @@ struct ino_bucket {
 #define IMAP_VALID		0x80000000	/* IRQ Enabled		*/
 #define IMAP_TID_UPA		0x7c000000	/* UPA TargetID		*/
 #define IMAP_TID_JBUS		0x7c000000	/* JBUS TargetID	*/
+#define IMAP_TID_SHIFT		26
 #define IMAP_AID_SAFARI		0x7c000000	/* Safari AgentID	*/
+#define IMAP_AID_SHIFT		26
 #define IMAP_NID_SAFARI		0x03e00000	/* Safari NodeID	*/
+#define IMAP_NID_SHIFT		21
 #define IMAP_IGN		0x000007c0	/* IRQ Group Number	*/
 #define IMAP_INO		0x0000003f	/* IRQ Number		*/
 #define IMAP_INR		0x000007ff	/* Full interrupt number*/
-- 
cgit v1.2.3-70-g09d2


From 97532f598273d03cab8bb5206669b6fdd654eb63 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Feb 2006 10:14:38 -0800
Subject: [SPARC64]: Add HWCAP_SPARC_BLKINIT elf capability flag for Niagara.
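
Userspace sees the new bit via the AT_HWCAP aux vector.  A sketch of a
consumer, using the modern getauxval() interface (which postdates this
patch and is shown purely for illustration):

	#include <stdio.h>
	#include <sys/auxv.h>

	#define HWCAP_SPARC_BLKINIT	64

	int main(void)
	{
		if (getauxval(AT_HWCAP) & HWCAP_SPARC_BLKINIT)
			puts("block-init stores available (Niagara)");
		return 0;
	}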

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/elf.h | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 69539a8ab83..303d85e2f82 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -10,6 +10,7 @@
 #ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/spitfire.h>
 #endif
 
 /*
@@ -68,6 +69,7 @@
 #define HWCAP_SPARC_MULDIV      8
 #define HWCAP_SPARC_V9		16
 #define HWCAP_SPARC_ULTRA3	32
+#define HWCAP_SPARC_BLKINIT	64
 
 /*
  * These are used to set parameters in the core dumps.
@@ -145,11 +147,21 @@ typedef struct {
    instruction set this cpu supports.  */
 
 /* On Ultra, we support all of the v8 capabilities. */
-#define ELF_HWCAP	((HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
-			  HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | \
-			  HWCAP_SPARC_V9) | \
-			 ((tlb_type == cheetah || tlb_type == cheetah_plus) ? \
-			  HWCAP_SPARC_ULTRA3 : 0))
+static inline unsigned int sparc64_elf_hwcap(void)
+{
+	unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
+			    HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
+			    HWCAP_SPARC_V9);
+
+	if (tlb_type == cheetah || tlb_type == cheetah_plus)
+		cap |= HWCAP_SPARC_ULTRA3;
+	else if (tlb_type == hypervisor)
+		cap |= HWCAP_SPARC_BLKINIT;
+
+	return cap;
+}
+
+#define ELF_HWCAP	sparc64_elf_hwcap()
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
-- 
cgit v1.2.3-70-g09d2


From c857e3fdbc306e95fdcaad1d8f3ea6bc8e7eea99 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Feb 2006 10:35:23 -0800
Subject: [SPARC64]: __bzero_noasi --> __clear_user

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/sparc64_ksyms.c |  2 +-
 arch/sparc64/lib/bzero.S            | 18 +++++++++---------
 include/asm-sparc64/uaccess.h       |  9 +--------
 3 files changed, 11 insertions(+), 18 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index f1f01378d07..801fc0ce484 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -335,7 +335,7 @@ EXPORT_SYMBOL(copy_to_user_fixup);
 EXPORT_SYMBOL(copy_from_user_fixup);
 EXPORT_SYMBOL(copy_in_user_fixup);
 EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__bzero_noasi);
+EXPORT_SYMBOL(__clear_user);
 
 /* Various address conversion macros use this. */
 EXPORT_SYMBOL(phys_base);
diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S
index 1d2abcfa4e5..c7bbae8c590 100644
--- a/arch/sparc64/lib/bzero.S
+++ b/arch/sparc64/lib/bzero.S
@@ -98,12 +98,12 @@ __bzero_done:
 	.text;			\
 	.align 4;
 
-	.globl	__bzero_noasi
-	.type	__bzero_noasi, #function
-__bzero_noasi:		/* %o0=buf, %o1=len */
-	brz,pn		%o1, __bzero_noasi_done
+	.globl	__clear_user
+	.type	__clear_user, #function
+__clear_user:		/* %o0=buf, %o1=len */
+	brz,pn		%o1, __clear_user_done
 	 cmp		%o1, 16
-	bl,pn		%icc, __bzero_noasi_tiny
+	bl,pn		%icc, __clear_user_tiny
 	 EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes)
 	andcc		%o0, 0x3, %g0
 	be,pt		%icc, 2f
@@ -145,14 +145,14 @@ __bzero_noasi:		/* %o0=buf, %o1=len */
 	subcc		%g1, 8, %g1
 	bne,pt		%icc, 5b
 	 add		%o0, 0x8, %o0
-6:	brz,pt		%o1, __bzero_noasi_done
+6:	brz,pt		%o1, __clear_user_done
 	 nop
-__bzero_noasi_tiny:
+__clear_user_tiny:
 1:	EX_ST(stba	%g0, [%o0 + 0x00] %asi)
 	subcc		%o1, 1, %o1
 	bne,pt		%icc, 1b
 	 add		%o0, 1, %o0
-__bzero_noasi_done:
+__clear_user_done:
 	retl
 	 clr		%o0
-	.size		__bzero_noasi, .-__bzero_noasi
+	.size		__clear_user, .-__clear_user
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 0c375a989be..afe236ba555 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -252,14 +252,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 }
 #define __copy_in_user copy_in_user
 
-extern unsigned long __must_check __bzero_noasi(void __user *, unsigned long);
-
-static inline unsigned long __must_check
-__clear_user(void __user *addr, unsigned long size)
-{
-	
-	return __bzero_noasi(addr, size);
-}
+extern unsigned long __must_check __clear_user(void __user *, unsigned long);
 
 #define clear_user __clear_user
 
-- 
cgit v1.2.3-70-g09d2


From 3763be32d591cacf808c36390a8af3f2784cde5f Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Feb 2006 12:33:13 -0800
Subject: [SPARC64]: Define ARCH_HAS_READ_CURRENT_TIMER.

This gives more consistent bogomips and delay() semantics,
especially on sun4v.  It gives weird-looking values, though...
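
The weird numbers follow directly from the redefinition:
loops_per_jiffy now counts %tick increments per jiffy rather than loop
iterations, so BogoMIPS ends up tracking the tick frequency.  For
example (rates invented for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long tick_hz = 1200000000UL;	/* assumed */
		unsigned long hz = 250;			/* assumed HZ */
		unsigned long lpj = tick_hz / hz;

		/* The classic calibrate_delay() report format. */
		printf("%lu.%02lu BogoMIPS (lpj=%lu)\n",
		       lpj / (500000 / hz), (lpj / (5000 / hz)) % 100,
		       lpj);
		return 0;
	}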

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/time.c  |  8 +-------
 arch/sparc64/lib/delay.c    | 19 ++++++++-----------
 include/asm-sparc64/timex.h |  6 ++++++
 3 files changed, 15 insertions(+), 18 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 7041146f86f..f6275adbc81 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1029,11 +1029,10 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg
 }
 
 struct freq_table {
-	unsigned long udelay_val_ref;
 	unsigned long clock_tick_ref;
 	unsigned int ref_freq;
 };
-static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 };
+static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };
 
 unsigned long sparc64_get_clock_tick(unsigned int cpu)
 {
@@ -1055,16 +1054,11 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
 
 	if (!ft->ref_freq) {
 		ft->ref_freq = freq->old;
-		ft->udelay_val_ref = cpu_data(cpu).udelay_val;
 		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
 	}
 	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
 	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
 	    (val == CPUFREQ_RESUMECHANGE)) {
-		cpu_data(cpu).udelay_val =
-			cpufreq_scale(ft->udelay_val_ref,
-				      ft->ref_freq,
-				      freq->new);
 		cpu_data(cpu).clock_tick =
 			cpufreq_scale(ft->clock_tick_ref,
 				      ft->ref_freq,
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
index e8808727617..fb27e54a03e 100644
--- a/arch/sparc64/lib/delay.c
+++ b/arch/sparc64/lib/delay.c
@@ -1,6 +1,6 @@
 /* delay.c: Delay loops for sparc64
  *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ * Copyright (C) 2004, 2006 David S. Miller <davem@davemloft.net>
  *
  * Based heavily upon x86 variant which is:
  *	Copyright (C) 1993 Linus Torvalds
@@ -8,19 +8,16 @@
  */
 
 #include <linux/delay.h>
+#include <asm/timer.h>
 
 void __delay(unsigned long loops)
 {
-	__asm__ __volatile__(
-"	b,pt	%%xcc, 1f\n"
-"	 cmp	%0, 0\n"
-"	.align	32\n"
-"1:\n"
-"	bne,pt	%%xcc, 1b\n"
-"	 subcc	%0, 1, %0\n"
-	: "=&r" (loops)
-	: "0" (loops)
-	: "cc");
+	unsigned long bclock, now;
+	
+	bclock = tick_ops->get_tick();
+	do {
+		now = tick_ops->get_tick();
+	} while ((now-bclock) < loops);
 }
 
 /* We used to multiply by HZ after shifting down by 32 bits
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h
index 9e8d4175bcb..2a5e4ebaad8 100644
--- a/include/asm-sparc64/timex.h
+++ b/include/asm-sparc64/timex.h
@@ -14,4 +14,10 @@
 typedef unsigned long cycles_t;
 #define get_cycles()	tick_ops->get_tick()
 
+#define ARCH_HAS_READ_CURRENT_TIMER	1
+#define read_current_timer(timer_val_p) 	\
+({	*timer_val_p = tick_ops->get_tick();	\
+	0;					\
+})
+
 #endif
-- 
cgit v1.2.3-70-g09d2


From 8b234274418d6d79527c4ac3a72da446ca4cb35f Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Feb 2006 18:01:02 -0800
Subject: [SPARC64]: More TLB/TSB handling fixes.

The SUN4V convention with non-shared TSBs is that the context
bit of the TAG is clear.  So we have to choose an "invalid"
bit and initialize new TSBs appropriately.  Otherwise a zero
TAG looks "valid".

Make sure, for the window fixup cases, that we use the right
global registers and that we don't potentially trample on
the live global registers in etrap/rtrap handling (%g2 and
%g6) and that we put the missing virtual address properly
in %g5.
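
The invalid-tag convention can be sanity-checked in isolation: a user
tag is vaddr >> 22, which occupies bits 0..41 at most, so a high
"invalid" bit can never collide with a real tag (TSB_TAG_INVALID_BIT
is defined in the tsb.h hunk; 46 is assumed here):

	#include <stdio.h>

	#define TSB_TAG_INVALID_BIT	46	/* assumed value */

	int main(void)
	{
		unsigned long inv = 1UL << TSB_TAG_INVALID_BIT;
		unsigned long tag = 0xfffff80000000000UL >> 22;

		/* A zero-filled TSB would otherwise look like a valid
		 * entry for context 0, vaddr 0 under the sun4v scheme.
		 */
		printf("invalid tag %#lx, sample tag %#lx\n", inv, tag);
		return 0;
	}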

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/dtlb_miss.S      |  4 ++--
 arch/sparc64/kernel/itlb_miss.S      | 12 +++++------
 arch/sparc64/kernel/ktlb.S           | 14 ++++++++-----
 arch/sparc64/kernel/sun4v_tlb_miss.S | 39 +++++++++++++++++++-----------------
 arch/sparc64/kernel/tsb.S            | 17 ++++++++++------
 arch/sparc64/mm/init.c               |  4 +++-
 arch/sparc64/mm/tsb.c                | 25 +++++++++++------------
 include/asm-sparc64/tsb.h            |  8 ++++++++
 include/asm-sparc64/ttable.h         | 12 +++++------
 9 files changed, 78 insertions(+), 57 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
index 2ef6f6e6e72..09a6a15a710 100644
--- a/arch/sparc64/kernel/dtlb_miss.S
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -2,10 +2,10 @@
 	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
 	ldxa	[%g0] ASI_DMMU, %g6		! Get TAG TARGET
 	srlx	%g6, 48, %g5			! Get context
+	sllx	%g6, 22, %g6			! Zero out context
 	brz,pn	%g5, kvmap_dtlb			! Context 0 processing
-	 nop					! Delay slot (fill me)
+	 srlx	%g6, 22, %g6			! Delay slot
 	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
-	nop					! Push branch to next I$ line
 	cmp	%g4, %g6			! Compare TAG
 
 /* DTLB ** ICACHE line 2: TSB compare and TLB load	*/
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 730caa4a150..6dfe3968c37 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -2,25 +2,25 @@
 	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
 	ldxa	[%g0] ASI_IMMU, %g6		! Get TAG TARGET
 	srlx	%g6, 48, %g5			! Get context
+	sllx	%g6, 22, %g6			! Zero out context
 	brz,pn	%g5, kvmap_itlb			! Context 0 processing
-	 nop					! Delay slot (fill me)
+	 srlx	%g6, 22, %g6			! Delay slot
 	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	cmp	%g4, %g6			! Compare TAG
-	sethi	%hi(PAGE_EXEC), %g4		! Setup exec check
 
 /* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
+	sethi	%hi(PAGE_EXEC), %g4		! Setup exec check
 	ldx	[%g4 + %lo(PAGE_EXEC)], %g4
 	bne,pn	%xcc, tsb_miss_itlb		! Miss
 	 mov	FAULT_CODE_ITLB, %g3
 	andcc	%g5, %g4, %g0			! Executable?
 	be,pn	%xcc, tsb_do_fault
 	 nop					! Delay slot, fill me
-	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
-	retry					! Trap done
+	nop
 
 /* ITLB ** ICACHE line 3: 				*/
-	nop
-	nop
+	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
+	retry					! Trap done
 	nop
 	nop
 	nop
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 47dfd45971e..ac29da915d0 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -52,8 +52,10 @@ kvmap_itlb_vmalloc_addr:
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 KTSB_STORE(%g1, %g0)
+	 KTSB_STORE(%g1, %g7)
 
 	KTSB_WRITE(%g1, %g5, %g6)
 
@@ -146,8 +148,10 @@ kvmap_dtlb_vmalloc_addr:
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 KTSB_STORE(%g1, %g0)
+	 KTSB_STORE(%g1, %g7)
 
 	KTSB_WRITE(%g1, %g5, %g6)
 
@@ -215,8 +219,8 @@ kvmap_dtlb_longpath:
 	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
 	.section .sun4v_2insn_patch, "ax"
 	.word	661b
-	nop
-	nop
+	SET_GL(1)
+	ldxa		[%g0] ASI_SCRATCHPAD, %g5
 	.previous
 
 	rdpr	%tl, %g3
@@ -226,7 +230,7 @@ kvmap_dtlb_longpath:
 	ldxa	[%g4] ASI_DMMU, %g5
 	.section .sun4v_2insn_patch, "ax"
 	.word	661b
-	mov	%g4, %g5
+	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
 	nop
 	.previous
 
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index 244d50de849..57ccdaec7cc 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -16,15 +16,14 @@
 	ldx	[BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
 	ldx	[BASE + HV_FAULT_D_CTX_OFFSET], CTX;
 
-	/* DEST = (CTX << 48) | (VADDR >> 22)
+	/* DEST = (VADDR >> 22)
 	 *
 	 * Branch to ZERO_CTX_LABEL if context is zero.
 	 */
-#define	COMPUTE_TAG_TARGET(DEST, VADDR, CTX, TMP, ZERO_CTX_LABEL) \
-	srlx	VADDR, 22, TMP; \
-	sllx	CTX, 48, DEST; \
+#define	COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \
+	srlx	VADDR, 22, DEST; \
 	brz,pn	CTX, ZERO_CTX_LABEL; \
-	 or	DEST, TMP, DEST;
+	 nop;
 
 	/* Create TSB pointer.  This is something like:
 	 *
@@ -53,7 +52,7 @@ sun4v_itlb_miss:
 	ldxa	[%g1] ASI_SCRATCHPAD, %g1
 
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
-	COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_itlb_4v)
+	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
 	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
@@ -72,15 +71,15 @@ sun4v_itlb_miss:
 	 *
 	 * %g3:	PTE
 	 * %g4:	vaddr
-	 * %g6:	TAG TARGET (only "CTX << 48" part matters)
 	 */
 sun4v_itlb_load:
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
 	mov	%o0, %g1		! save %o0
 	mov	%o1, %g2		! save %o1
 	mov	%o2, %g5		! save %o2
 	mov	%o3, %g7		! save %o3
 	mov	%g4, %o0		! vaddr
-	srlx	%g6, 48, %o1		! ctx
+	ldx	[%g6 + HV_FAULT_I_CTX_OFFSET], %o1	! ctx
 	mov	%g3, %o2		! PTE
 	mov	HV_MMU_IMMU, %o3	! flags
 	ta	HV_MMU_MAP_ADDR_TRAP
@@ -101,7 +100,7 @@ sun4v_dtlb_miss:
 	ldxa	[%g1] ASI_SCRATCHPAD, %g1
 
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
-	COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v)
+	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
 	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
@@ -115,15 +114,15 @@ sun4v_dtlb_miss:
 	 *
 	 * %g3:	PTE
 	 * %g4:	vaddr
-	 * %g6:	TAG TARGET (only "CTX << 48" part matters)
 	 */
 sun4v_dtlb_load:
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
 	mov	%o0, %g1		! save %o0
 	mov	%o1, %g2		! save %o1
 	mov	%o2, %g5		! save %o2
 	mov	%o3, %g7		! save %o3
 	mov	%g4, %o0		! vaddr
-	srlx	%g6, 48, %o1		! ctx
+	ldx	[%g6 + HV_FAULT_D_CTX_OFFSET], %o1	! ctx
 	mov	%g3, %o2		! PTE
 	mov	HV_MMU_DMMU, %o3	! flags
 	ta	HV_MMU_MAP_ADDR_TRAP
@@ -136,16 +135,18 @@ sun4v_dtlb_load:
 	retry
 
 sun4v_dtlb_prot:
+	SET_GL(1)
+
 	/* Load MMU Miss base into %g2.  */
-	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldxa	[%g0] ASI_SCRATCHPAD, %g5
 	
-	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
+	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
 	rdpr	%tl, %g1
 	cmp	%g1, 1
-	bgu,pn		%xcc, winfix_trampoline
+	bgu,pn	%xcc, winfix_trampoline
 	 nop
-	ba,pt		%xcc, sparc64_realfault_common
-	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+	ba,pt	%xcc, sparc64_realfault_common
+	 mov	FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
 
 	/* Called from trap table with TAG TARGET placed into
 	 * %g6, SCRATCHPAD_UTSBREG1 contents in %g1, and
@@ -189,7 +190,8 @@ sun4v_itlb_error:
 	sethi	%hi(sun4v_err_itlb_vaddr), %g1
 	stx	%g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
 	sethi	%hi(sun4v_err_itlb_ctx), %g1
-	srlx	%g6, 48, %o1		! ctx
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
+	ldx	[%g6 + HV_FAULT_I_CTX_OFFSET], %o1
 	stx	%o1, [%g1 + %lo(sun4v_err_itlb_ctx)]
 	sethi	%hi(sun4v_err_itlb_pte), %g1
 	stx	%g3, [%g1 + %lo(sun4v_err_itlb_pte)]
@@ -214,7 +216,8 @@ sun4v_dtlb_error:
 	sethi	%hi(sun4v_err_dtlb_vaddr), %g1
 	stx	%g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
 	sethi	%hi(sun4v_err_dtlb_ctx), %g1
-	srlx	%g6, 48, %o1		! ctx
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
+	ldx	[%g6 + HV_FAULT_D_CTX_OFFSET], %o1
 	stx	%o1, [%g1 + %lo(sun4v_err_dtlb_ctx)]
 	sethi	%hi(sun4v_err_dtlb_pte), %g1
 	stx	%g3, [%g1 + %lo(sun4v_err_dtlb_pte)]
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index a17259cf34b..cc225c0563c 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -36,7 +36,7 @@ tsb_miss_itlb:
 	/* At this point we have:
 	 * %g4 --	missing virtual address
 	 * %g1 --	TSB entry address
-	 * %g6 --	TAG TARGET ((vaddr >> 22) | (ctx << 48))
+	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
 	TRAP_LOAD_PGD_PHYS(%g7, %g5)
@@ -50,8 +50,10 @@ tsb_reload:
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g0)
+	 TSB_STORE(%g1, %g7)
 
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
@@ -62,8 +64,10 @@ tsb_reload:
 	sethi		%hi(_PAGE_SZBITS), %g7
 	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
 	cmp		%g2, %g7
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
-	 TSB_STORE(%g1, %g0)
+	 TSB_STORE(%g1, %g7)
 
 	TSB_WRITE(%g1, %g5, %g6)
 
@@ -136,7 +140,7 @@ tsb_do_fault:
 	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	SET_GL(1)
-	ldxa		[%g0] ASI_SCRATCHPAD, %g2
+	ldxa		[%g0] ASI_SCRATCHPAD, %g4
 	.previous
 
 	bne,pn		%xcc, tsb_do_itlb_fault
@@ -150,7 +154,7 @@ tsb_do_dtlb_fault:
 	ldxa	[%g4] ASI_DMMU, %g5
 	.section .sun4v_2insn_patch, "ax"
 	.word	661b
-	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
+	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
 	nop
 	.previous
 
@@ -217,8 +221,9 @@ tsb_flush:
 	bne,pn	%icc, 1b
 	 membar	#LoadLoad
 	cmp	%g1, %o1
+	mov	1, %o3
 	bne,pt	%xcc, 2f
-	 clr	%o3
+	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3
 	TSB_CAS_TAG(%o0, %g1, %o3)
 	cmp	%g1, %o3
 	bne,pn	%xcc, 1b
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index bd9e3205674..aa2aec6373c 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -296,7 +296,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
 				       (mm->context.tsb_nentries - 1UL)];
-		tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL;
+		tag = (address >> 22UL);
 		tsb_insert(tsb, tag, pte_val(pte));
 	}
 }
@@ -1110,6 +1110,8 @@ void __init paging_init(void)
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
+
 	if (tlb_type == hypervisor)
 		sun4v_pgprot_init();
 	else
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 3c1ff05038b..353cb060561 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -20,9 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries
 	return vaddr & (nentries - 1);
 }
 
-static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
+static inline int tag_compare(unsigned long tag, unsigned long vaddr)
 {
-	return (tag == ((vaddr >> 22) | (context << 48)));
+	return (tag == (vaddr >> 22));
 }
 
 /* TSB flushes need only occur on the processor initiating the address
@@ -38,8 +38,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
-		if (tag_compare(ent->tag, v, 0)) {
-			ent->tag = 0UL;
+		if (tag_compare(ent->tag, v)) {
+			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
 			membar_storeload_storestore();
 		}
 	}
@@ -50,14 +50,9 @@ void flush_tsb_user(struct mmu_gather *mp)
 	struct mm_struct *mm = mp->mm;
 	struct tsb *tsb = mm->context.tsb;
 	unsigned long nentries = mm->context.tsb_nentries;
-	unsigned long ctx, base;
+	unsigned long base;
 	int i;
 
-	if (unlikely(!CTX_VALID(mm->context)))
-		return;
-
-	ctx = CTX_HWBITS(mm->context);
-
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(tsb);
 	else
@@ -71,7 +66,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 
 		hash = tsb_hash(v, nentries);
 		ent = base + (hash * sizeof(struct tsb));
-		tag = (v >> 22UL) | (ctx << 48UL);
+		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
@@ -243,7 +238,8 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 				  "i" (ASI_NUCLEUS_QUAD_LDD));
 		}
 
-		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
+		if (tag & ((1UL << TSB_TAG_LOCK_BIT) |
+			   (1UL << TSB_TAG_INVALID_BIT)))
 			continue;
 
 		/* We only put base page size PTEs into the TSB,
@@ -315,10 +311,13 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 			break;
 	}
 
-	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
+	page = alloc_pages(gfp_flags, get_order(size));
 	if (unlikely(!page))
 		return;
 
+	/* Mark all tags as invalid.  */
+	memset(page_address(page), 0x40, size);
+
 	if (size == max_tsb_size)
 		mm->context.tsb_rss_limit = ~0UL;
 	else
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 7f3abc32c4d..6e6768067e3 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -12,6 +12,8 @@
  *
  * 	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
  * 	ldxa		[%g0] ASI_{D,I}MMU, %g6
+ *	sllx		%g6, 22, %g6
+ *	srlx		%g6, 22, %g6
  * 	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
  * 	cmp		%g4, %g6
  * 	bne,pn	%xcc, tsb_miss_{d,i}tlb
@@ -29,6 +31,9 @@
  * -------------------------------------------------
  *  63 61 60      48 47 42 41                     0
  *
+ * But actually, since we use per-mm TSB's, we zero out the CONTEXT
+ * field.
+ *
  * Like the powerpc hashtables we need to use locking in order to
  * synchronize while we update the entries.  PTE updates need locking
  * as well.
@@ -42,6 +47,9 @@
 #define TSB_TAG_LOCK_BIT	47
 #define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))
 
+#define TSB_TAG_INVALID_BIT	46
+#define TSB_TAG_INVALID_HIGH	(1 << (TSB_TAG_INVALID_BIT - 32))
+
 #define TSB_MEMBAR	membar	#StoreStore
 
 /* Some cpus support physical address quad loads.  We want to use
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index 9e28b240f3a..2d5e3c464df 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -184,20 +184,20 @@
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2;		\
 	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4;	\
 	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5;	\
-	srlx	%g4, 22, %g7;				\
-	sllx	%g5, 48, %g6;				\
+	srlx	%g4, 22, %g6;				\
 	ba,pt	%xcc, sun4v_itsb_miss;			\
-	 or	%g6, %g7, %g6;				\
+	 nop;						\
+	nop;						\
 	nop;
 
 #define SUN4V_DTSB_MISS					\
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2;		\
 	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4;	\
 	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5;	\
-	srlx	%g4, 22, %g7;				\
-	sllx	%g5, 48, %g6;				\
+	srlx	%g4, 22, %g6;				\
 	ba,pt	%xcc, sun4v_dtsb_miss;			\
-	 or	%g6, %g7, %g6;				\
+	 nop;						\
+	nop;						\
 	nop;
 
 /* Before touching these macros, you owe it to yourself to go and
-- 
cgit v1.2.3-70-g09d2


From f6c1fe529217788f095f6953c2b66bec1196ad3d Mon Sep 17 00:00:00 2001
From: "Fabio M. Di Nitto" <fabbione@ubuntu.com>
Date: Sat, 18 Feb 2006 00:32:31 -0800
Subject: [SPARC64] Fix build if CONFIG_HUGETLB_PAGE is not set

Signed-off-by: Fabio M. Di Nitto <fabbione@ubuntu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/pgtable.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index bc446f302ea..bab7defd8b3 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -354,6 +354,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
  */
 #define pgprot_noncached pgprot_noncached
 
+#ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
 	unsigned long mask;
@@ -371,6 +372,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
 
 	return __pte(pte_val(pte) | mask);
 }
+#endif
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-- 
cgit v1.2.3-70-g09d2


From 0f15952ac8641bde1045162ffd4a7b474cc318b0 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 18 Feb 2006 12:43:16 -0800
Subject: [SPARC64]: Export a PAGE_SHARED symbol.

For drivers/media/*, noticed by Fabbione.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/mm/init.c        | 5 +++++
 include/asm-sparc64/pgtable.h | 1 +
 2 files changed, 6 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index aa2aec6373c..c7aa4404edc 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1353,6 +1353,10 @@ EXPORT_SYMBOL(PAGE_KERNEL);
 
 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
 pgprot_t PAGE_COPY __read_mostly;
+
+pgprot_t PAGE_SHARED __read_mostly;
+EXPORT_SYMBOL(PAGE_SHARED);
+
 pgprot_t PAGE_EXEC __read_mostly;
 unsigned long pg_iobits __read_mostly;
 
@@ -1367,6 +1371,7 @@ static void prot_init_common(unsigned long page_none,
 			     unsigned long page_exec_bit)
 {
 	PAGE_COPY = __pgprot(page_copy);
+	PAGE_SHARED = __pgprot(page_shared);
 
 	protection_map[0x0] = __pgprot(page_none);
 	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index bab7defd8b3..6c8126b2dec 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -206,6 +206,7 @@ extern unsigned long pte_sz_bits(unsigned long size);
 extern pgprot_t PAGE_KERNEL;
 extern pgprot_t PAGE_KERNEL_LOCKED;
 extern pgprot_t PAGE_COPY;
+extern pgprot_t PAGE_SHARED;
 
 /* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
 extern unsigned long _PAGE_IE;
-- 
cgit v1.2.3-70-g09d2


From 1bd0cd74d102a527b2a72907698d73fad4b82cbd Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 21 Feb 2006 15:41:01 -0800
Subject: [SPARC64]: Kill cpudata->idle_volume.

Set, but never used.

We used to use this for dynamic IRQ retargetting, but that
code died a long time ago.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/process.c | 10 ++--------
 arch/sparc64/kernel/smp.c     |  2 --
 include/asm-sparc64/cpudata.h |  2 +-
 3 files changed, 3 insertions(+), 11 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index d00cb7ad89b..1ab8283efc4 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -56,6 +56,8 @@ void default_idle(void)
 {
 }
 
+
+
 #ifndef CONFIG_SMP
 
 /*
@@ -104,19 +106,11 @@ void cpu_idle(void)
 
 	while(1) {
 		if (need_resched()) {
-			cpuinfo->idle_volume = 0;
 			preempt_enable_no_resched();
 			schedule();
 			preempt_disable();
 			check_pgt_cache();
 		}
-		cpuinfo->idle_volume++;
-
-		/* The store ordering is so that IRQ handlers on
-		 * other cpus see our increasing idleness for the buddy
-		 * redistribution algorithm.  -DaveM
-		 */
-		membar_storeload_storestore();
 	}
 }
 
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 356d423ae14..0cd9b16612e 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -88,8 +88,6 @@ void __init smp_store_cpu_info(int id)
 	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
 						     "clock-frequency", 0);
 
-	cpu_data(id).idle_volume		= 1;
-
 	def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
 	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
 						      def);
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 84656f1895c..c66a81bbc84 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -19,7 +19,7 @@ typedef struct {
 	unsigned int	__softirq_pending; /* must be 1st, see rtrap.S */
 	unsigned int	multiplier;
 	unsigned int	counter;
-	unsigned int	idle_volume;
+	unsigned int	__pad1;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	udelay_val;
 
-- 
cgit v1.2.3-70-g09d2


From 6f5374c91f0dd1d92408ed44c066c32bcce5ce69 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 21 Feb 2006 15:42:09 -0800
Subject: [SPARC64]: Add sun4v_cpu_yield().

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S      | 9 +++++++++
 include/asm-sparc64/hypervisor.h | 3 +++
 2 files changed, 12 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index bd332e41532..9f3048e64e8 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1786,3 +1786,12 @@ sun4v_cpu_qconf:
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+
+	/* returns %o0:	status
+	 */
+	.globl	sun4v_cpu_yield
+sun4v_cpu_yield:
+	mov	HV_FAST_CPU_YIELD, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index cd5fbcd9556..726e2ea03ce 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -258,6 +258,9 @@
  */
 #define HV_FAST_CPU_YIELD		0x12
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_yield(void);
+#endif
 
 /* cpu_qconf()
  * TRAP:	HV_FAST_TRAP
-- 
cgit v1.2.3-70-g09d2


From d7744a09504d5ae84edc8289a02254e1f2102410 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 21 Feb 2006 22:31:11 -0800
Subject: [SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.

It can map all of the linear kernel mappings with zero TSB hash
conflicts for systems with 16GB or less ram.  In such cases, on
SUN4V, once we load up this TSB the first time with all the
mappings, we never take a linear kernel mapping TLB miss ever
again; the hypervisor handles them all.
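
To see where the 16GB figure comes from (a back-of-the-envelope
sketch, not code from this patch; tsb4m_index() is a hypothetical
helper):

	#define KERNEL_TSB4M_NENTRIES	4096	/* from this patch */

	/* The tag target is already (vaddr >> 22), i.e. the 4MB page
	 * number, so the TSB index is just that value masked to the
	 * table size.
	 */
	static unsigned long tsb4m_index(unsigned long vaddr)
	{
		return (vaddr >> 22) & (KERNEL_TSB4M_NENTRIES - 1);
	}

	/* 4096 entries * 4MB per entry = 16GB of linear mappings
	 * before two addresses can collide in the same slot.
	 */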

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/ktlb.S | 15 ++++++++++++++-
 arch/sparc64/mm/init.c     | 24 +++++++++++++++++++-----
 include/asm-sparc64/tsb.h  | 15 +++++++++++++++
 3 files changed, 48 insertions(+), 6 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index ae1dac17bc8..efcf38b6e28 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -121,6 +121,12 @@ kvmap_dtlb_obp:
 	 nop
 
 	.align		32
+kvmap_dtlb_tsb4m_load:
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	KTSB_WRITE(%g1, %g5, %g6)
+	ba,pt		%xcc, kvmap_dtlb_load
+	 nop
+
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
@@ -133,6 +139,13 @@ kvmap_dtlb_4v:
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
+	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
+	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+	/* TSB entry address left in %g1, lookup linear PTE.
+	 * Must preserve %g1 and %g6 (TAG).
+	 */
+kvmap_dtlb_tsb4m_miss:
 	sethi		%hi(kpte_linear_bitmap), %g2
 	or		%g2, %lo(kpte_linear_bitmap), %g2
 
@@ -163,7 +176,7 @@ kvmap_dtlb_4v:
 
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_load
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
 	 xor		%g2, %g4, %g5
 
 kvmap_dtlb_vmalloc_addr:
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5869f00d2d..2a123135b04 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
  */
 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 
+/* A special kernel TSB for 4MB and 256MB linear mappings.  */
+struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+
 #define MAX_BANKS	32
 
 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
@@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void)
 {
 	unsigned long ktsb_pa;
 
+	/* First KTSB for PAGE_SIZE mappings.  */
 	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
 
 	switch (PAGE_SIZE) {
@@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void)
 	ktsb_descr[0].tsb_base = ktsb_pa;
 	ktsb_descr[0].resv = 0;
 
-	/* XXX When we have a kernel large page size TSB, describe
-	 * XXX it in ktsb_descr[1] here.
-	 */
+	/* Second KTSB for 4MB/256MB mappings.  */
+	ktsb_pa = (kern_base +
+		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+
+	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
+	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
+				   HV_PGSZ_MASK_256MB);
+	ktsb_descr[1].assoc = 1;
+	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
+	ktsb_descr[1].ctx_idx = 0;
+	ktsb_descr[1].tsb_base = ktsb_pa;
+	ktsb_descr[1].resv = 0;
 }
 
 void __cpuinit sun4v_ktsb_register(void)
@@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void)
 	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
 
 	func = HV_FAST_MMU_TSB_CTX0;
-	/* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */
-	arg0 = 1;
+	arg0 = 2;
 	arg1 = pa;
 	__asm__ __volatile__("ta	%6"
 			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
@@ -1160,7 +1172,9 @@ void __init paging_init(void)
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+	/* Invalidate both kernel TSBs.  */
 	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
+	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
 
 	if (tlb_type == hypervisor)
 		sun4v_pgprot_init();
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 6e6768067e3..e82612cd9f3 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -243,6 +243,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 #define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
 #define KERNEL_TSB_NENTRIES	\
 	(KERNEL_TSB_SIZE_BYTES / 16)
+#define KERNEL_TSB4M_NENTRIES	4096
 
 	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
 	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
@@ -263,4 +264,18 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	be,a,pt		%xcc, OK_LABEL; \
 	 mov		REG4, REG1;
 
+	/* This version uses a trick, the TAG is already (VADDR >> 22) so
+	 * we can make use of that for the index computation.
+	 */
+#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+	sethi		%hi(swapper_4m_tsb), REG1; \
+	or		REG1, %lo(swapper_4m_tsb), REG1; \
+	and		TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
+	sllx		REG2, 4, REG2; \
+	add		REG1, REG2, REG2; \
+	KTSB_LOAD_QUAD(REG2, REG3); \
+	cmp		REG3, TAG; \
+	be,a,pt		%xcc, OK_LABEL; \
+	 mov		REG4, REG1;
+
 #endif /* !(_SPARC64_TSB_H) */
-- 
cgit v1.2.3-70-g09d2


From 0f05da6d577b80eb00f15994c86e4812ae60f1b9 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 22 Feb 2006 16:20:11 -0800
Subject: [SPARC64]: Fix %tstate ASI handling in start_thread{,32}()

Niagara helps us find an ancient bug in the sparc64 port :-)

The ASI_* values are plain constant defines, thus signed 32-bit
on sparc64.  To get this into the regs->tstate value we were
or'ing "(ASI_PNF << 24)" into it.

ASI_PNF is 0x82, and shifting it left by 24 puts its topmost bit
into the sign bit of a 32-bit value.  This would get sign extended
to 64 bits and thus corrupt the top half of the regs->tstate value.

This never caused problems on pre-Niagara cpus because the only
things up there were the condition code values.  But Niagara has the
global register level field, and this all-1s value is illegal there,
so Niagara gives an illegal instruction trap due to this bug.

I'm pretty sure this bug is about as old as the sparc64 port itself.

This also points out that we weren't setting ASI_PNF for 32-bit tasks.
We should, so fix that while we're here.
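
The sign extension is easy to demonstrate in isolation (a user-space
sketch; only the 0x82 constant comes from the kernel headers, the
program itself is hypothetical, and the signed shift into bit 31 is
technically undefined behaviour that typical compilers evaluate as
shown):

	#include <stdio.h>

	int main(void)
	{
		int asi = 0x82;	/* ASI_PNF, a plain signed constant */
		unsigned long bad, good;

		bad  = (asi << 24);			/* negative 32-bit value */
		good = ((unsigned long) asi << 24);	/* what we actually want */

		printf("bad=%016lx good=%016lx\n", bad, good);
		return 0;
	}

which prints bad=ffffffff82000000 good=0000000082000000 on a 64-bit
box, corrupting exactly the top half of %tstate as described above.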

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/processor.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index b3889f3f943..685479fb436 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -91,7 +91,8 @@ extern unsigned long thread_saved_pc(struct task_struct *);
 /* Do necessary setup to start up a newly executed thread. */
 #define start_thread(regs, pc, sp) \
 do { \
-	regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \
+	unsigned long __asi = ASI_PNF; \
+	regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
 	regs->tpc = ((pc & (~3)) - 4); \
 	regs->tnpc = regs->tpc + 4; \
 	regs->y = 0; \
@@ -128,10 +129,10 @@ do { \
 
 #define start_thread32(regs, pc, sp) \
 do { \
+	unsigned long __asi = ASI_PNF; \
 	pc &= 0x00000000ffffffffUL; \
 	sp &= 0x00000000ffffffffUL; \
-\
-	regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \
+	regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
 	regs->tpc = ((pc & (~3)) - 4); \
 	regs->tnpc = regs->tpc + 4; \
 	regs->y = 0; \
-- 
cgit v1.2.3-70-g09d2


From a0663a79ad4faebe1db4a56e2e767b120b12333a Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Thu, 23 Feb 2006 14:19:28 -0800
Subject: [SPARC64]: Fix TLB context allocation with SMT style shared TLBs.

The context allocation scheme we use depends upon there being a 1<-->1
mapping from cpu to physical TLB for correctness.  Chips like Niagara
break this assumption.

So what we do is notify all cpus with a cross call when the context
version number changes, and if necessary this makes them allocate
a valid context for the address space they are running at the time.
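
The check the cross call handler performs is conceptually just a
generation comparison; a sketch, not the exact macros from
mmu_context.h:

	/* The context value carries a version number in its high bits.
	 * A context is valid only if that version matches the current
	 * global generation kept in tlb_context_cache.
	 */
	static int ctx_valid(unsigned long ctx_val, unsigned long cache)
	{
		return !((ctx_val ^ cache) & CTX_VERSION_MASK);
	}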

Stress tested with make -j1024, make -j2048, and make -j4096 kernel
builds on a 32-strand, 8 core, T2000 with 16GB of ram.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/smp.c         | 40 ++++++++++++++++++++++++++++-----------
 arch/sparc64/mm/init.c            |  9 ++++++++-
 include/asm-sparc64/mmu.h         |  1 +
 include/asm-sparc64/mmu_context.h | 25 ++++++++++++------------
 4 files changed, 50 insertions(+), 25 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 0cd9b16612e..1ce94081149 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -885,26 +885,44 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	put_cpu();
 }
 
+static void __smp_receive_signal_mask(cpumask_t mask)
+{
+	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
+}
+
 void smp_receive_signal(int cpu)
 {
 	cpumask_t mask = cpumask_of_cpu(cpu);
 
-	if (cpu_online(cpu)) {
-		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
-
-		if (tlb_type == spitfire)
-			spitfire_xcall_deliver(data0, 0, 0, mask);
-		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
-			cheetah_xcall_deliver(data0, 0, 0, mask);
-		else if (tlb_type == hypervisor)
-			hypervisor_xcall_deliver(data0, 0, 0, mask);
-	}
+	if (cpu_online(cpu))
+		__smp_receive_signal_mask(mask);
 }
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
-	/* Just return, rtrap takes care of the rest. */
+	struct mm_struct *mm;
+
 	clear_softint(1 << irq);
+
+	/* See if we need to allocate a new TLB context because
+	 * the version of the one we are using is now out of date.
+	 */
+	mm = current->active_mm;
+	if (likely(mm)) {
+		if (unlikely(!CTX_VALID(mm->context))) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&mm->context.lock, flags);
+			get_new_mmu_context(mm);
+			load_secondary_context(mm);
+			spin_unlock_irqrestore(&mm->context.lock, flags);
+		}
+	}
+}
+
+void smp_new_mmu_context_version(void)
+{
+	__smp_receive_signal_mask(cpu_online_map);
 }
 
 void smp_report_regs(void)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 16f0db38d93..ccf083aecb6 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -629,17 +629,20 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
  * let the user have CTX 0 (nucleus) or we ever use a CTX
  * version of zero (and thus NO_CONTEXT would not be caught
  * by version mis-match tests in mmu_context.h).
+ *
+ * Always invoked with interrupts disabled.
  */
 void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	
+	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
@@ -662,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm)
 				mmu_context_bmap[i + 2] = 0;
 				mmu_context_bmap[i + 3] = 0;
 			}
+			new_version = 1;
 			goto out;
 		}
 	}
@@ -671,6 +675,9 @@ out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
 	spin_unlock(&ctx_alloc_lock);
+
+	if (unlikely(new_version))
+		smp_new_mmu_context_version();
 }
 
 void sparc_ultra_dump_itlb(void)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 473d990848e..1504d303a1d 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -102,6 +102,7 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 
 typedef struct {
+	spinlock_t		lock;
 	unsigned long		sparc64_ctx_val;
 	struct tsb		*tsb;
 	unsigned long		tsb_rss_limit;
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index eb660b1609c..4be40c58e3c 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,6 +19,12 @@ extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
 extern void get_new_mmu_context(struct mm_struct *mm);
+#ifdef CONFIG_SMP
+extern void smp_new_mmu_context_version(void);
+#else
+#define smp_new_mmu_context_version() do { } while (0)
+#endif
+
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
@@ -58,21 +64,17 @@ extern void smp_tsb_sync(struct mm_struct *mm);
 
 extern void __flush_tlb_mm(unsigned long, unsigned long);
 
-/* Switch the current MM context. */
+/* Switch the current MM context.  Interrupts are disabled.  */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
 	int cpu;
 
-	/* Note: page_table_lock is used here to serialize switch_mm
-	 * and activate_mm, and their calls to get_new_mmu_context.
-	 * This use of page_table_lock is unrelated to its other uses.
-	 */ 
-	spin_lock(&mm->page_table_lock);
+	spin_lock(&mm->context.lock);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(&mm->context.lock);
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
@@ -98,19 +100,16 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 /* Activate a new MM instance for the current task. */
 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
 {
+	unsigned long flags;
 	int cpu;
 
-	/* Note: page_table_lock is used here to serialize switch_mm
-	 * and activate_mm, and their calls to get_new_mmu_context.
-	 * This use of page_table_lock is unrelated to its other uses.
-	 */ 
-	spin_lock(&mm->page_table_lock);
+	spin_lock_irqsave(&mm->context.lock, flags);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
 	if (!cpu_isset(cpu, mm->cpu_vm_mask))
 		cpu_set(cpu, mm->cpu_vm_mask);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-- 
cgit v1.2.3-70-g09d2


From 36344762396ca868d6076c41a84bda25f1ed9d3c Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 25 Feb 2006 17:16:29 -0800
Subject: [SPARC64]: Niagara optimized XOR functions for RAID.
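
All of these routines compute the same thing as the VIS versions;
only the load/store strategy differs (64-byte loop iterations using
ldda/stxa with ASI_BLK_INIT_QUAD_LDD_P).  For reference, the
semantics of the two-operand case boil down to this portable sketch
(not kernel code):

	/* dest ^= src over "bytes" bytes, 64 bits at a time.  The
	 * assembly does this 64 bytes per iteration, with block-init
	 * stores so the destination lines need not be fetched first.
	 */
	static void xor_ref_2(unsigned long bytes, unsigned long *dest,
			      unsigned long *src)
	{
		unsigned long i, n = bytes / sizeof(unsigned long);

		for (i = 0; i < n; i++)
			dest[i] ^= src[i];
	}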

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/sparc64_ksyms.c |  13 ++
 arch/sparc64/lib/xor.S              | 300 +++++++++++++++++++++++++++++++++++-
 include/asm-sparc64/xor.h           |  34 +++-
 3 files changed, 342 insertions(+), 5 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 801fc0ce484..e87fe7dfc7d 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -108,6 +108,14 @@ extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
 extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
 		      unsigned long *, unsigned long *, unsigned long *);
 
+extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
+			  unsigned long *);
+extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
+			  unsigned long *, unsigned long *);
+extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
+			  unsigned long *, unsigned long *, unsigned long *);
+
 /* Per-CPU information table */
 EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
@@ -388,4 +396,9 @@ EXPORT_SYMBOL(xor_vis_3);
 EXPORT_SYMBOL(xor_vis_4);
 EXPORT_SYMBOL(xor_vis_5);
 
+EXPORT_SYMBOL(xor_niagara_2);
+EXPORT_SYMBOL(xor_niagara_3);
+EXPORT_SYMBOL(xor_niagara_4);
+EXPORT_SYMBOL(xor_niagara_5);
+
 EXPORT_SYMBOL(prom_palette);
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S
index 4cd5d2be1ae..a79c8888170 100644
--- a/arch/sparc64/lib/xor.S
+++ b/arch/sparc64/lib/xor.S
@@ -2,9 +2,10 @@
  * arch/sparc64/lib/xor.S
  *
  * High speed xor_block operation for RAID4/5 utilizing the
- * UltraSparc Visual Instruction Set.
+ * UltraSparc Visual Instruction Set and Niagara store-init/twin-load.
  *
  * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
 #include <asm/visasm.h>
@@ -19,6 +20,8 @@
  */
 	.text
 	.align	32
+
+	/* VIS versions. */
 	.globl	xor_vis_2
 	.type	xor_vis_2,#function
 xor_vis_2:
@@ -352,3 +355,298 @@ xor_vis_5:
 	ret
 	 restore
 	.size	xor_vis_5, .-xor_vis_5
+
+	/* Niagara versions. */
+	.globl		xor_niagara_2
+	.type		xor_niagara_2,#function
+xor_niagara_2:		/* %o0=bytes, %o1=dest, %o2=src */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src  + 0x00 */
+	ldda		[%i1 + 0x10] %asi, %i4	/* %i4/%i5 = src  + 0x10 */
+	ldda		[%i1 + 0x20] %asi, %g2	/* %g2/%g3 = src  + 0x20 */
+	ldda		[%i1 + 0x30] %asi, %l0	/* %l0/%l1 = src  + 0x30 */
+	prefetch	[%i1 + 0x40], #one_read
+	ldda		[%i0 + 0x00] %asi, %o0  /* %o0/%o1 = dest + 0x00 */
+	ldda		[%i0 + 0x10] %asi, %o2  /* %o2/%o3 = dest + 0x10 */
+	ldda		[%i0 + 0x20] %asi, %o4  /* %o4/%o5 = dest + 0x20 */
+	ldda		[%i0 + 0x30] %asi, %l2  /* %l2/%l3 = dest + 0x30 */
+	prefetch	[%i0 + 0x40], #n_writes
+	xor		%o0, %i2, %o0
+	xor		%o1, %i3, %o1
+	stxa		%o0, [%i0 + 0x00] %asi
+	stxa		%o1, [%i0 + 0x08] %asi
+	xor		%o2, %i4, %o2
+	xor		%o3, %i5, %o3
+	stxa		%o2, [%i0 + 0x10] %asi
+	stxa		%o3, [%i0 + 0x18] %asi
+	xor		%o4, %g2, %o4
+	xor		%o5, %g3, %o5
+	stxa		%o4, [%i0 + 0x20] %asi
+	stxa		%o5, [%i0 + 0x28] %asi
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x30] %asi
+	stxa		%l3, [%i0 + 0x38] %asi
+	add		%i0, 0x40, %i0
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%i1, 0x40, %i1
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_2, .-xor_niagara_2
+
+	.globl		xor_niagara_3
+	.type		xor_niagara_3,#function
+xor_niagara_3:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	prefetch	[%i3], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+	mov		%i3, %l7
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src1 + 0x00 */
+	ldda		[%i1 + 0x10] %asi, %i4	/* %i4/%i5 = src1 + 0x10 */
+	ldda		[%l7 + 0x00] %asi, %g2	/* %g2/%g3 = src2 + 0x00 */
+	ldda		[%l7 + 0x10] %asi, %l0	/* %l0/%l1 = src2 + 0x10 */
+	ldda		[%i0 + 0x00] %asi, %o0  /* %o0/%o1 = dest + 0x00 */
+	ldda		[%i0 + 0x10] %asi, %o2  /* %o2/%o3 = dest + 0x10 */
+	xor		%g2, %i2, %g2
+	xor		%g3, %i3, %g3
+	xor		%o0, %g2, %o0
+	xor		%o1, %g3, %o1
+	stxa		%o0, [%i0 + 0x00] %asi
+	stxa		%o1, [%i0 + 0x08] %asi
+	ldda		[%i1 + 0x20] %asi, %i2	/* %i2/%i3 = src1 + 0x20 */
+	ldda		[%l7 + 0x20] %asi, %g2	/* %g2/%g3 = src2 + 0x20 */
+	ldda		[%i0 + 0x20] %asi, %o0	/* %o0/%o1 = dest + 0x20 */
+	xor		%l0, %i4, %l0
+	xor		%l1, %i5, %l1
+	xor		%o2, %l0, %o2
+	xor		%o3, %l1, %o3
+	stxa		%o2, [%i0 + 0x10] %asi
+	stxa		%o3, [%i0 + 0x18] %asi
+	ldda		[%i1 + 0x30] %asi, %i4	/* %i4/%i5 = src1 + 0x30 */
+	ldda		[%l7 + 0x30] %asi, %l0	/* %l0/%l1 = src2 + 0x30 */
+	ldda		[%i0 + 0x30] %asi, %o2	/* %o2/%o3 = dest + 0x30 */
+	prefetch	[%i1 + 0x40], #one_read
+	prefetch	[%l7 + 0x40], #one_read
+	prefetch	[%i0 + 0x40], #n_writes
+	xor		%g2, %i2, %g2
+	xor		%g3, %i3, %g3
+	xor		%o0, %g2, %o0
+	xor		%o1, %g3, %o1
+	stxa		%o0, [%i0 + 0x20] %asi
+	stxa		%o1, [%i0 + 0x28] %asi
+	xor		%l0, %i4, %l0
+	xor		%l1, %i5, %l1
+	xor		%o2, %l0, %o2
+	xor		%o3, %l1, %o3
+	stxa		%o2, [%i0 + 0x30] %asi
+	stxa		%o3, [%i0 + 0x38] %asi
+	add		%i0, 0x40, %i0
+	add		%i1, 0x40, %i1
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%l7, 0x40, %l7
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_3, .-xor_niagara_3
+
+	.globl		xor_niagara_4
+	.type		xor_niagara_4,#function
+xor_niagara_4:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	prefetch	[%i3], #one_read
+	prefetch	[%i4], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+	mov		%i3, %l7
+	mov		%i4, %l6
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src1 + 0x00 */
+	ldda		[%l7 + 0x00] %asi, %i4	/* %i4/%i5 = src2 + 0x00 */
+	ldda		[%l6 + 0x00] %asi, %g2	/* %g2/%g3 = src3 + 0x00 */
+	ldda		[%i0 + 0x00] %asi, %l0	/* %l0/%l1 = dest + 0x00 */
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x10] %asi, %i2	/* %i2/%i3 = src1 + 0x10 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x10] %asi, %i4	/* %i4/%i5 = src2 + 0x10 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x00] %asi
+	stxa		%l1, [%i0 + 0x08] %asi
+	ldda		[%l6 + 0x10] %asi, %g2	/* %g2/%g3 = src3 + 0x10 */
+	ldda		[%i0 + 0x10] %asi, %l0	/* %l0/%l1 = dest + 0x10 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x20] %asi, %i2	/* %i2/%i3 = src1 + 0x20 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x20] %asi, %i4	/* %i4/%i5 = src2 + 0x20 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x10] %asi
+	stxa		%l1, [%i0 + 0x18] %asi
+	ldda		[%l6 + 0x20] %asi, %g2	/* %g2/%g3 = src3 + 0x20 */
+	ldda		[%i0 + 0x20] %asi, %l0	/* %l0/%l1 = dest + 0x20 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x30] %asi, %i2	/* %i2/%i3 = src1 + 0x30 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x30] %asi, %i4	/* %i4/%i5 = src2 + 0x30 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x20] %asi
+	stxa		%l1, [%i0 + 0x28] %asi
+	ldda		[%l6 + 0x30] %asi, %g2	/* %g2/%g3 = src3 + 0x30 */
+	ldda		[%i0 + 0x30] %asi, %l0	/* %l0/%l1 = dest + 0x30 */
+
+	prefetch	[%i1 + 0x40], #one_read
+	prefetch	[%l7 + 0x40], #one_read
+	prefetch	[%l6 + 0x40], #one_read
+	prefetch	[%i0 + 0x40], #n_writes
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x30] %asi
+	stxa		%l1, [%i0 + 0x38] %asi
+
+	add		%i0, 0x40, %i0
+	add		%i1, 0x40, %i1
+	add		%l7, 0x40, %l7
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%l6, 0x40, %l6
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_4, .-xor_niagara_4
+
+	.globl		xor_niagara_5
+	.type		xor_niagara_5,#function
+xor_niagara_5:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	prefetch	[%i3], #one_read
+	prefetch	[%i4], #one_read
+	prefetch	[%i5], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+	mov		%i3, %l7
+	mov		%i4, %l6
+	mov		%i5, %l5
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src1 + 0x00 */
+	ldda		[%l7 + 0x00] %asi, %i4	/* %i4/%i5 = src2 + 0x00 */
+	ldda		[%l6 + 0x00] %asi, %g2	/* %g2/%g3 = src3 + 0x00 */
+	ldda		[%l5 + 0x00] %asi, %l0	/* %l0/%l1 = src4 + 0x00 */
+	ldda		[%i0 + 0x00] %asi, %l2	/* %l2/%l3 = dest + 0x00 */
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x10] %asi, %i2	/* %i2/%i3 = src1 + 0x10 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x10] %asi, %i4	/* %i4/%i5 = src2 + 0x10 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	ldda		[%l6 + 0x10] %asi, %g2	/* %g2/%g3 = src3 + 0x10 */
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x00] %asi
+	stxa		%l3, [%i0 + 0x08] %asi
+	ldda		[%l5 + 0x10] %asi, %l0	/* %l0/%l1 = src4 + 0x10 */
+	ldda		[%i0 + 0x10] %asi, %l2	/* %l2/%l3 = dest + 0x10 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x20] %asi, %i2	/* %i2/%i3 = src1 + 0x20 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x20] %asi, %i4	/* %i4/%i5 = src2 + 0x20 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	ldda		[%l6 + 0x20] %asi, %g2	/* %g2/%g3 = src3 + 0x20 */
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x10] %asi
+	stxa		%l3, [%i0 + 0x18] %asi
+	ldda		[%l5 + 0x20] %asi, %l0	/* %l0/%l1 = src4 + 0x20 */
+	ldda		[%i0 + 0x20] %asi, %l2	/* %l2/%l3 = dest + 0x20 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x30] %asi, %i2	/* %i2/%i3 = src1 + 0x30 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x30] %asi, %i4	/* %i4/%i5 = src2 + 0x30 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	ldda		[%l6 + 0x30] %asi, %g2	/* %g2/%g3 = src3 + 0x30 */
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x20] %asi
+	stxa		%l3, [%i0 + 0x28] %asi
+	ldda		[%l5 + 0x30] %asi, %l0	/* %l0/%l1 = src4 + 0x30 */
+	ldda		[%i0 + 0x30] %asi, %l2	/* %l2/%l3 = dest + 0x30 */
+
+	prefetch	[%i1 + 0x40], #one_read
+	prefetch	[%l7 + 0x40], #one_read
+	prefetch	[%l6 + 0x40], #one_read
+	prefetch	[%l5 + 0x40], #one_read
+	prefetch	[%i0 + 0x40], #n_writes
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x30] %asi
+	stxa		%l3, [%i0 + 0x38] %asi
+
+	add		%i0, 0x40, %i0
+	add		%i1, 0x40, %i1
+	add		%l7, 0x40, %l7
+	add		%l6, 0x40, %l6
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%l5, 0x40, %l5
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_5, .-xor_niagara_5
diff --git a/include/asm-sparc64/xor.h b/include/asm-sparc64/xor.h
index 8b3a7e4b606..8ce3f1813e2 100644
--- a/include/asm-sparc64/xor.h
+++ b/include/asm-sparc64/xor.h
@@ -2,9 +2,11 @@
  * include/asm-sparc64/xor.h
  *
  * High speed xor_block operation for RAID4/5 utilizing the
- * UltraSparc Visual Instruction Set.
+ * UltraSparc Visual Instruction Set and Niagara block-init
+ * twin-load instructions.
  *
  * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,8 +18,7 @@
  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <asm/pstate.h>
-#include <asm/asi.h>
+#include <asm/spitfire.h>
 
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
@@ -37,4 +38,29 @@ static struct xor_block_template xor_block_VIS = {
         .do_5	= xor_vis_5,
 };
 
-#define XOR_TRY_TEMPLATES       xor_speed(&xor_block_VIS)
+extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
+			  unsigned long *);
+extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
+			  unsigned long *, unsigned long *);
+extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
+			  unsigned long *, unsigned long *, unsigned long *);
+
+static struct xor_block_template xor_block_niagara = {
+        .name	= "Niagara",
+        .do_2	= xor_niagara_2,
+        .do_3	= xor_niagara_3,
+        .do_4	= xor_niagara_4,
+        .do_5	= xor_niagara_5,
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES				\
+	do {						\
+		xor_speed(&xor_block_VIS);		\
+		xor_speed(&xor_block_niagara);		\
+	} while (0)
+
+/* For VIS for everything except Niagara.  */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+	(tlb_type == hypervisor ? &xor_block_niagara : &xor_block_VIS)
-- 
cgit v1.2.3-70-g09d2


From 97c4b6f95afadea5846b78ce589d25de2a245c56 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Sun, 26 Feb 2006 20:37:41 -0800
Subject: [SPARC64]: Use 13-bit context size always.

We no longer have the problems that required using the smaller
sizes.
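
With CTX_NR_BITS at 13 we get 2^13 = 8192 hardware contexts per
version generation, and TAG_CONTEXT_BITS below works out to
(1UL << 13) - 1 = 0x1fff.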

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/mmu.h | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 1504d303a1d..da14a9bf0ed 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -6,19 +6,7 @@
 #include <asm/const.h>
 #include <asm/hypervisor.h>
 
-/*
- * For the 8k pagesize kernel, use only 10 hw context bits to optimize some
- * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically
- * for vpte offset calculation). For other pagesizes, this optimization in
- * the tlbhandlers can not be done; but still, all 13 bits can not be used
- * because the tlb handlers use "andcc" instruction which sign extends 13
- * bit arguments.
- */
-#if PAGE_SHIFT == 13
-#define CTX_NR_BITS		10
-#else
-#define CTX_NR_BITS		12
-#endif
+#define CTX_NR_BITS		13
 
 #define TAG_CONTEXT_BITS	((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
 
-- 
cgit v1.2.3-70-g09d2


From b830ab665ad96c6b20d51a89b35cbc09ab5a2c29 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 28 Feb 2006 15:10:26 -0800
Subject: [SPARC64]: Fix bugs in SUN4V cpu mondo dispatch.

There were several bugs in the SUN4V cpu mondo dispatch code.

In fact, if we ever got a EWOULDBLOCK or other error from
the hypervisor call, we'd potentially send a cpu mondo multiple
times to the same cpu and even worse we could loop until the
timeout resending the same mondo over and over to such cpus.

So let's bulletproof this thing as follows:

1) Implement cpu_mondo_send() and cpu_state() hypervisor calls
   in arch/sparc64/kernel/entry.S, add prototypes to asm/hypervisor.h

2) Don't build and update the cpulist using inline functions; this
   was causing the cpu mask to not get updated in the caller.

3) Disable interrupts during the entire mondo send; otherwise our
   cpu list and/or mondo block could get overwritten if we take
   an interrupt and do a cpu mondo send on the current cpu.

4) Check for all possible error return types from the cpu_mondo_send()
   hypervisor call.  In particular:

   HV_EOK) Our work is done; all cpus have received the mondo.
   HV_ECPUERROR) One or more of the cpus in the cpu list we passed
                 to the hypervisor are in error state.  Use cpu_state()
                 calls over the entries in the cpu list to see which
                 ones.  Record them in "error_mask" and report this
                 after we are done sending the mondo to cpus which are
                 not in error state.
   HV_EWOULDBLOCK) We need to keep trying.

   Any other error we consider fatal; we report the event and exit
   immediately.

5) We only timeout if forward progress is not made.  Forward progress
   is defined as having at least one cpu get the mondo successfully
   in a given cpu_mondo_send() call.  Otherwise we bump a counter
   and delay a little.  If the counter hits a limit, we signal an
   error and report the event.  (See the sketch just after this list.)
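
In C, the shape of that retry policy is roughly the following sketch
(the helpers are hypothetical stand-ins for the code in the patch
below; "cnt" is the number of cpus still pending):

	int retries = 0;

	do {
		/* Send the mondo; prune cpus whose list entries the
		 * hypervisor rewrote to 0xffff (i.e. delivered).
		 */
		int forward_progress = send_mondo_and_prune(cpu_list, &cnt);

		if (cnt == 0)
			break;

		if (!forward_progress) {
			if (++retries > 10000)
				goto fatal_mondo_timeout;
			udelay(2 * cnt);	/* let targets drain their queues */
		}
	} while (1);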

Also, smp_call_function_mask() error handling reported the number
of cpus incorrectly; fix that as well.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/entry.S      |  28 ++++++
 arch/sparc64/kernel/smp.c        | 180 ++++++++++++++++++++++++++-------------
 include/asm-sparc64/hypervisor.h |  10 +++
 3 files changed, 161 insertions(+), 57 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 9f3048e64e8..6d0b3ed77a0 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1795,3 +1795,31 @@ sun4v_cpu_yield:
 	ta	HV_FAST_TRAP
 	retl
 	 nop
+
+	/* %o0:	num cpus in cpu list
+	 * %o1:	cpu list paddr
+	 * %o2:	mondo block paddr
+	 *
+	 * returns %o0: status
+	 */
+	.globl	sun4v_cpu_mondo_send
+sun4v_cpu_mondo_send:
+	mov	HV_FAST_CPU_MONDO_SEND, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+
+	/* %o0:	CPU ID
+	 *
+	 * returns %o0:	-status if status non-zero, else
+	 *         %o0:	cpu state as HV_CPU_STATE_*
+	 */
+	.globl	sun4v_cpu_state
+sun4v_cpu_state:
+	mov	HV_FAST_CPU_STATE, %o5
+	ta	HV_FAST_TRAP
+	brnz,pn	%o0, 1f
+	 sub	%g0, %o0, %o0
+	mov	%o1, %o0
+1:	retl
+	 nop
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index eb7c0f855ba..6bc7fd47e44 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -556,77 +556,144 @@ retry:
 }
 
 /* Multi-cpu list version.  */
-static int init_cpu_list(u16 *list, cpumask_t mask)
-{
-	int i, cnt;
-
-	cnt = 0;
-	for_each_cpu_mask(i, mask)
-		list[cnt++] = i;
-
-	return cnt;
-}
-
-static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
-{
-	int i;
-
-	for (i = 0; i < orig_cnt; i++) {
-		if (list[i] == 0xffff)
-			cpu_clear(i, mask);
-	}
-
-	return init_cpu_list(list, mask);
-}
-
 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-	int this_cpu = get_cpu();
-	struct trap_per_cpu *tb = &trap_block[this_cpu];
-	u64 *mondo = __va(tb->cpu_mondo_block_pa);
-	u16 *cpu_list = __va(tb->cpu_list_pa);
-	int cnt, retries;
+	struct trap_per_cpu *tb;
+	u16 *cpu_list;
+	u64 *mondo;
+	cpumask_t error_mask;
+	unsigned long flags, status;
+	int cnt, retries, this_cpu, i;
+
+	/* We have to do this whole thing with interrupts fully disabled.
+	 * Otherwise if we send an xcall from interrupt context it will
+	 * corrupt both our mondo block and cpu list state.
+	 *
+	 * One consequence of this is that we cannot use timeout mechanisms
+	 * that depend upon interrupts being delivered locally.  So, for
+	 * example, we cannot sample jiffies and expect it to advance.
+	 *
+	 * Fortunately, udelay() uses %stick/%tick so we can use that.
+	 */
+	local_irq_save(flags);
+
+	this_cpu = smp_processor_id();
+	tb = &trap_block[this_cpu];
 
+	mondo = __va(tb->cpu_mondo_block_pa);
 	mondo[0] = data0;
 	mondo[1] = data1;
 	mondo[2] = data2;
 	wmb();
 
+	cpu_list = __va(tb->cpu_list_pa);
+
+	/* Setup the initial cpu list.  */
+	cnt = 0;
+	for_each_cpu_mask(i, mask)
+		cpu_list[cnt++] = i;
+
+	cpus_clear(error_mask);
 	retries = 0;
-	cnt = init_cpu_list(cpu_list, mask);
 	do {
-		register unsigned long func __asm__("%o5");
-		register unsigned long arg0 __asm__("%o0");
-		register unsigned long arg1 __asm__("%o1");
-		register unsigned long arg2 __asm__("%o2");
-
-		func = HV_FAST_CPU_MONDO_SEND;
-		arg0 = cnt;
-		arg1 = tb->cpu_list_pa;
-		arg2 = tb->cpu_mondo_block_pa;
-
-		__asm__ __volatile__("ta	%8"
-				     : "=&r" (func), "=&r" (arg0),
-				       "=&r" (arg1), "=&r" (arg2)
-				     : "0" (func), "1" (arg0),
-				       "2" (arg1), "3" (arg2),
-				       "i" (HV_FAST_TRAP)
-				     : "memory");
-		if (likely(arg0 == HV_EOK))
-			break;
+		int forward_progress;
+
+		status = sun4v_cpu_mondo_send(cnt,
+					      tb->cpu_list_pa,
+					      tb->cpu_mondo_block_pa);
 
-		if (unlikely(++retries > 100)) {
-			printk("CPU[%d]: sun4v mondo error %lu\n",
-			       this_cpu, arg0);
+		/* HV_EOK means all cpus received the xcall, we're done.  */
+		if (likely(status == HV_EOK))
 			break;
+
+		/* First, clear out all the cpus in the mask that were
+		 * successfully sent to.  The hypervisor indicates this
+		 * by setting the cpu list entry of such cpus to 0xffff.
+		 */
+		forward_progress = 0;
+		for (i = 0; i < cnt; i++) {
+			if (cpu_list[i] == 0xffff) {
+				cpu_clear(i, mask);
+				forward_progress = 1;
+			}
 		}
 
-		cnt = update_cpu_list(cpu_list, cnt, mask);
+		/* If we get a HV_ECPUERROR, then one or more of the cpus
+		 * in the list are in error state.  Use the cpu_state()
+		 * hypervisor call to find out which cpus are in error state.
+		 */
+		if (unlikely(status == HV_ECPUERROR)) {
+			for (i = 0; i < cnt; i++) {
+				long err;
+				u16 cpu;
+
+				cpu = cpu_list[i];
+				if (cpu == 0xffff)
+					continue;
+
+				err = sun4v_cpu_state(cpu);
+				if (err >= 0 &&
+				    err == HV_CPU_STATE_ERROR) {
+					cpu_clear(cpu, mask);
+					cpu_set(cpu, error_mask);
+				}
+			}
+		} else if (unlikely(status != HV_EWOULDBLOCK))
+			goto fatal_mondo_error;
+
+		/* Rebuild the cpu_list[] array and try again.  */
+		cnt = 0;
+		for_each_cpu_mask(i, mask)
+			cpu_list[cnt++] = i;
 
-		udelay(2 * cnt);
+		if (unlikely(!forward_progress)) {
+			if (unlikely(++retries > 10000))
+				goto fatal_mondo_timeout;
+
+			/* Delay a little bit to let other cpus catch up
+			 * on their cpu mondo queue work.
+			 */
+			udelay(2 * cnt);
+		}
 	} while (1);
 
-	put_cpu();
+	local_irq_restore(flags);
+
+	if (unlikely(!cpus_empty(error_mask)))
+		goto fatal_mondo_cpu_error;
+
+	return;
+
+fatal_mondo_cpu_error:
+	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+	       "were in error state\n",
+	       this_cpu);
+	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
+	for_each_cpu_mask(i, error_mask)
+		printk("%d ", i);
+	printk("]\n");
+	return;
+
+fatal_mondo_timeout:
+	local_irq_restore(flags);
+	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+	       "progress after %d retries.\n",
+	       this_cpu, retries);
+	goto dump_cpu_list_and_out;
+
+fatal_mondo_error:
+	local_irq_restore(flags);
+	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+	       this_cpu, status);
+	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+	       "mondo_block_pa(%lx)\n",
+	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+
+dump_cpu_list_and_out:
+	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+	for (i = 0; i < cnt; i++)
+		printk("%u ", cpu_list[i]);
+	printk("]\n");
 }
 
 /* Send cross call to all processors mentioned in MASK
@@ -723,9 +790,8 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 
 out_timeout:
 	spin_unlock(&call_lock);
-	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
-	       (long) num_online_cpus() - 1L,
-	       (long) atomic_read(&data.finished));
+	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
+	       cpus, atomic_read(&data.finished));
 	return 0;
 }
 
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index 726e2ea03ce..612bf319753 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -342,6 +342,8 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type,
  *		ENOCPU		Invalid cpu in CPU list
  *		EWOULDBLOCK	Some or all of the listed CPUs did not receive
  *				the mondo
+ *		ECPUERROR	One or more of the listed CPUs are in error
+ *				state, use HV_FAST_CPU_STATE to see which ones
  *		EINVAL		CPU list includes caller's CPU ID
  *
  * Send a mondo interrupt to the CPUs in the given CPU list with the
@@ -355,6 +357,10 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type,
  */
 #define HV_FAST_CPU_MONDO_SEND		0x42
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
+#endif
+
 /* cpu_myid()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_CPU_MYID
@@ -382,6 +388,10 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type,
 #define  HV_CPU_STATE_RUNNING		 0x02
 #define  HV_CPU_STATE_ERROR		 0x03
 
+#ifndef __ASSEMBLY__
+extern long sun4v_cpu_state(unsigned long cpuid);
+#endif
+
 /* cpu_set_rtba()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_CPU_SET_RTBA
-- 
cgit v1.2.3-70-g09d2


From e22990451a6a6263250cdd267708548dfa08a8f2 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 1 Mar 2006 22:25:43 -0800
Subject: [SPARC64]: Kill bogus function externs in asm/pgtable.h

These are all implemented inline earlier in the file.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/pgtable.h | 24 ------------------------
 1 file changed, 24 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 6c8126b2dec..75a2cd2d7e8 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -646,27 +646,6 @@ static inline unsigned long pte_present(pte_t pte)
 /* Same in both SUN4V and SUN4U.  */
 #define pte_none(pte) 			(!pte_val(pte))
 
-extern unsigned long pte_present(pte_t);
-
-/* The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-extern unsigned long pte_read(pte_t);
-extern unsigned long pte_exec(pte_t);
-extern unsigned long pte_write(pte_t);
-extern unsigned long pte_dirty(pte_t);
-extern unsigned long pte_young(pte_t);
-extern pte_t pte_wrprotect(pte_t);
-extern pte_t pte_rdprotect(pte_t);
-extern pte_t pte_mkclean(pte_t);
-extern pte_t pte_mkold(pte_t);
-
-/* Be very careful when you change these three, they are delicate. */
-extern pte_t pte_mkyoung(pte_t);
-extern pte_t pte_mkwrite(pte_t);
-extern pte_t pte_mkdirty(pte_t);
-extern pte_t pte_mkhuge(pte_t);
-
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
@@ -766,9 +745,6 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 			       unsigned long pfn,
 			       unsigned long size, pgprot_t prot);
 
-/* Clear virtual and physical cachability, set side-effect bit.  */
-extern pgprot_t pgprot_noncached(pgprot_t);
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits.  These macros/functions put it there or get it from there.
-- 
cgit v1.2.3-70-g09d2


From 8bcd17411643beb9a601e032d0cf1016909a81d3 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 2 Mar 2006 18:12:27 -0800
Subject: [SPARC64]: Do not allow mapping pages within 4GB of 64-bit VA hole.

The UltraSPARC T1 manual recommends this because the chip
could instruction prefetch into the VA hole, and this would
also make decoding certain kinds of memory access traps
more difficult (because the chip sign-extends certain pieces
of trap state).
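
For clarity, here is a minimal userspace sketch of the new exclusion
check (assuming an LP64 host where unsigned long is 64 bits); the
constants and the three tests mirror the invalid_64bit_range() hunk
below.

    #include <stdio.h>

    /* From the diff below: the 64-bit VA hole, padded by 4GB on
     * each side.
     */
    #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32))
    #define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32))

    /* Reject: lengths that reach the window, wrapping ranges, and
     * ranges whose start or end lands inside the window.
     */
    static int invalid_64bit_range(unsigned long addr, unsigned long len)
    {
            if (len >= VA_EXCLUDE_START)
                    return 1;
            if (addr + len < addr)
                    return 1;
            if ((addr >= VA_EXCLUDE_START && addr < VA_EXCLUDE_END) ||
                (addr + len >= VA_EXCLUDE_START && addr + len < VA_EXCLUDE_END))
                    return 1;
            return 0;
    }

    int main(void)
    {
            /* Well below the padded hole: accepted (prints 0). */
            printf("%d\n", invalid_64bit_range(0x0000070000000000UL, 1UL << 20));
            /* Crosses into the padding: rejected (prints 1). */
            printf("%d\n", invalid_64bit_range(VA_EXCLUDE_START - 4096UL, 8192UL));
            return 0;
    }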

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/sys_sparc.c | 90 +++++++++++++++++++++++++++++++----------
 include/asm-sparc64/a.out.h     |  4 +-
 include/asm-sparc64/page.h      |  3 +-
 3 files changed, 73 insertions(+), 24 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 5f8c822a2b4..095db723bb8 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -33,14 +33,55 @@
 
 /* #define DEBUG_UNIMP_SYSCALL */
 
-/* XXX Make this per-binary type, this way we can detect the type of
- * XXX a binary.  Every Sparc executable calls this very early on.
- */
 asmlinkage unsigned long sys_getpagesize(void)
 {
 	return PAGE_SIZE;
 }
 
+#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
+
+/* Does addr --> addr+len fall within 4GB of the VA-space hole or
+ * overflow past the end of the 64-bit address space?
+ */
+static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
+{
+	unsigned long va_exclude_start, va_exclude_end;
+
+	va_exclude_start = VA_EXCLUDE_START;
+	va_exclude_end   = VA_EXCLUDE_END;
+
+	if (unlikely(len >= va_exclude_start))
+		return 1;
+
+	if (unlikely((addr + len) < addr))
+		return 1;
+
+	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
+		     ((addr + len) >= va_exclude_start &&
+		      (addr + len) < va_exclude_end)))
+		return 1;
+
+	return 0;
+}
+
+/* Does start,end straddle the VA-space hole?  */
+static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
+{
+	unsigned long va_exclude_start, va_exclude_end;
+
+	va_exclude_start = VA_EXCLUDE_START;
+	va_exclude_end   = VA_EXCLUDE_END;
+
+	if (likely(start < va_exclude_start && end < va_exclude_start))
+		return 0;
+
+	if (likely(start >= va_exclude_end && end >= va_exclude_end))
+		return 0;
+
+	return 1;
+}
+
 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
@@ -65,7 +106,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = 0xf0000000UL;
-	if (len > task_size || len > -PAGE_OFFSET)
+	if (len > task_size || len >= VA_EXCLUDE_START)
 		return -ENOMEM;
 
 	do_color_align = 0;
@@ -100,9 +141,10 @@ full_search:
 
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
-			addr = PAGE_OFFSET;
-			vma = find_vma(mm, PAGE_OFFSET);
+		if (addr < VA_EXCLUDE_START &&
+		    (addr + len) >= VA_EXCLUDE_START) {
+			addr = VA_EXCLUDE_END;
+			vma = find_vma(mm, VA_EXCLUDE_END);
 		}
 		if (task_size < addr) {
 			if (start_addr != TASK_UNMAPPED_BASE) {
@@ -174,12 +216,12 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
 asmlinkage unsigned long sparc_brk(unsigned long brk)
 {
 	/* People could try to be nasty and use ta 0x6d in 32bit programs */
-	if (test_thread_flag(TIF_32BIT) &&
-	    brk >= 0xf0000000UL)
+	if (test_thread_flag(TIF_32BIT) && brk >= 0xf0000000UL)
 		return current->mm->brk;
 
-	if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
+	if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
 		return current->mm->brk;
+
 	return sys_brk(brk);
 }
                                                                 
@@ -340,13 +382,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
 	retval = -EINVAL;
 
 	if (test_thread_flag(TIF_32BIT)) {
-		if (len > 0xf0000000UL ||
-		    ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
+		if (len >= 0xf0000000UL)
+			goto out_putf;
+
+		if ((flags & MAP_FIXED) && addr > 0xf0000000UL - len)
 			goto out_putf;
 	} else {
-		if (len > -PAGE_OFFSET ||
-		    ((flags & MAP_FIXED) &&
-		     addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+		if (len >= VA_EXCLUDE_START)
+			goto out_putf;
+
+		if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
 			goto out_putf;
 	}
 
@@ -365,9 +410,9 @@ asmlinkage long sys64_munmap(unsigned long addr, size_t len)
 {
 	long ret;
 
-	if (len > -PAGE_OFFSET ||
-	    (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+	if (invalid_64bit_range(addr, len))
 		return -EINVAL;
+
 	down_write(&current->mm->mmap_sem);
 	ret = do_munmap(current->mm, addr, len);
 	up_write(&current->mm->mmap_sem);
@@ -384,18 +429,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
 {
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
+
 	if (test_thread_flag(TIF_32BIT))
 		goto out;
-	if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
+	if (unlikely(new_len >= VA_EXCLUDE_START))
 		goto out;
-	if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
+	if (unlikely(invalid_64bit_range(addr, old_len)))
 		goto out;
+
 	down_write(&current->mm->mmap_sem);
 	if (flags & MREMAP_FIXED) {
-		if (new_addr < PAGE_OFFSET &&
-		    new_addr + new_len > -PAGE_OFFSET)
+		if (invalid_64bit_range(new_addr, new_len))
 			goto out_sem;
-	} else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
+	} else if (invalid_64bit_range(addr, new_len)) {
 		unsigned long map_flags = 0;
 		struct file *file = NULL;
 
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h
index 02af289e3f4..faac321d559 100644
--- a/include/asm-sparc64/a.out.h
+++ b/include/asm-sparc64/a.out.h
@@ -95,7 +95,9 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
 
 #ifdef __KERNEL__
 
-#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L)
+#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
+		   0xf0000000 : \
+		   (0x0000080000000000UL - (1UL << 32UL)))
 
 #endif
 
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 5426bb28a99..c277ac56b9d 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -107,7 +107,8 @@ typedef unsigned long pgprot_t;
 #endif
 
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
-				 (_AC(0x0000000070000000,UL)) : (PAGE_OFFSET))
+				 (_AC(0x0000000070000000,UL)) : \
+				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
 
 #endif /* !(__ASSEMBLY__) */
 
-- 
cgit v1.2.3-70-g09d2


From a77754b4d0731321db266c6c60ffcd7c62757da5 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 6 Mar 2006 19:59:50 -0800
Subject: [SPARC64]: Bulletproof MMU context locking.

1) Always spin_lock_init() in init_context().  The caller essentially
   clears it out, or copies the mm info from the parent.  In both
   cases we need to explicitly initialize the spinlock.

2) Always do explicit IRQ disabling while taking mm->context.lock
   and ctx_alloc_lock.
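
A minimal sketch of the pattern item 2 enforces, mirroring the
get_new_mmu_context() hunk below (the bitmap scan is elided):

    unsigned long flags;

    /* Plain spin_lock() is not enough here: if an interrupt taken
     * on this cpu ends up needing the same lock (the MMU context
     * paths can run from interrupt context), we self-deadlock.
     * Disabling IRQs across the critical section closes that hole.
     */
    spin_lock_irqsave(&ctx_alloc_lock, flags);
    /* ... find and claim a free context number in mmu_context_bmap ... */
    spin_unlock_irqrestore(&ctx_alloc_lock, flags);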

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/mm/init.c            | 5 +++--
 arch/sparc64/mm/tsb.c             | 1 +
 include/asm-sparc64/mmu_context.h | 6 +++---
 3 files changed, 7 insertions(+), 5 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 9bbd0bf64af..a63939347b3 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -639,9 +639,10 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
+	unsigned long flags;
 	int new_version;
 
-	spin_lock(&ctx_alloc_lock);
+	spin_lock_irqsave(&ctx_alloc_lock, flags);
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
@@ -677,7 +678,7 @@ void get_new_mmu_context(struct mm_struct *mm)
 out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
-	spin_unlock(&ctx_alloc_lock);
+	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
 
 	if (unlikely(new_version))
 		smp_new_mmu_context_version();
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 534ac281989..f36799b7152 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -354,6 +354,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 4be40c58e3c..ca36ea96f64 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -67,14 +67,14 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
 /* Switch the current MM context.  Interrupts are disabled.  */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
-	unsigned long ctx_valid;
+	unsigned long ctx_valid, flags;
 	int cpu;
 
-	spin_lock(&mm->context.lock);
+	spin_lock_irqsave(&mm->context.lock, flags);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
-	spin_unlock(&mm->context.lock);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
-- 
cgit v1.2.3-70-g09d2


From ee29074d3bd23848905f52c515974e0cd0219faa Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Mon, 6 Mar 2006 22:50:44 -0800
Subject: [SPARC64]: Fix new context version SMP handling.

Don't piggyback on the SMP receive signal code to do the
context version change handling.

Instead, allocate another fixed PIL number for this
asynchronous cross-call.  We can't use smp_call_function()
because this thing is invoked with interrupts disabled
and a few spinlocks held.

Also, fix smp_call_function_mask() to count "cpus" correctly.
There is no guarantee that the local cpu is in the mask,
yet that is exactly what this code was assuming.
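
The counting fix, in short (sketch of the smp.c hunk below): remove
ourselves from the mask first, then count, instead of blindly
subtracting one.

    /* Old, broken accounting: assumes the caller is in the mask.
     *     int cpus = cpus_weight(mask) - 1;
     *
     * New accounting: make the assumption true by construction.
     */
    cpu_clear(smp_processor_id(), mask);
    cpus = cpus_weight(mask);
    if (!cpus)
            goto out_unlock;	/* nothing to cross-call */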

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/devices.c    |  2 +-
 arch/sparc64/kernel/pci_psycho.c | 14 +++++++-------
 arch/sparc64/kernel/pci_sabre.c  | 14 +++++++-------
 arch/sparc64/kernel/pci_schizo.c | 12 ++++++------
 arch/sparc64/kernel/pci_sun4v.c  |  6 +++---
 arch/sparc64/kernel/sbus.c       | 10 +++++-----
 arch/sparc64/kernel/smp.c        | 39 ++++++++++++++++++++++++---------------
 arch/sparc64/kernel/ttable.S     |  3 ++-
 arch/sparc64/mm/ultra.S          |  5 +++++
 include/asm-sparc64/pil.h        |  4 +++-
 10 files changed, 63 insertions(+), 46 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index 1341b99ca7a..007e8922cd1 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -157,7 +157,7 @@ unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node)
 		return 0;
 	}
 
-	return sun4v_build_irq(sun4v_vdev_devhandle, irq, 4, 0);
+	return sun4v_build_irq(sun4v_vdev_devhandle, irq, 5, 0);
 }
 
 static const char *cpu_mid_prop(void)
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index f7b16e49c28..d17878b145c 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -286,17 +286,17 @@ static unsigned char psycho_pil_table[] = {
 /*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
 /*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
 /*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
-/*0x20*/4,		/* SCSI				*/
+/*0x20*/5,		/* SCSI				*/
 /*0x21*/5,		/* Ethernet			*/
 /*0x22*/8,		/* Parallel Port		*/
 /*0x23*/13,		/* Audio Record			*/
 /*0x24*/14,		/* Audio Playback		*/
 /*0x25*/15,		/* PowerFail			*/
-/*0x26*/4,		/* second SCSI			*/
+/*0x26*/5,		/* second SCSI			*/
 /*0x27*/11,		/* Floppy			*/
-/*0x28*/4,		/* Spare Hardware		*/
+/*0x28*/5,		/* Spare Hardware		*/
 /*0x29*/9,		/* Keyboard			*/
-/*0x2a*/4,		/* Mouse			*/
+/*0x2a*/5,		/* Mouse			*/
 /*0x2b*/12,		/* Serial			*/
 /*0x2c*/10,		/* Timer 0			*/
 /*0x2d*/11,		/* Timer 1			*/
@@ -313,11 +313,11 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 
 	ret = psycho_pil_table[ino];
 	if (ret == 0 && pdev == NULL) {
-		ret = 4;
+		ret = 5;
 	} else if (ret == 0) {
 		switch ((pdev->class >> 16) & 0xff) {
 		case PCI_BASE_CLASS_STORAGE:
-			ret = 4;
+			ret = 5;
 			break;
 
 		case PCI_BASE_CLASS_NETWORK:
@@ -336,7 +336,7 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 			break;
 
 		default:
-			ret = 4;
+			ret = 5;
 			break;
 		};
 	}
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 5ddc9293197..f67bb7f078c 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -533,17 +533,17 @@ static unsigned char sabre_pil_table[] = {
 /*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
 /*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
 /*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
-/*0x20*/4,		/* SCSI				*/
+/*0x20*/5,		/* SCSI				*/
 /*0x21*/5,		/* Ethernet			*/
 /*0x22*/8,		/* Parallel Port		*/
 /*0x23*/13,		/* Audio Record			*/
 /*0x24*/14,		/* Audio Playback		*/
 /*0x25*/15,		/* PowerFail			*/
-/*0x26*/4,		/* second SCSI			*/
+/*0x26*/5,		/* second SCSI			*/
 /*0x27*/11,		/* Floppy			*/
-/*0x28*/4,		/* Spare Hardware		*/
+/*0x28*/5,		/* Spare Hardware		*/
 /*0x29*/9,		/* Keyboard			*/
-/*0x2a*/4,		/* Mouse			*/
+/*0x2a*/5,		/* Mouse			*/
 /*0x2b*/12,		/* Serial			*/
 /*0x2c*/10,		/* Timer 0			*/
 /*0x2d*/11,		/* Timer 1			*/
@@ -565,11 +565,11 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 
 	ret = sabre_pil_table[ino];
 	if (ret == 0 && pdev == NULL) {
-		ret = 4;
+		ret = 5;
 	} else if (ret == 0) {
 		switch ((pdev->class >> 16) & 0xff) {
 		case PCI_BASE_CLASS_STORAGE:
-			ret = 4;
+			ret = 5;
 			break;
 
 		case PCI_BASE_CLASS_NETWORK:
@@ -588,7 +588,7 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 			break;
 
 		default:
-			ret = 4;
+			ret = 5;
 			break;
 		};
 	}
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 3a89f29e27d..7fe4de03ac2 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -243,8 +243,8 @@ static unsigned char schizo_pil_table[] = {
 /*0x0c*/0, 0, 0, 0,	/* PCI slot 3  Int A, B, C, D	*/
 /*0x10*/0, 0, 0, 0,	/* PCI slot 4  Int A, B, C, D	*/
 /*0x14*/0, 0, 0, 0,	/* PCI slot 5  Int A, B, C, D	*/
-/*0x18*/4,		/* SCSI				*/
-/*0x19*/4,		/* second SCSI			*/
+/*0x18*/5,		/* SCSI				*/
+/*0x19*/5,		/* second SCSI			*/
 /*0x1a*/0,		/* UNKNOWN			*/
 /*0x1b*/0,		/* UNKNOWN			*/
 /*0x1c*/8,		/* Parallel			*/
@@ -254,7 +254,7 @@ static unsigned char schizo_pil_table[] = {
 /*0x20*/13,		/* Audio Record			*/
 /*0x21*/14,		/* Audio Playback		*/
 /*0x22*/12,		/* Serial			*/
-/*0x23*/4,		/* EBUS I2C 			*/
+/*0x23*/5,		/* EBUS I2C 			*/
 /*0x24*/10,		/* RTC Clock			*/
 /*0x25*/11,		/* Floppy			*/
 /*0x26*/0,		/* UNKNOWN			*/
@@ -296,11 +296,11 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 
 	ret = schizo_pil_table[ino];
 	if (ret == 0 && pdev == NULL) {
-		ret = 4;
+		ret = 5;
 	} else if (ret == 0) {
 		switch ((pdev->class >> 16) & 0xff) {
 		case PCI_BASE_CLASS_STORAGE:
-			ret = 4;
+			ret = 5;
 			break;
 
 		case PCI_BASE_CLASS_NETWORK:
@@ -319,7 +319,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 			break;
 
 		default:
-			ret = 4;
+			ret = 5;
 			break;
 		};
 	}
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index a9c44c0ae0a..9372d4f376d 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -735,11 +735,11 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
 	u32 devhandle = pbm->devhandle;
 	int pil;
 
-	pil = 4;
+	pil = 5;
 	if (pdev) {
 		switch ((pdev->class >> 16) & 0xff) {
 		case PCI_BASE_CLASS_STORAGE:
-			pil = 4;
+			pil = 5;
 			break;
 
 		case PCI_BASE_CLASS_NETWORK:
@@ -758,7 +758,7 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
 			break;
 
 		default:
-			pil = 4;
+			pil = 5;
 			break;
 		};
 	}
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index d95a1bcf163..1d6ffdeabd4 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -693,11 +693,11 @@ void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
 
 /* SBUS SYSIO INO number to Sparc PIL level. */
 static unsigned char sysio_ino_to_pil[] = {
-	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
-	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
-	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
-	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
-	4, /* Onboard SCSI */
+	0, 5, 5, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
+	0, 5, 5, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
+	0, 5, 5, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
+	0, 5, 5, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
+	5, /* Onboard SCSI */
 	5, /* Onboard Ethernet */
 /*XXX*/	8, /* Onboard BPP */
 	0, /* Bogon */
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c4548a88953..cf56128097c 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -760,12 +760,9 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 				  int nonatomic, int wait, cpumask_t mask)
 {
 	struct call_data_struct data;
-	int cpus = cpus_weight(mask) - 1;
+	int cpus;
 	long timeout;
 
-	if (!cpus)
-		return 0;
-
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
@@ -776,6 +773,11 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 
 	spin_lock(&call_lock);
 
+	cpu_clear(smp_processor_id(), mask);
+	cpus = cpus_weight(mask);
+	if (!cpus)
+		goto out_unlock;
+
 	call_data = &data;
 
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
@@ -792,6 +794,7 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 		udelay(1);
 	}
 
+out_unlock:
 	spin_unlock(&call_lock);
 
 	return 0;
@@ -845,6 +848,7 @@ extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
+extern unsigned long xcall_new_mmu_context_version;
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -973,8 +977,14 @@ void smp_receive_signal(int cpu)
 }
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
+{
+	clear_softint(1 << irq);
+}
+
+void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
+	unsigned long flags;
 
 	clear_softint(1 << irq);
 
@@ -982,25 +992,24 @@ void smp_receive_signal_client(int irq, struct pt_regs *regs)
 	 * the version of the one we are using is now out of date.
 	 */
 	mm = current->active_mm;
-	if (likely(mm)) {
-		unsigned long flags;
+	if (unlikely(!mm || (mm == &init_mm)))
+		return;
 
-		spin_lock_irqsave(&mm->context.lock, flags);
+	spin_lock_irqsave(&mm->context.lock, flags);
 
-		if (unlikely(!CTX_VALID(mm->context)))
-			get_new_mmu_context(mm);
+	if (unlikely(!CTX_VALID(mm->context)))
+		get_new_mmu_context(mm);
 
-		load_secondary_context(mm);
-		__flush_tlb_mm(CTX_HWBITS(mm->context),
-			       SECONDARY_CONTEXT);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 
-		spin_unlock_irqrestore(&mm->context.lock, flags);
-	}
+	load_secondary_context(mm);
+	__flush_tlb_mm(CTX_HWBITS(mm->context),
+		       SECONDARY_CONTEXT);
 }
 
 void smp_new_mmu_context_version(void)
 {
-	__smp_receive_signal_mask(cpu_online_map);
+	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 }
 
 void smp_report_regs(void)
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index d5a8dd52d1f..5d901519db5 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -51,12 +51,13 @@ tl0_resv03e:	BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
+tl0_irq4:	TRAP_IRQ(smp_new_mmu_context_version_client, 4)
 #else
 tl0_irq1:	BTRAP(0x41)
 tl0_irq2:	BTRAP(0x42)
 tl0_irq3:	BTRAP(0x43)
+tl0_irq4:	BTRAP(0x44)
 #endif
-tl0_irq4:	TRAP_IRQ(handler_irq, 4)
 tl0_irq5:	TRAP_IRQ(handler_irq, 5)  TRAP_IRQ(handler_irq, 6)
 tl0_irq7:	TRAP_IRQ(handler_irq, 7)  TRAP_IRQ(handler_irq, 8)
 tl0_irq9:	TRAP_IRQ(handler_irq, 9)  TRAP_IRQ(handler_irq, 10)
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index bd8b0b4f878..f8479fad404 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -673,6 +673,11 @@ xcall_capture:
 	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry
 
+	.globl		xcall_new_mmu_context_version
+xcall_new_mmu_context_version:
+	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
+	retry
+
 #endif /* CONFIG_SMP */
 
 
diff --git a/include/asm-sparc64/pil.h b/include/asm-sparc64/pil.h
index 8f87750c351..79f827eb3f5 100644
--- a/include/asm-sparc64/pil.h
+++ b/include/asm-sparc64/pil.h
@@ -16,11 +16,13 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
+#define PIL_SMP_CTX_NEW_VERSION	4
 
 #ifndef __ASSEMBLY__
 #define PIL_RESERVED(PIL)	((PIL) == PIL_SMP_CALL_FUNC || \
 				 (PIL) == PIL_SMP_RECEIVE_SIGNAL || \
-				 (PIL) == PIL_SMP_CAPTURE)
+				 (PIL) == PIL_SMP_CAPTURE || \
+				 (PIL) == PIL_SMP_CTX_NEW_VERSION)
 #endif
 
 #endif /* !(_SPARC64_PIL_H) */
-- 
cgit v1.2.3-70-g09d2


From d1112018b4bc82adf5c8a9c15a08954328f023ae Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 8 Mar 2006 02:16:07 -0800
Subject: [SPARC64]: Move over to sparsemem.

This has been pending for a long time, and the fact
that we waste a ton of RAM on some configurations
kind of pushed things over the edge.
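
As a quick sanity check of the new sparsemem parameters (values taken
from the sparsemem.h hunk below; assumes an LP64 host):

    #include <stdio.h>

    #define SECTION_SIZE_BITS 26	/* 64MB per memory section */
    #define MAX_PHYSMEM_BITS  42	/* 4TB of physical address space */

    int main(void)
    {
            unsigned long bytes_per_section = 1UL << SECTION_SIZE_BITS;
            unsigned long max_sections =
                    1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS);

            /* Prints: 67108864 bytes/section, 65536 sections max */
            printf("%lu bytes/section, %lu sections max\n",
                   bytes_per_section, max_sections);
            return 0;
    }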

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/Kconfig                |   6 ++
 arch/sparc64/kernel/sparc64_ksyms.c |   7 --
 arch/sparc64/mm/init.c              | 134 ++++++++++++++++++++++++++----------
 include/asm-sparc64/numnodes.h      |   6 ++
 include/asm-sparc64/page.h          |   9 +--
 include/asm-sparc64/pgtable.h       |   3 -
 include/asm-sparc64/sparsemem.h     |  12 ++++
 7 files changed, 123 insertions(+), 54 deletions(-)
 create mode 100644 include/asm-sparc64/numnodes.h
 create mode 100644 include/asm-sparc64/sparsemem.h

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 4c0a50a7655..a253a39c3ff 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -186,6 +186,12 @@ endchoice
 
 endmenu
 
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+
+config ARCH_SPARSEMEM_DEFAULT
+	def_bool y
+
 source "mm/Kconfig"
 
 config GENERIC_ISA_DMA
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index e87fe7dfc7d..9914a17651b 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -95,9 +95,6 @@ extern int __ashrdi3(int, int);
 
 extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
 
-extern unsigned long phys_base;
-extern unsigned long pfn_base;
-
 extern unsigned int sys_call_table[];
 
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
@@ -346,11 +343,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__clear_user);
 
 /* Various address conversion macros use this. */
-EXPORT_SYMBOL(phys_base);
-EXPORT_SYMBOL(pfn_base);
 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
-EXPORT_SYMBOL(page_to_pfn);
-EXPORT_SYMBOL(pfn_to_page);
 
 /* No version information on this, heavily used in inline asm,
  * and will always be 'void __ret_efault(void)'.
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a63939347b3..5f67b53b3a5 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -130,11 +130,9 @@ static void __init read_obp_memory(const char *property,
 
 unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 
-/* Ugly, but necessary... -DaveM */
-unsigned long phys_base __read_mostly;
+/* Kernel physical address base and size in bytes.  */
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
-unsigned long pfn_base __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -368,16 +366,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 	}
 }
 
-unsigned long page_to_pfn(struct page *page)
-{
-	return (unsigned long) ((page - mem_map) + pfn_base);
-}
-
-struct page *pfn_to_page(unsigned long pfn)
-{
-	return (mem_map + (pfn - pfn_base));
-}
-
 void show_mem(void)
 {
 	printk("Mem-info:\n");
@@ -773,9 +761,78 @@ void sparc_ultra_dump_dtlb(void)
 
 extern unsigned long cmdline_memory_size;
 
-unsigned long __init bootmem_init(unsigned long *pages_avail)
+/* Find a free area for the bootmem map, avoiding the kernel image
+ * and the initial ramdisk.
+ */
+static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
+					       unsigned long end_pfn)
 {
-	unsigned long bootmap_size, start_pfn, end_pfn;
+	unsigned long avoid_start, avoid_end, bootmap_size;
+	int i;
+
+	bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
+	bootmap_size = ALIGN(bootmap_size, sizeof(long));
+
+	avoid_start = avoid_end = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+	avoid_start = initrd_start;
+	avoid_end = PAGE_ALIGN(initrd_end);
+#endif
+
+#ifdef CONFIG_DEBUG_BOOTMEM
+	prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
+		    kern_base, PAGE_ALIGN(kern_base + kern_size),
+		    avoid_start, avoid_end);
+#endif
+	for (i = 0; i < pavail_ents; i++) {
+		unsigned long start, end;
+
+		start = pavail[i].phys_addr;
+		end = start + pavail[i].reg_size;
+
+		while (start < end) {
+			if (start >= kern_base &&
+			    start < PAGE_ALIGN(kern_base + kern_size)) {
+				start = PAGE_ALIGN(kern_base + kern_size);
+				continue;
+			}
+			if (start >= avoid_start && start < avoid_end) {
+				start = avoid_end;
+				continue;
+			}
+
+			if ((end - start) < bootmap_size)
+				break;
+
+			if (start < kern_base &&
+			    (start + bootmap_size) > kern_base) {
+				start = PAGE_ALIGN(kern_base + kern_size);
+				continue;
+			}
+
+			if (start < avoid_start &&
+			    (start + bootmap_size) > avoid_start) {
+				start = avoid_end;
+				continue;
+			}
+
+			/* OK, it doesn't overlap anything, use it.  */
+#ifdef CONFIG_DEBUG_BOOTMEM
+			prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
+				    start >> PAGE_SHIFT, start);
+#endif
+			return start >> PAGE_SHIFT;
+		}
+	}
+
+	prom_printf("Cannot find free area for bootmap, aborting.\n");
+	prom_halt();
+}
+
+static unsigned long __init bootmem_init(unsigned long *pages_avail,
+					 unsigned long phys_base)
+{
+	unsigned long bootmap_size, end_pfn;
 	unsigned long end_of_phys_memory = 0UL;
 	unsigned long bootmap_pfn, bytes_avail, size;
 	int i;
@@ -813,14 +870,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 
 	*pages_avail = bytes_avail >> PAGE_SHIFT;
 
-	/* Start with page aligned address of last symbol in kernel
-	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
-	 * 4MB locked TLB translation.
-	 */
-	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
-
-	bootmap_pfn = start_pfn;
-
 	end_pfn = end_of_phys_memory >> PAGE_SHIFT;
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -837,23 +886,23 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 		                 	 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
 			       initrd_end, end_of_phys_memory);
 			initrd_start = 0;
-		}
-		if (initrd_start) {
-			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
-			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
-				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
+			initrd_end = 0;
 		}
 	}
 #endif	
 	/* Initialize the boot-time allocator. */
 	max_pfn = max_low_pfn = end_pfn;
-	min_low_pfn = pfn_base;
+	min_low_pfn = (phys_base >> PAGE_SHIFT);
+
+	bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);
 
 #ifdef CONFIG_DEBUG_BOOTMEM
 	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
 		    min_low_pfn, bootmap_pfn, max_low_pfn);
 #endif
-	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
+	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
+					 (phys_base >> PAGE_SHIFT),
+					 end_pfn);
 
 	/* Now register the available physical memory with the
 	 * allocator.
@@ -901,6 +950,20 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
 	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
+	for (i = 0; i < pavail_ents; i++) {
+		unsigned long start_pfn, end_pfn;
+
+		start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
+		end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
+#ifdef CONFIG_DEBUG_BOOTMEM
+		prom_printf("memory_present(0, %lx, %lx)\n",
+			    start_pfn, end_pfn);
+#endif
+		memory_present(0, start_pfn, end_pfn);
+	}
+
+	sparse_init();
+
 	return end_pfn;
 }
 
@@ -1180,7 +1243,7 @@ static void sun4v_pgprot_init(void);
 
 void __init paging_init(void)
 {
-	unsigned long end_pfn, pages_avail, shift;
+	unsigned long end_pfn, pages_avail, shift, phys_base;
 	unsigned long real_end, i;
 
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -1211,8 +1274,6 @@ void __init paging_init(void)
 	for (i = 0; i < pavail_ents; i++)
 		phys_base = min(phys_base, pavail[i].phys_addr);
 
-	pfn_base = phys_base >> PAGE_SHIFT;
-
 	set_bit(0, mmu_context_bmap);
 
 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
@@ -1248,7 +1309,9 @@ void __init paging_init(void)
 
 	/* Setup bootmem... */
 	pages_avail = 0;
-	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+	last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
+
+	max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
 
 	kernel_physical_mapping_init();
 
@@ -1261,7 +1324,7 @@ void __init paging_init(void)
 		for (znum = 0; znum < MAX_NR_ZONES; znum++)
 			zones_size[znum] = zholes_size[znum] = 0;
 
-		npages = end_pfn - pfn_base;
+		npages = end_pfn - (phys_base >> PAGE_SHIFT);
 		zones_size[ZONE_DMA] = npages;
 		zholes_size[ZONE_DMA] = npages - pages_avail;
 
@@ -1336,7 +1399,6 @@ void __init mem_init(void)
 
 	taint_real_pages();
 
-	max_mapnr = last_valid_pfn - pfn_base;
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
 #ifdef CONFIG_DEBUG_BOOTMEM
diff --git a/include/asm-sparc64/numnodes.h b/include/asm-sparc64/numnodes.h
new file mode 100644
index 00000000000..017e7e74f5e
--- /dev/null
+++ b/include/asm-sparc64/numnodes.h
@@ -0,0 +1,6 @@
+#ifndef _SPARC64_NUMNODES_H
+#define _SPARC64_NUMNODES_H
+
+#define NODES_SHIFT	0
+
+#endif /* !(_SPARC64_NUMNODES_H) */
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index c277ac56b9d..f6b49256fe2 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -125,17 +125,10 @@ typedef unsigned long pgprot_t;
 #define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
 
-/* PFNs are real physical page numbers.  However, mem_map only begins to record
- * per-page information starting at pfn_base.  This is to handle systems where
- * the first physical page in the machine is at some huge physical address,
- * such as 4GB.   This is common on a partitioned E10000, for example.
- */
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *);
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
 
-#define pfn_valid(pfn)		(((pfn)-(pfn_base)) < max_mapnr)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #define virt_to_phys __pa
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 75a2cd2d7e8..d427ce64921 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -217,9 +217,6 @@ extern unsigned long pg_iobits;
 extern unsigned long _PAGE_ALL_SZ_BITS;
 extern unsigned long _PAGE_SZBITS;
 
-extern unsigned long phys_base;
-extern unsigned long pfn_base;
-
 extern struct page *mem_map_zero;
 #define ZERO_PAGE(vaddr)	(mem_map_zero)
 
diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h
new file mode 100644
index 00000000000..ed5c9d8541e
--- /dev/null
+++ b/include/asm-sparc64/sparsemem.h
@@ -0,0 +1,12 @@
+#ifndef _SPARC64_SPARSEMEM_H
+#define _SPARC64_SPARSEMEM_H
+
+#ifdef __KERNEL__
+
+#define SECTION_SIZE_BITS       26
+#define MAX_PHYSADDR_BITS       42
+#define MAX_PHYSMEM_BITS        42
+
+#endif /* !(__KERNEL__) */
+
+#endif /* !(_SPARC64_SPARSEMEM_H) */
-- 
cgit v1.2.3-70-g09d2


From 8935dced547afbf37d0fcfcac9a3556494e53104 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 8 Mar 2006 16:09:19 -0800
Subject: [SPARC64]: Add SMT scheduling support for Niagara.

The mapping is a simple "(cpuid >> 2) == core" for now.
Later we'll add more sophisticated code that will walk
the sun4v machine description and figure this out from
there.

We should also add core mappings for jaguar and panther
processors.
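
In code, the interim rule is just the following (a sketch; same_core()
is an illustrative helper, not part of the patch):

    /* Niagara exposes 4 hardware strands per core, so two cpu ids
     * name siblings exactly when they agree above the low 2 bits.
     */
    static inline int same_core(int i, int j)
    {
            return (i >> 2) == (j >> 2);
    }

    /* e.g. cpus 4,5,6,7 form one core; cpu 8 starts the next. */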

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/Kconfig      |  9 +++++++++
 arch/sparc64/kernel/smp.c | 18 ++++++++++++++++++
 include/asm-sparc64/smp.h |  2 ++
 3 files changed, 29 insertions(+)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index a253a39c3ff..49b652f9b1d 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -356,6 +356,15 @@ config SOLARIS_EMUL
 
 endmenu
 
+config SCHED_SMT
+	bool "SMT (Hyperthreading) scheduler support"
+	depends on SMP
+	default y
+	help
+	  SMT scheduler support improves the CPU scheduler's decision making
+	  when dealing with UltraSPARC cpus at a cost of slightly increased
+	  overhead in some places. If unsure say N here.
+
 config CMDLINE_BOOL
 	bool "Default bootloader kernel arguments"
 
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index cf56128097c..373a701c90a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -47,6 +47,8 @@ static unsigned char boot_cpu_id;
 
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
+	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
 
@@ -1291,6 +1293,8 @@ int setup_profiling_timer(unsigned int multiplier)
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+	int i;
+
 	if (num_possible_cpus() > max_cpus) {
 		int instance, mid;
 
@@ -1305,6 +1309,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		}
 	}
 
+	for_each_cpu(i) {
+		if (tlb_type == hypervisor) {
+			int j;
+
+			/* XXX get this mapping from machine description */
+			for_each_cpu(j) {
+				if ((j >> 2) == (i >> 2))
+					cpu_set(j, cpu_sibling_map[i]);
+			}
+		} else {
+			cpu_set(i, cpu_sibling_map[i]);
+		}
+	}
+
 	smp_store_cpu_info(boot_cpu_id);
 }
 
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index ad1d35a7d13..89d86ecaab2 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -33,6 +33,8 @@
 extern cpumask_t phys_cpu_present_map;
 #define cpu_possible_map phys_cpu_present_map
 
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+
 /*
  *	General functions that each host system must provide.
  */
-- 
cgit v1.2.3-70-g09d2


From 90a6646bf6a1ca821f32d5510e935855612904df Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Wed, 8 Mar 2006 17:18:19 -0800
Subject: [SPARC64]: Fix system type in /proc/cpuinfo and remove bogus OBP
 check.

Report 'sun4v' when appropriate in /proc/cpuinfo.

Remove all the verifications of the OBP version string.  Just
make sure it's there, and report it raw in the bootup logs and
via /proc/cpuinfo.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/setup.c | 13 ++++++------
 arch/sparc64/prom/init.c    | 48 ++++-----------------------------------------
 arch/sparc64/prom/misc.c    | 18 -----------------
 include/asm-sparc64/oplib.h | 23 ++--------------------
 4 files changed, 12 insertions(+), 90 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 2a2a8a6cd17..7ae4027a919 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -442,9 +442,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 	seq_printf(m, 
 		   "cpu\t\t: %s\n"
 		   "fpu\t\t: %s\n"
-		   "promlib\t\t: Version 3 Revision %d\n"
-		   "prom\t\t: %d.%d.%d\n"
-		   "type\t\t: sun4u\n"
+		   "prom\t\t: %s\n"
+		   "type\t\t: %s\n"
 		   "ncpus probed\t: %d\n"
 		   "ncpus active\t: %d\n"
 		   "D$ parity tl1\t: %u\n"
@@ -456,10 +455,10 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 		   ,
 		   sparc_cpu_type,
 		   sparc_fpu_type,
-		   prom_rev,
-		   prom_prev >> 16,
-		   (prom_prev >> 8) & 0xff,
-		   prom_prev & 0xff,
+		   prom_version,
+		   ((tlb_type == hypervisor) ?
+		    "sun4v" :
+		    "sun4u"),
 		   ncpus_probed,
 		   num_online_cpus(),
 		   dcache_parity_tl1_occurred,
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index 095755e428a..1c0db842a6f 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -14,8 +14,8 @@
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 
-enum prom_major_version prom_vers;
-unsigned int prom_rev, prom_prev;
+/* OBP version string. */
+char prom_version[80];
 
 /* The root node of the prom device tree. */
 int prom_stdin, prom_stdout;
@@ -30,13 +30,7 @@ extern void prom_cif_init(void *, void *);
 
 void __init prom_init(void *cif_handler, void *cif_stack)
 {
-	char buffer[80], *p;
-	int ints[3];
 	int node;
-	int i = 0;
-	int bufadjust;
-
-	prom_vers = PROM_P1275;
 
 	prom_cif_init(cif_handler, cif_stack);
 
@@ -51,44 +45,10 @@ void __init prom_init(void *cif_handler, void *cif_stack)
 	if (!node || node == -1)
 		prom_halt();
 
-	prom_getstring(node, "version", buffer, sizeof (buffer));
+	prom_getstring(node, "version", prom_version, sizeof(prom_version));
 
 	prom_printf("\n");
 
-	if (strncmp(buffer, "OBP ", 4))
-		goto strange_version;
-
-	/*
-	 * Version field is expected to be 'OBP xx.yy.zz date...'
-	 * However, Sun can't stick to this format very well, so
-	 * we need to check for 'OBP  xx.yy.zz date...' and adjust
-	 * accordingly. -spot
-	 */
-
-	if (strncmp(buffer, "OBP  ", 5))
-		bufadjust = 4;
-	else
-		bufadjust = 5;
-
-	p = buffer + bufadjust;
-	while (p && isdigit(*p) && i < 3) {
-		ints[i++] = simple_strtoul(p, NULL, 0);
-		if ((p = strchr(p, '.')) != NULL)
-			p++;
-	}
-	if (i != 3)
-		goto strange_version;
-
-	prom_rev = ints[1];
-	prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2];
-
-	printk("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
+	printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version);
 	printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible);
-
-	/* Initialization successful. */
-	return;
-
-strange_version:
-	prom_printf ("Strange OBP version `%s'.\n", buffer);
-	prom_halt ();
 }
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 90df42141b1..577bde8b664 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -112,24 +112,6 @@ unsigned char prom_get_idprom(char *idbuf, int num_bytes)
 	return 0xff;
 }
 
-/* Get the major prom version number. */
-int prom_version(void)
-{
-	return PROM_P1275;
-}
-
-/* Get the prom plugin-revision. */
-int prom_getrev(void)
-{
-	return prom_rev;
-}
-
-/* Get the prom firmware print revision. */
-int prom_getprev(void)
-{
-	return prom_prev;
-}
-
 /* Install Linux trap table so PROM uses that instead of its own. */
 void prom_set_trap_table(unsigned long tba)
 {
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 84618f8a9e6..c754676e13e 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -12,18 +12,8 @@
 #include <linux/config.h>
 #include <asm/openprom.h>
 
-/* Enumeration to describe the prom major version we have detected. */
-enum prom_major_version {
-	PROM_V0,      /* Original sun4c V0 prom */
-	PROM_V2,      /* sun4c and early sun4m V2 prom */
-	PROM_V3,      /* sun4m and later, up to sun4d/sun4e machines V3 */
-	PROM_P1275,   /* IEEE compliant ISA based Sun PROM, only sun4u */
-        PROM_AP1000,  /* actually no prom at all */
-};
-
-extern enum prom_major_version prom_vers;
-/* Revision, and firmware revision. */
-extern unsigned int prom_rev, prom_prev;
+/* OBP version string. */
+extern char prom_version[];
 
 /* Root node of the prom device tree, this stays constant after
  * initialization is complete.
@@ -133,15 +123,6 @@ extern void prom_setcallback(callback_func_t func_ptr);
  */
 extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
 
-/* Get the prom major version. */
-extern int prom_version(void);
-
-/* Get the prom plugin revision. */
-extern int prom_getrev(void);
-
-/* Get the prom firmware revision. */
-extern int prom_getprev(void);
-
 /* Character operations to/from the console.... */
 
 /* Non-blocking get character from console. */
-- 
cgit v1.2.3-70-g09d2


From 7a1ac5264108fc3ed22d17a3cdd76212ed1666d1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Thu, 16 Mar 2006 02:02:32 -0800
Subject: [SPARC64]: Fix and re-enable dynamic TSB sizing.

This is good for up to a 50% performance improvement on some test cases.
The problem has been the race conditions, and hopefully I've plugged
them all up here.

1) There was a serious race in switch_mm() wrt. lazy TLB
   switching to and from kernel threads.

   We could erroneously skip a tsb_context_switch() and thus
   use a stale TSB across a TSB grow event.

   There is a big comment now in that function describing
   exactly how it can happen.

2) All code paths that do something with the TSB need to be
   guarded with the mm->context.lock spinlock.  This makes
   page table flushing paths properly synchronize with both
   TSB growing and TLB context changes.

3) TSB growing events are moved to the end of successful fault
   processing.  Previously it was in update_mmu_cache() but
   that is deadlock prone.  At the end of do_sparc64_fault()
   we hold no spinlocks that could deadlock the TSB grow
   sequence.  We also have dropped the address space semaphore.
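
   Concretely, item 3 puts the check at the very tail of the fault
   handler (sketch of the do_sparc64_fault() hunk below):

       /* Runs after up_read(&mm->mmap_sem), with no spinlocks held,
        * so tsb_grow() is free to allocate and take mm->context.lock.
        */
       mm_rss = get_mm_rss(mm);
       if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
               tsb_grow(mm, mm_rss);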

While we're here, add prefetching to the copy_tsb() routine,
now rewritten in assembler and moved into the tsb.S file.  This
piece of code is quite time-critical.

There are some small negative side effects to this code which
can be improved upon.  In particular, we now grab the mm->context.lock
even for the TSB insert done by update_mmu_cache(), and that's
a bit excessive.  We can get rid of that locking, and the same
lock taking in flush_tsb_user(), by disabling PSTATE_IE around
the whole operation including the capturing of the tsb pointer
and tsb_nentries value.  That would work because anyone growing
the TSB won't free up the old TSB until all cpus respond to the
TSB change cross call.

I'm not quite so confident in that optimization to put it in
right now, but eventually we might be able to and the description
is here for reference.

This code seems very solid now.  It passes several parallel GCC
bootstrap builds, and our favorite "nut cruncher" stress test which is
a full "make -j8192" build of a "make allmodconfig" kernel.  That puts
about 256 processes on each cpu's run queue, makes lots of process cpu
migrations occur, causes lots of page table and TLB flushing activity,
incurs many context version number changes, and it swaps the machine
real far out to disk even though there is 16GB of ram on this test
system. :-)

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/tsb.S         |  71 ++++++++++++++-
 arch/sparc64/mm/fault.c           |   8 +-
 arch/sparc64/mm/init.c            |   7 +-
 arch/sparc64/mm/tsb.c             | 185 +++++++++++++++++---------------------
 include/asm-sparc64/mmu_context.h |  50 ++++++++---
 5 files changed, 203 insertions(+), 118 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index d738910153f..1b154c86362 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -34,8 +34,9 @@ tsb_miss_itlb:
 	 ldxa		[%g4] ASI_IMMU, %g4
 
 	/* At this point we have:
-	 * %g4 --	missing virtual address
 	 * %g1 --	TSB entry address
+	 * %g3 --	FAULT_CODE_{D,I}TLB
+	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
@@ -45,6 +46,12 @@ tsb_miss_page_table_walk:
 tsb_miss_page_table_walk_sun4v_fastpath:
 	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
+	/* At this point we have:
+	 * %g1 --	TSB entry address
+	 * %g3 --	FAULT_CODE_{D,I}TLB
+	 * %g5 --	physical address of PTE in Linux page tables
+	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 */
 tsb_reload:
 	TSB_LOCK_TAG(%g1, %g2, %g7)
 
@@ -199,6 +206,7 @@ __tsb_insert:
 	wrpr	%o5, %pstate
 	retl
 	 nop
+	.size	__tsb_insert, .-__tsb_insert
 
 	/* Flush the given TSB entry if it has the matching
 	 * tag.
@@ -208,6 +216,7 @@ __tsb_insert:
 	 */
 	.align	32
 	.globl	tsb_flush
+	.type	tsb_flush,#function
 tsb_flush:
 	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
 1:	TSB_LOAD_TAG(%o0, %g1)
@@ -225,6 +234,7 @@ tsb_flush:
 	 nop
 2:	retl
 	 TSB_MEMBAR
+	.size	tsb_flush, .-tsb_flush
 
 	/* Reload MMU related context switch state at
 	 * schedule() time.
@@ -241,6 +251,7 @@ tsb_flush:
 	 */
 	.align	32
 	.globl	__tsb_context_switch
+	.type	__tsb_context_switch,#function
 __tsb_context_switch:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
@@ -302,3 +313,61 @@ __tsb_context_switch:
 
 	retl
 	 nop
+	.size	__tsb_context_switch, .-__tsb_context_switch
+
+#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
+			 (1 << TSB_TAG_INVALID_BIT))
+
+	.align	32
+	.globl	copy_tsb
+	.type	copy_tsb,#function
+copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
+			 * %o2=new_tsb_base, %o3=new_tsb_size
+			 */
+	sethi		%uhi(TSB_PASS_BITS), %g7
+	srlx		%o3, 4, %o3
+	add		%o0, %o1, %g1	/* end of old tsb */
+	sllx		%g7, 32, %g7
+	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */
+
+661:	prefetcha	[%o0] ASI_N, #one_read
+	.section	.tsb_phys_patch, "ax"
+	.word		661b
+	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
+	.previous
+
+90:	andcc		%o0, (64 - 1), %g0
+	bne		1f
+	 add		%o0, 64, %o5
+
+661:	prefetcha	[%o5] ASI_N, #one_read
+	.section	.tsb_phys_patch, "ax"
+	.word		661b
+	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
+	.previous
+
+1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
+	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
+	bne,pn		%xcc, 80f	/* Skip it */
+	 sllx		%g2, 22, %o4	/* TAG --> VADDR */
+
+	/* This can definitely be computed faster... */
+	srlx		%o0, 4, %o5	/* Build index */
+	and		%o5, 511, %o5	/* Mask index */
+	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+	or		%o4, %o5, %o4	/* Full VADDR. */
+	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
+	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
+	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
+	add		%o4, 0x8, %o4	/* Advance to TTE */
+	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */
+
+80:	add		%o0, 16, %o0
+	cmp		%o0, %g1
+	bne,pt		%xcc, 90b
+	 nop
+
+	retl
+	 TSB_MEMBAR
+	.size		copy_tsb, .-copy_tsb
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index b97bd054aad..63b6cc0cd5d 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -29,6 +29,7 @@
 #include <asm/lsu.h>
 #include <asm/sections.h>
 #include <asm/kdebug.h>
+#include <asm/mmu_context.h>
 
 /*
  * To debug kernel to catch accesses to certain virtual/physical addresses.
@@ -258,7 +259,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	struct vm_area_struct *vma;
 	unsigned int insn = 0;
 	int si_code, fault_code;
-	unsigned long address;
+	unsigned long address, mm_rss;
 
 	fault_code = get_thread_fault_code();
 
@@ -407,6 +408,11 @@ good_area:
 	}
 
 	up_read(&mm->mmap_sem);
+
+	mm_rss = get_mm_rss(mm);
+	if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
+		tsb_grow(mm, mm_rss);
+
 	return;
 
 	/*
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b40f6477dea..d703b67bc7b 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -279,7 +279,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 {
 	struct mm_struct *mm;
 	struct tsb *tsb;
-	unsigned long tag;
+	unsigned long tag, flags;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -308,10 +308,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	}
 
 	mm = vma->vm_mm;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
 	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
 			       (mm->context.tsb_nentries - 1UL)];
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, pte_val(pte));
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 void flush_dcache_page(struct page *page)
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index f36799b7152..7fbe1e0cd10 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -48,11 +48,15 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 void flush_tsb_user(struct mmu_gather *mp)
 {
 	struct mm_struct *mm = mp->mm;
-	struct tsb *tsb = mm->context.tsb;
-	unsigned long nentries = mm->context.tsb_nentries;
-	unsigned long base;
+	unsigned long nentries, base, flags;
+	struct tsb *tsb;
 	int i;
 
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	tsb = mm->context.tsb;
+	nentries = mm->context.tsb_nentries;
+
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(tsb);
 	else
@@ -70,6 +74,8 @@ void flush_tsb_user(struct mmu_gather *mp)
 
 		tsb_flush(ent, tag);
 	}
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
@@ -201,86 +207,9 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	}
 }
 
-/* The page tables are locked against modifications while this
- * runs.
- *
- * XXX do some prefetching...
- */
-static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
-		     struct tsb *new_tsb, unsigned long new_size)
-{
-	unsigned long old_nentries = old_size / sizeof(struct tsb);
-	unsigned long new_nentries = new_size / sizeof(struct tsb);
-	unsigned long i;
-
-	for (i = 0; i < old_nentries; i++) {
-		register unsigned long tag asm("o4");
-		register unsigned long pte asm("o5");
-		unsigned long v, hash;
-
-		if (tlb_type == hypervisor) {
-			__asm__ __volatile__(
-				"ldda [%2] %3, %0"
-				: "=r" (tag), "=r" (pte)
-				: "r" (__pa(&old_tsb[i])),
-				  "i" (ASI_QUAD_LDD_PHYS_4V));
-		} else if (tlb_type == cheetah_plus) {
-			__asm__ __volatile__(
-				"ldda [%2] %3, %0"
-				: "=r" (tag), "=r" (pte)
-				: "r" (__pa(&old_tsb[i])),
-				  "i" (ASI_QUAD_LDD_PHYS));
-		} else {
-			__asm__ __volatile__(
-				"ldda [%2] %3, %0"
-				: "=r" (tag), "=r" (pte)
-				: "r" (&old_tsb[i]),
-				  "i" (ASI_NUCLEUS_QUAD_LDD));
-		}
-
-		if (tag & ((1UL << TSB_TAG_LOCK_BIT) |
-			   (1UL << TSB_TAG_INVALID_BIT)))
-			continue;
-
-		/* We only put base page size PTEs into the TSB,
-		 * but that might change in the future.  This code
-		 * would need to be changed if we start putting larger
-		 * page size PTEs into there.
-		 */
-		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);
-
-		/* The tag holds bits 22 to 63 of the virtual address
-		 * and the context.  Clear out the context, and shift
-		 * up to make a virtual address.
-		 */
-		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;
-
-		/* The implied bits of the tag (bits 13 to 21) are
-		 * determined by the TSB entry index, so fill that in.
-		 */
-		v |= (i & (512UL - 1UL)) << 13UL;
-
-		hash = tsb_hash(v, new_nentries);
-		if (tlb_type == cheetah_plus ||
-		    tlb_type == hypervisor) {
-			__asm__ __volatile__(
-				"stxa	%0, [%1] %2\n\t"
-				"stxa	%3, [%4] %2"
-				: /* no outputs */
-				: "r" (tag),
-				  "r" (__pa(&new_tsb[hash].tag)),
-				  "i" (ASI_PHYS_USE_EC),
-				  "r" (pte),
-				  "r" (__pa(&new_tsb[hash].pte)));
-		} else {
-			new_tsb[hash].tag = tag;
-			new_tsb[hash].pte = pte;
-		}
-	}
-}
-
 /* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * update_mmu_cache() invokes this routine to try and grow the TSB.
+ * do_sparc64_fault() invokes this routine to try and grow the TSB.
+ *
  * When we reach the maximum TSB size supported, we stick ~0UL into
  * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
  * will not trigger any longer.
@@ -293,12 +222,12 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
  * the number of entries that the current TSB can hold at once.  Currently,
  * we trigger when the RSS hits 3/4 of the TSB capacity.
  */
-void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
+void tsb_grow(struct mm_struct *mm, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
-	unsigned long size, old_size;
+	unsigned long size, old_size, flags;
 	struct page *page;
-	struct tsb *old_tsb;
+	struct tsb *old_tsb, *new_tsb;
 
 	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
 		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
@@ -311,12 +240,51 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 			break;
 	}
 
-	page = alloc_pages(gfp_flags, get_order(size));
+	page = alloc_pages(GFP_KERNEL, get_order(size));
 	if (unlikely(!page))
 		return;
 
 	/* Mark all tags as invalid.  */
-	memset(page_address(page), 0x40, size);
+	new_tsb = page_address(page);
+	memset(new_tsb, 0x40, size);
+
+	/* Ok, we are about to commit the changes.  If we are
+	 * growing an existing TSB the locking is very tricky,
+	 * so WATCH OUT!
+	 *
+	 * We have to hold mm->context.lock while committing to the
+	 * new TSB, this synchronizes us with processors in
+	 * flush_tsb_user() and switch_mm() for this address space.
+	 *
+	 * But even with that lock held, processors run asynchronously
+	 * accessing the old TSB via TLB miss handling.  This is OK
+	 * because those actions are just propagating state from the
+	 * Linux page tables into the TSB, page table mappings are not
+	 * being changed.  If a real fault occurs, the processor will
+	 * synchronize with us when it hits flush_tsb_user(), this is
+	 * also true for the case where vmscan is modifying the page
+	 * tables.  The only thing we need to be careful with is to
+	 * skip any locked TSB entries during copy_tsb().
+	 *
+	 * When we finish committing to the new TSB, we have to drop
+	 * the lock and ask all other cpus running this address space
+	 * to run tsb_context_switch() to see the new TSB table.
+	 */
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	old_tsb = mm->context.tsb;
+	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+
+	/* Handle multiple threads trying to grow the TSB at the same time.
+	 * One will get in here first, and bump the size and the RSS limit.
+	 * The others will get in here next and hit this check.
+	 */
+	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+		spin_unlock_irqrestore(&mm->context.lock, flags);
+
+		free_pages((unsigned long) new_tsb, get_order(size));
+		return;
+	}
 
 	if (size == max_tsb_size)
 		mm->context.tsb_rss_limit = ~0UL;
@@ -324,30 +292,37 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 		mm->context.tsb_rss_limit =
 			((size / sizeof(struct tsb)) * 3) / 4;
 
-	old_tsb = mm->context.tsb;
-	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
-
-	if (old_tsb)
-		copy_tsb(old_tsb, old_size, page_address(page), size);
+	if (old_tsb) {
+		extern void copy_tsb(unsigned long old_tsb_base,
+				     unsigned long old_tsb_size,
+				     unsigned long new_tsb_base,
+				     unsigned long new_tsb_size);
+		unsigned long old_tsb_base = (unsigned long) old_tsb;
+		unsigned long new_tsb_base = (unsigned long) new_tsb;
+
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
+			old_tsb_base = __pa(old_tsb_base);
+			new_tsb_base = __pa(new_tsb_base);
+		}
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
+	}
 
-	mm->context.tsb = page_address(page);
+	mm->context.tsb = new_tsb;
 	setup_tsb_params(mm, size);
 
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+
 	/* If old_tsb is NULL, we're being invoked for the first time
 	 * from init_new_context().
 	 */
 	if (old_tsb) {
-		/* Now force all other processors to reload the new
-		 * TSB state.
-		 */
-		smp_tsb_sync(mm);
-
-		/* Finally reload it on the local cpu.  No further
-		 * references will remain to the old TSB and we can
-		 * thus free it up.
-		 */
+		/* Reload it on the local cpu.  */
 		tsb_context_switch(mm);
 
+		/* Now force other processors to do the same.  */
+		smp_tsb_sync(mm);
+
+		/* Now it is safe to free the old tsb.  */
 		free_pages((unsigned long) old_tsb, get_order(old_size));
 	}
 }
@@ -363,7 +338,11 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 * will be confused and think there is an older TSB to free up.
 	 */
 	mm->context.tsb = NULL;
-	tsb_grow(mm, 0, GFP_KERNEL);
+
+	/* If this is fork, inherit the parent's TSB size.  We would
+	 * grow it to that size on the first page fault anyways.
+	 */
+	tsb_grow(mm, get_mm_rss(mm));
 
 	if (unlikely(!mm->context.tsb))
 		return -ENOMEM;
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index ca36ea96f64..e7974321d05 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -42,7 +42,7 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 			     __pa(&mm->context.tsb_descr));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
+extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
 #ifdef CONFIG_SMP
 extern void smp_tsb_sync(struct mm_struct *mm);
 #else
@@ -74,18 +74,43 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
 
-	if (!ctx_valid || (old_mm != mm)) {
-		load_secondary_context(mm);
-		tsb_context_switch(mm);
-	}
+	/* We have to be extremely careful here or else we will miss
+	 * a TSB grow if we switch back and forth between a kernel
+	 * thread and an address space which has its TSB size increased
+	 * on another processor.
+	 *
+	 * It is possible to play some games in order to optimize the
+	 * switch, but the safest thing to do is to unconditionally
+	 * perform the secondary context load and the TSB context switch.
+	 *
+	 * For reference the bad case is, for address space "A":
+	 *
+	 *		CPU 0			CPU 1
+	 *	run address space A
+	 *	set cpu0's bits in cpu_vm_mask
+	 *	switch to kernel thread, borrow
+	 *	address space A via entry_lazy_tlb
+	 *					run address space A
+	 *					set cpu1's bit in cpu_vm_mask
+	 *					flush_tlb_pending()
+	 *					reset cpu_vm_mask to just cpu1
+	 *					TSB grow
+	 *	run address space A
+	 *	context was valid, so skip
+	 *	TSB context switch
+	 *
+	 * At that point cpu0 continues to use a stale TSB, the one from
+	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
+	 * cpu0 to update its TSB because at that point the cpu_vm_mask
+	 * only had cpu1 set in it.
+	 */
+	load_secondary_context(mm);
+	tsb_context_switch(mm);
 
-	/* Even if (mm == old_mm) we _must_ check
-	 * the cpu_vm_mask.  If we do not we could
-	 * corrupt the TLB state because of how
-	 * smp_flush_tlb_{page,range,mm} on sparc64
-	 * and lazy tlb switches work. -DaveM
+	/* Any time a processor runs a context on an address space
+	 * for the first time, we must flush that context out of the
+	 * local TLB.
 	 */
 	cpu = smp_processor_id();
 	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
@@ -93,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
 			       SECONDARY_CONTEXT);
 	}
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -109,11 +135,11 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	cpu = smp_processor_id();
 	if (!cpu_isset(cpu, mm->cpu_vm_mask))
 		cpu_set(cpu, mm->cpu_vm_mask);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	tsb_context_switch(mm);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #endif /* !(__ASSEMBLY__) */
-- 
cgit v1.2.3-70-g09d2
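
The ordering in the tsb_grow() hunk above is the heart of this patch:
the new table is published under mm->context.lock, reloaded on the
local cpu, cross-called to the other cpus, and only then is the old
table freed.  A minimal C sketch of that commit protocol (error
handling and the copy_tsb() physical-address setup are elided; this is
a condensed restatement of the code above, not a drop-in):

	spin_lock_irqsave(&mm->context.lock, flags);
	old_tsb = mm->context.tsb;
	if (old_tsb && rss < mm->context.tsb_rss_limit) {
		/* Another thread grew the TSB first; back out.  */
		spin_unlock_irqrestore(&mm->context.lock, flags);
		free_pages((unsigned long) new_tsb, get_order(size));
		return;
	}
	copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
	mm->context.tsb = new_tsb;		/* publish */
	setup_tsb_params(mm, size);
	spin_unlock_irqrestore(&mm->context.lock, flags);

	tsb_context_switch(mm);	/* local cpu now uses the new TSB  */
	smp_tsb_sync(mm);	/* force the other cpus to reload  */
	free_pages((unsigned long) old_tsb, get_order(old_size));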


From a91690ddd05ab0b7fbdd37733875525ac75c20f2 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Mar 2006 14:41:03 -0800
Subject: [SPARC64]: Top-down address space allocation for 32-bit tasks.

Currently allocations are very constrained for 32-bit processes.
The mapping area grows bottom-up from 0x70000000 to 0xf0000000,
which gives about 2GB of stack + dynamic mmap() space.

So support the top-down method.  To do that we need to override the
generic helper function in order to deal with D-cache coloring.

With these changes I was able to squeeze out a mmap() just over
3.6GB in size in a 32-bit process.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/binfmt_aout32.c |   2 +
 arch/sparc64/kernel/sys_sparc.c     | 190 ++++++++++++++++++++++++++++++++++--
 include/asm-sparc64/pgtable.h       |   5 +-
 include/asm-sparc64/processor.h     |   2 +
 4 files changed, 189 insertions(+), 10 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index cb9ecd0172c..d7caa60a007 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -239,6 +239,8 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 		(current->mm->start_data = N_DATADDR(ex));
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
+	current->mm->free_area_cache = current->mm->mmap_base;
+	current->mm->cached_hole_size = 0;
 
 	current->mm->mmap = NULL;
 	compute_creds(bprm);
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 8840415408b..61dffb9349b 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -82,9 +82,34 @@ static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end
 	return 1;
 }
 
-#define COLOUR_ALIGN(addr,pgoff)		\
-	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
-	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+/* These functions differ from the default implementations in
+ * mm/mmap.c in two ways:
+ *
+ * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
+ *    for fixed such mappings we just validate what the user gave us.
+ * 2) For 64-bit tasks we avoid mapping anything within 4GB of
+ *    the spitfire/niagara VA-hole.
+ */
+
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+					 unsigned long pgoff)
+{
+	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
+	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
+
+	return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~(SHMLBA-1);
+	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
+
+	if (base + off <= addr)
+		return base + off;
+	return base - off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
@@ -106,7 +131,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = 0xf0000000UL;
-	if (len > task_size || len >= VA_EXCLUDE_START)
+	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
 		return -ENOMEM;
 
 	do_color_align = 0;
@@ -125,11 +150,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			return addr;
 	}
 
-	if (len <= mm->cached_hole_size) {
+	if (len > mm->cached_hole_size) {
+	        start_addr = addr = mm->free_area_cache;
+	} else {
+	        start_addr = addr = TASK_UNMAPPED_BASE;
 	        mm->cached_hole_size = 0;
-		mm->free_area_cache = TASK_UNMAPPED_BASE;
 	}
-	start_addr = addr = mm->free_area_cache;
 
 	task_size -= len;
 
@@ -146,7 +172,7 @@ full_search:
 			addr = VA_EXCLUDE_END;
 			vma = find_vma(mm, VA_EXCLUDE_END);
 		}
-		if (task_size < addr) {
+		if (unlikely(task_size < addr)) {
 			if (start_addr != TASK_UNMAPPED_BASE) {
 				start_addr = addr = TASK_UNMAPPED_BASE;
 				mm->cached_hole_size = 0;
@@ -154,7 +180,7 @@ full_search:
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (likely(!vma || addr + len <= vma->vm_start)) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
@@ -170,6 +196,121 @@ full_search:
 	}
 }
 
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long task_size = 0xf0000000UL;
+	unsigned long addr = addr0;
+	int do_color_align;
+
+	/* This should only ever run for 32-bit processes.  */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (unlikely(len > task_size))
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_color_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+ 	        mm->cached_hole_size = 0;
+ 		mm->free_area_cache = mm->mmap_base;
+ 	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_color_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (likely(addr > len)) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+		}
+	}
+
+	if (unlikely(mm->mmap_base < len))
+		goto bottomup;
+
+	addr = mm->mmap_base-len;
+	if (do_color_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (likely(!vma || addr+len <= vma->vm_start)) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		}
+
+ 		/* remember the largest hole we saw so far */
+ 		if (addr + mm->cached_hole_size < vma->vm_start)
+ 		        mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+		if (do_color_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (likely(len < vma->vm_start));
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+  	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
 /* Try to align mapping such that we align it as much as possible. */
 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
@@ -213,6 +354,37 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
 	return addr;
 }
 
+/* Essentially the same as PowerPC... */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	/*
+	 * Fall back to the standard layout if the personality
+	 * bit is set, or if the expected stack growth is unlimited:
+	 */
+	if (!test_thread_flag(TIF_32BIT) ||
+	    (current->personality & ADDR_COMPAT_LAYOUT) ||
+	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
+	    sysctl_legacy_va_layout) {
+		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		/* We know it's 32-bit */
+		unsigned long task_size = 0xf0000000UL;
+		unsigned long gap;
+
+		gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+		if (gap < 128 * 1024 * 1024)
+			gap = 128 * 1024 * 1024;
+		if (gap > (task_size / 6 * 5))
+			gap = (task_size / 6 * 5);
+
+		mm->mmap_base = task_size - (gap & PAGE_MASK);
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
+}
+
 asmlinkage unsigned long sparc_brk(unsigned long brk)
 {
 	/* People could try to be nasty and use ta 0x6d in 32bit programs */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index d427ce64921..ed4124edf83 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -752,8 +752,11 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 
 #include <asm-generic/pgtable.h>
 
-/* We provide our own get_unmapped_area to cope with VA holes for userland */
+/* We provide our own get_unmapped_area to cope with VA holes and
+ * SHM area cache aliasing for userland.
+ */
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 /* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 685479fb436..c6896b88283 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -217,6 +217,8 @@ static inline void prefetchw(const void *x)
 
 #define spin_lock_prefetch(x)	prefetchw(x)
 
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
-- 
cgit v1.2.3-70-g09d2
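
The colour helpers in this patch are easiest to read against the
MAP_FIXED validation in arch_get_unmapped_area_topdown(): a shared
file mapping is acceptable only when the address and the file offset
fall on the same D-cache colour, i.e. agree modulo SHMLBA.  A small
sketch of that invariant (shared_colours_ok() is a name invented here
for illustration, not a kernel helper):

	/* The aliasing rule the MAP_FIXED path checks verbatim.  */
	static int shared_colours_ok(unsigned long addr, unsigned long pgoff)
	{
		return ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0;
	}

COLOUR_ALIGN() rounds the address up to the next SHMLBA boundary and
adds the offset's colour bits back in, yielding the smallest address
at or above that boundary for which the invariant holds;
COLOUR_ALIGN_DOWN() is its counterpart for the top-down search.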


From d61e16df940e02e25679bdc1aee8c25786f6de90 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Fri, 17 Mar 2006 17:33:56 -0800
Subject: [SPARC64]: Increase top of 32-bit process stack.

Put it one page below the top of the 32-bit address space.
This gives us ~16MB more address space to work with.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/binfmt_elf32.c |  4 +++-
 arch/sparc64/kernel/sys_sparc.c    | 13 +++++++------
 arch/sparc64/kernel/sys_sparc32.c  |  9 +++++----
 arch/sparc64/solaris/misc.c        |  4 ++--
 include/asm-sparc64/a.out.h        |  6 ++++--
 5 files changed, 21 insertions(+), 15 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index a1a12d2aa35..8a2abcce273 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -153,7 +153,9 @@ MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
 #undef MODULE_DESCRIPTION
 #undef MODULE_AUTHOR
 
+#include <asm/a.out.h>
+
 #undef TASK_SIZE
-#define TASK_SIZE 0xf0000000
+#define TASK_SIZE STACK_TOP32
 
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 61dffb9349b..9019b41fc02 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -30,6 +30,7 @@
 #include <asm/ipc.h>
 #include <asm/utrap.h>
 #include <asm/perfctr.h>
+#include <asm/a.out.h>
 
 /* #define DEBUG_UNIMP_SYSCALL */
 
@@ -130,7 +131,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	}
 
 	if (test_thread_flag(TIF_32BIT))
-		task_size = 0xf0000000UL;
+		task_size = STACK_TOP32;
 	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
 		return -ENOMEM;
 
@@ -203,7 +204,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long task_size = 0xf0000000UL;
+	unsigned long task_size = STACK_TOP32;
 	unsigned long addr = addr0;
 	int do_color_align;
 
@@ -370,7 +371,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->unmap_area = arch_unmap_area;
 	} else {
 		/* We know it's 32-bit */
-		unsigned long task_size = 0xf0000000UL;
+		unsigned long task_size = STACK_TOP32;
 		unsigned long gap;
 
 		gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
@@ -388,7 +389,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 asmlinkage unsigned long sparc_brk(unsigned long brk)
 {
 	/* People could try to be nasty and use ta 0x6d in 32bit programs */
-	if (test_thread_flag(TIF_32BIT) && brk >= 0xf0000000UL)
+	if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
 		return current->mm->brk;
 
 	if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
@@ -554,10 +555,10 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
 	retval = -EINVAL;
 
 	if (test_thread_flag(TIF_32BIT)) {
-		if (len >= 0xf0000000UL)
+		if (len >= STACK_TOP32)
 			goto out_putf;
 
-		if ((flags & MAP_FIXED) && addr > 0xf0000000UL - len)
+		if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
 			goto out_putf;
 	} else {
 		if (len >= VA_EXCLUDE_START)
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 417727bd87b..0e41df02448 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -62,6 +62,7 @@
 #include <asm/fpumacro.h>
 #include <asm/semaphore.h>
 #include <asm/mmu_context.h>
+#include <asm/a.out.h>
 
 asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
 {
@@ -1039,15 +1040,15 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr,
 	unsigned long ret = -EINVAL;
 	unsigned long new_addr = __new_addr;
 
-	if (old_len > 0xf0000000UL || new_len > 0xf0000000UL)
+	if (old_len > STACK_TOP32 || new_len > STACK_TOP32)
 		goto out;
-	if (addr > 0xf0000000UL - old_len)
+	if (addr > STACK_TOP32 - old_len)
 		goto out;
 	down_write(&current->mm->mmap_sem);
 	if (flags & MREMAP_FIXED) {
-		if (new_addr > 0xf0000000UL - new_len)
+		if (new_addr > STACK_TOP32 - new_len)
 			goto out_sem;
-	} else if (addr > 0xf0000000UL - new_len) {
+	} else if (addr > STACK_TOP32 - new_len) {
 		unsigned long map_flags = 0;
 		struct file *file = NULL;
 
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index 3ab4677395f..5284996780a 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -90,7 +90,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o
 	len = PAGE_ALIGN(len);
 	if(!(flags & MAP_FIXED))
 		addr = 0;
-	else if (len > 0xf0000000UL || addr > 0xf0000000UL - len)
+	else if (len > STACK_TOP32 || addr > STACK_TOP32 - len)
 		goto out_putf;
 	ret_type = flags & _MAP_NEW;
 	flags &= ~_MAP_NEW;
@@ -102,7 +102,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o
 			 (unsigned long) prot, (unsigned long) flags, off);
 	up_write(&current->mm->mmap_sem);
 	if(!ret_type)
-		retval = ((retval < 0xf0000000) ? 0 : retval);
+		retval = ((retval < STACK_TOP32) ? 0 : retval);
 	                        
 out_putf:
 	if (file)
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h
index faac321d559..35cb5c9e0c9 100644
--- a/include/asm-sparc64/a.out.h
+++ b/include/asm-sparc64/a.out.h
@@ -95,9 +95,11 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
 
 #ifdef __KERNEL__
 
+#define STACK_TOP32	((1UL << 32UL) - PAGE_SIZE)
+#define STACK_TOP64	(0x0000080000000000UL - (1UL << 32UL))
+
 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
-		   0xf0000000 : \
-		   (0x0000080000000000UL - (1UL << 32UL)))
+		   STACK_TOP32 : STACK_TOP64)
 
 #endif
 
-- 
cgit v1.2.3-70-g09d2
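
As a quick sanity check of the new constant, with the default 8KB
base pages (an assumption here; PAGE_SIZE is configurable on sparc64):

	STACK_TOP32 = (1UL << 32) - PAGE_SIZE
	            = 0x100000000 - 0x2000
	            = 0xffffe000

i.e. exactly one page below the 4GB boundary, replacing the
hard-coded 0xf0000000 ceiling used throughout the files above.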


From bb8646d8340fa7c1b66a037428e39f85f8738f0a Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sat, 18 Mar 2006 23:55:11 -0800
Subject: [SPARC64]: Optimized TSB table initialization.

We only need to write an invalid tag every 16 bytes,
so taking advantage of this can save many instructions
compared to the simple memset() call we make now.

A prefetching implementation is provided for sun4u
and a block-init store version is implemented for Niagara.

The next trick is to be able to perform an init and
a copy_tsb() in parallel when growing a TSB table.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/tsb.S  | 69 ++++++++++++++++++++++++++++++++++++++++++++++
 arch/sparc64/lib/NGbzero.S |  1 +
 arch/sparc64/mm/tsb.c      |  2 +-
 include/asm-sparc64/mmu.h  |  1 +
 4 files changed, 72 insertions(+), 1 deletion(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 1b154c86362..118baea44f6 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -371,3 +371,72 @@ copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 	retl
 	 TSB_MEMBAR
 	.size		copy_tsb, .-copy_tsb
+
+	/* Set the invalid bit in all TSB entries.  */
+	.align		32
+	.globl		tsb_init
+	.type		tsb_init,#function
+tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
+	prefetch	[%o0 + 0x000], #n_writes
+	mov		1, %g1
+	prefetch	[%o0 + 0x040], #n_writes
+	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
+	prefetch	[%o0 + 0x080], #n_writes
+1:	prefetch	[%o0 + 0x0c0], #n_writes
+	stx		%g1, [%o0 + 0x00]
+	stx		%g1, [%o0 + 0x10]
+	stx		%g1, [%o0 + 0x20]
+	stx		%g1, [%o0 + 0x30]
+	prefetch	[%o0 + 0x100], #n_writes
+	stx		%g1, [%o0 + 0x40]
+	stx		%g1, [%o0 + 0x50]
+	stx		%g1, [%o0 + 0x60]
+	stx		%g1, [%o0 + 0x70]
+	prefetch	[%o0 + 0x140], #n_writes
+	stx		%g1, [%o0 + 0x80]
+	stx		%g1, [%o0 + 0x90]
+	stx		%g1, [%o0 + 0xa0]
+	stx		%g1, [%o0 + 0xb0]
+	prefetch	[%o0 + 0x180], #n_writes
+	stx		%g1, [%o0 + 0xc0]
+	stx		%g1, [%o0 + 0xd0]
+	stx		%g1, [%o0 + 0xe0]
+	stx		%g1, [%o0 + 0xf0]
+	subcc		%o1, 0x100, %o1
+	bne,pt		%xcc, 1b
+	 add		%o0, 0x100, %o0
+	retl
+	 nop
+	nop
+	nop
+	.size		tsb_init, .-tsb_init
+
+	.globl		NGtsb_init
+	.type		NGtsb_init,#function
+NGtsb_init:
+	rd		%asi, %g2
+	mov		1, %g1
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
+1:	stxa		%g1, [%o0 + 0x00] %asi
+	stxa		%g1, [%o0 + 0x10] %asi
+	stxa		%g1, [%o0 + 0x20] %asi
+	stxa		%g1, [%o0 + 0x30] %asi
+	stxa		%g1, [%o0 + 0x40] %asi
+	stxa		%g1, [%o0 + 0x50] %asi
+	stxa		%g1, [%o0 + 0x60] %asi
+	stxa		%g1, [%o0 + 0x70] %asi
+	stxa		%g1, [%o0 + 0x80] %asi
+	stxa		%g1, [%o0 + 0x90] %asi
+	stxa		%g1, [%o0 + 0xa0] %asi
+	stxa		%g1, [%o0 + 0xb0] %asi
+	stxa		%g1, [%o0 + 0xc0] %asi
+	stxa		%g1, [%o0 + 0xd0] %asi
+	stxa		%g1, [%o0 + 0xe0] %asi
+	stxa		%g1, [%o0 + 0xf0] %asi
+	subcc		%o1, 0x100, %o1
+	bne,pt		%xcc, 1b
+	 add		%o0, 0x100, %o0
+	retl
+	 wr		%g2, 0x0, %asi
+	.size		NGtsb_init, .-NGtsb_init
diff --git a/arch/sparc64/lib/NGbzero.S b/arch/sparc64/lib/NGbzero.S
index fef584f745d..e86baece5cc 100644
--- a/arch/sparc64/lib/NGbzero.S
+++ b/arch/sparc64/lib/NGbzero.S
@@ -157,6 +157,7 @@ niagara_patch_bzero:
 	NG_DO_PATCH(memset, NGmemset)
 	NG_DO_PATCH(__bzero, NGbzero)
 	NG_DO_PATCH(__clear_user, NGclear_user)
+	NG_DO_PATCH(tsb_init, NGtsb_init)
 	retl
 	 nop
 	.size	niagara_patch_bzero,.-niagara_patch_bzero
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 1af797a0a09..b2064e2a44d 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -313,7 +313,7 @@ retry_tsb_alloc:
 	}
 
 	/* Mark all tags as invalid.  */
-	memset(new_tsb, 0x40, new_size);
+	tsb_init(new_tsb, new_size);
 
 	/* Ok, we are about to commit the changes.  If we are
 	 * growing an existing TSB the locking is very tricky,
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index da14a9bf0ed..230ba678d3b 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -88,6 +88,7 @@ struct tsb {
 
 extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
 extern void tsb_flush(unsigned long ent, unsigned long tag);
+extern void tsb_init(struct tsb *tsb, unsigned long size);
 
 typedef struct {
 	spinlock_t		lock;
-- 
cgit v1.2.3-70-g09d2
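
What the two assembly routines above do is easier to see in C.  A
rough equivalent, assuming the 16-byte struct tsb { tag, pte } layout
from asm-sparc64/mmu.h (the prefetches and the Niagara block-init ASI
are the whole point of the assembly versions and are omitted here):

	static void tsb_init_sketch(struct tsb *tsb, unsigned long size)
	{
		unsigned long i, nentries = size / sizeof(struct tsb);

		/* Only the tag word of each 16-byte entry needs the
		 * invalid bit set; the pte word is never consulted
		 * while the tag is invalid, which is why this beats
		 * the byte-by-byte memset() it replaces.  */
		for (i = 0; i < nentries; i++)
			tsb[i].tag = 1UL << TSB_TAG_INVALID_BIT;
	}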


From f6b83f070e9b7ad9075f7cc5646260e56c7d0219 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Mon, 20 Mar 2006 01:17:17 -0800
Subject: [SPARC64]: Fix 2 bugs in huge page support.

1) huge_pte_offset() did not correctly check the page table
   hierarchy elements for being empty, resulting in an OOPS

2) We need a platform-specific hugetlb_get_unmapped_area() to
   handle the top-down vs. bottom-up address space allocation
   strategies.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/mm/hugetlbpage.c | 179 +++++++++++++++++++++++++++++++++++++++++-
 include/asm-sparc64/page.h    |   1 +
 2 files changed, 176 insertions(+), 4 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 625cbb336a2..a7a24869d04 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -1,7 +1,7 @@
 /*
  * SPARC64 Huge TLB page support.
  *
- * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/config.h>
@@ -22,6 +22,175 @@
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 
+/* Slightly simplified from the non-hugepage variant because by
+ * definition we don't have to worry about any page coloring stuff
+ */
+#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+							unsigned long addr,
+							unsigned long len,
+							unsigned long pgoff,
+							unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct * vma;
+	unsigned long task_size = TASK_SIZE;
+	unsigned long start_addr;
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = STACK_TOP32;
+	if (unlikely(len >= VA_EXCLUDE_START))
+		return -ENOMEM;
+
+	if (len > mm->cached_hole_size) {
+	        start_addr = addr = mm->free_area_cache;
+	} else {
+	        start_addr = addr = TASK_UNMAPPED_BASE;
+	        mm->cached_hole_size = 0;
+	}
+
+	task_size -= len;
+
+full_search:
+	addr = ALIGN(addr, HPAGE_SIZE);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		if (addr < VA_EXCLUDE_START &&
+		    (addr + len) >= VA_EXCLUDE_START) {
+			addr = VA_EXCLUDE_END;
+			vma = find_vma(mm, VA_EXCLUDE_END);
+		}
+		if (unlikely(task_size < addr)) {
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+		if (likely(!vma || addr + len <= vma->vm_start)) {
+			/*
+			 * Remember the place where we stopped the search:
+			 */
+			mm->free_area_cache = addr + len;
+			return addr;
+		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+		        mm->cached_hole_size = vma->vm_start - addr;
+
+		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+	}
+}
+
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+				  const unsigned long len,
+				  const unsigned long pgoff,
+				  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+
+	/* This should only ever run for 32-bit processes.  */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+ 	        mm->cached_hole_size = 0;
+ 		mm->free_area_cache = mm->mmap_base;
+ 	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache & HPAGE_MASK;
+
+	/* make sure it can fit in the remaining address space */
+	if (likely(addr > len)) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+		}
+	}
+
+	if (unlikely(mm->mmap_base < len))
+		goto bottomup;
+
+	addr = (mm->mmap_base-len) & HPAGE_MASK;
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (likely(!vma || addr+len <= vma->vm_start)) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		}
+
+ 		/* remember the largest hole we saw so far */
+ 		if (addr + mm->cached_hole_size < vma->vm_start)
+ 		        mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = (vma->vm_start-len) & HPAGE_MASK;
+	} while (likely(len < vma->vm_start));
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+  	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long task_size = TASK_SIZE;
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = STACK_TOP32;
+
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (len > task_size)
+		return -ENOMEM;
+
+	if (addr) {
+		addr = ALIGN(addr, HPAGE_SIZE);
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+	if (mm->get_unmapped_area == arch_get_unmapped_area)
+		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+				pgoff, flags);
+	else
+		return hugetlb_get_unmapped_area_topdown(file, addr, len,
+				pgoff, flags);
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -48,12 +217,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
+	addr &= HPAGE_MASK;
+
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
+	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
-		if (pud) {
+		if (!pud_none(*pud)) {
 			pmd = pmd_offset(pud, addr);
-			if (pmd)
+			if (!pmd_none(*pmd))
 				pte = pte_offset_map(pmd, addr);
 		}
 	}
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index f6b49256fe2..fcb2812265f 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -104,6 +104,7 @@ typedef unsigned long pgprot_t;
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define ARCH_HAS_SETCLEAR_HUGE_PTE
 #define ARCH_HAS_HUGETLB_PREFAULT_HOOK
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
-- 
cgit v1.2.3-70-g09d2
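
Bug (1) deserves a restatement: pgd_offset() only computes a pointer
into the pgd page, so it is never NULL and the old `if (pgd)' tests
could not fail; what must be tested is the entry the pointer refers
to.  In sketch form (the usual page table walk macros assumed):

	pgd_t *pgd = pgd_offset(mm, addr);  /* never NULL, pointer math */
	if (!pgd_none(*pgd)) {              /* test the entry instead   */
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud))
			pmd = pmd_offset(pud, addr);  /* and so on */
	}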


From dcc1e8dd88d4bc55e32a26dad7633d20ffe606d2 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Wed, 22 Mar 2006 00:49:59 -0800
Subject: [SPARC64]: Add a secondary TSB for hugepage mappings.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/Kconfig                 |   4 +-
 arch/sparc64/kernel/sun4v_tlb_miss.S |  39 +++---
 arch/sparc64/kernel/traps.c          |  21 +++-
 arch/sparc64/kernel/tsb.S            | 210 +++++++++++++++++++++++--------
 arch/sparc64/mm/fault.c              |  15 ++-
 arch/sparc64/mm/hugetlbpage.c        |  28 +++--
 arch/sparc64/mm/init.c               |  21 +++-
 arch/sparc64/mm/tsb.c                | 234 ++++++++++++++++++++++-------------
 include/asm-sparc64/cpudata.h        |   5 +-
 include/asm-sparc64/mmu.h            |  29 ++++-
 include/asm-sparc64/mmu_context.h    |  21 ++--
 include/asm-sparc64/page.h           |  34 ++---
 include/asm-sparc64/pgtable.h        |   2 +
 13 files changed, 462 insertions(+), 201 deletions(-)

(limited to 'include/asm-sparc64')
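
The mechanical core of the patch is that mm->context now carries an
array of TSB configurations indexed by MM_TSB_BASE and MM_TSB_HUGE,
and each lookup, flush and insert path first picks an index plus a
hash shift.  A condensed sketch of the selection performed in the
update_mmu_cache() hunk below (is_huge_pte() stands in for the
_PAGE_SZALL/_PAGE_SZHUGE comparison and is not a real helper):

	unsigned long idx = MM_TSB_BASE, shift = PAGE_SHIFT;
	struct tsb *ent;

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb && is_huge_pte(pte)) {
		idx = MM_TSB_HUGE;
		shift = HPAGE_SHIFT;
	}
	ent = mm->context.tsb_block[idx].tsb;
	ent += tsb_hash(address, shift,
			mm->context.tsb_block[idx].tsb_nentries);
	tsb_insert(ent, address >> 22UL, pte_val(pte));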

diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index c3685b314d7..267afddf63c 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -175,11 +175,11 @@ config HUGETLB_PAGE_SIZE_4MB
 	bool "4MB"
 
 config HUGETLB_PAGE_SIZE_512K
-	depends on !SPARC64_PAGE_SIZE_4MB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
 	bool "512K"
 
 config HUGETLB_PAGE_SIZE_64K
-	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64K
 	bool "64K"
 
 endchoice
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index ab23ddb7116..b731881224e 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -29,15 +29,15 @@
 	 *
 	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
 	 * tsb_base = tsb_reg & ~0x7UL;
-	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+	 * tsb_index = ((vaddr >> HASH_SHIFT) & index_mask);
 	 * tsb_ptr = tsb_base + (tsb_index * 16);
 	 */
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2)	\
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
 	and	TSB_PTR, 0x7, TMP1;			\
 	mov	512, TMP2;				\
 	andn	TSB_PTR, 0x7, TSB_PTR;			\
 	sllx	TMP2, TMP1, TMP2;			\
-	srlx	VADDR, PAGE_SHIFT, TMP1;		\
+	srlx	VADDR, HASH_SHIFT, TMP1;		\
 	sub	TMP2, 1, TMP2;				\
 	and	TMP1, TMP2, TMP1;			\
 	sllx	TMP1, 4, TMP1;				\
@@ -53,7 +53,7 @@ sun4v_itlb_miss:
 
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:
 
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:
 
 	/* fallthrough */
 
-	/* Create TSB pointer into %g1.  This is something like:
-	 *
-	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
-	 * tsb_base = tsb_reg & ~0x7UL;
-	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
-	 * tsb_ptr = tsb_base + (tsb_index * 16);
-	 */
 sun4v_tsb_miss_common:
-	COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
 
-	/* Branch directly to page table lookup.  We have SCRATCHPAD_MMU_MISS
-	 * still in %g2, so it's quite trivial to get at the PGD PHYS value
-	 * so we can preload it into %g7.
-	 */
 	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+	mov	SCRATCHPAD_UTSBREG2, %g5
+	ldxa	[%g5] ASI_SCRATCHPAD, %g5
+	cmp	%g5, -1
+	be,pt	%xcc, 80f
+	 nop
+	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+	/* That clobbered %g2, reload it.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80:	stx	%g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
 	ba,pt	%xcc, tsb_miss_page_table_walk_sun4v_fastpath
 	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
 
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7f7dba0ca96..df612e4f75f 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
 	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
 	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)))
+	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+	    (TRAP_PER_CPU_TSB_HUGE !=
+	     offsetof(struct trap_per_cpu, tsb_huge)) ||
+	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
+	if ((TSB_CONFIG_TSB !=
+	     offsetof(struct tsb_config, tsb)) ||
+	    (TSB_CONFIG_RSS_LIMIT !=
+	     offsetof(struct tsb_config, tsb_rss_limit)) ||
+	    (TSB_CONFIG_NENTRIES !=
+	     offsetof(struct tsb_config, tsb_nentries)) ||
+	    (TSB_CONFIG_REG_VAL !=
+	     offsetof(struct tsb_config, tsb_reg_val)) ||
+	    (TSB_CONFIG_MAP_VADDR !=
+	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
+	    (TSB_CONFIG_MAP_PTE !=
+	     offsetof(struct tsb_config, tsb_map_pte)))
+		tsb_config_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 118baea44f6..a0c8ba58920 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -3,8 +3,13 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/config.h>
+
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
 
 	.text
 	.align	32
@@ -34,34 +39,124 @@ tsb_miss_itlb:
 	 ldxa		[%g4] ASI_IMMU, %g4
 
 	/* At this point we have:
-	 * %g1 --	TSB entry address
+	 * %g1 --	PAGE_SIZE TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
 	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
-	TRAP_LOAD_PGD_PHYS(%g7, %g5)
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
 
-	/* And now we have the PGD base physical address in %g7.  */
-tsb_miss_page_table_walk_sun4v_fastpath:
-	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+	/* Before committing to a full page table walk,
+	 * check the huge page TSB.
+	 */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+	nop
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		SCRATCHPAD_UTSBREG2, %g5
+	ldxa		[%g5] ASI_SCRATCHPAD, %g5
+	.previous
+
+	cmp		%g5, -1
+	be,pt		%xcc, 80f
+	 nop
+
+	/* We need an aligned pair of registers containing 2 values
+	 * which can be easily rematerialized.  %g6 and %g7 foot the
+	 * bill just nicely.  We'll save %g6 away into %g2 for the
+	 * huge page TSB TAG comparison.
+	 *
+	 * Perform a huge page TSB lookup.
+	 */
+	mov		%g6, %g2
+	and		%g5, 0x7, %g6
+	mov		512, %g7
+	andn		%g5, 0x7, %g5
+	sllx		%g7, %g6, %g7
+	srlx		%g4, HPAGE_SHIFT, %g6
+	sub		%g7, 1, %g7
+	and		%g6, %g7, %g6
+	sllx		%g6, 4, %g6
+	add		%g5, %g6, %g5
+
+	TSB_LOAD_QUAD(%g5, %g6)
+	cmp		%g6, %g2
+	be,a,pt		%xcc, tsb_tlb_reload
+	 mov		%g7, %g5
+
+	/* No match, remember the huge page TSB entry address,
+	 * and restore %g6 and %g7.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+	srlx		%g4, 22, %g6
+80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+
+#endif
+
+	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
 
 	/* At this point we have:
 	 * %g1 --	TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
-	 * %g5 --	physical address of PTE in Linux page tables
+	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 * %g7 --	page table physical address
+	 *
+	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
+	 * TSB lack a matching entry.
 	 */
-tsb_reload:
-	TSB_LOCK_TAG(%g1, %g2, %g7)
+tsb_miss_page_table_walk_sun4v_fastpath:
+	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g7)
+	brgez,pn	%g5, tsb_do_fault
+	 nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZALL_4V, %g7
+	nop
+	.previous
+
+	and		%g5, %g7, %g2
+
+661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZHUGE_4V, %g7
+	nop
+	.previous
+
+	cmp		%g2, %g7
+	bne,pt		%xcc, 60f
+	 nop
+
+	/* It is a huge page, use huge page TSB entry address we
+	 * calculated above.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+	cmp		%g2, -1
+	movne		%xcc, %g2, %g1
+60:
+#endif
 
+	/* At this point we have:
+	 * %g1 --	TSB entry address
+	 * %g3 --	FAULT_CODE_{D,I}TLB
+	 * %g5 --	valid PTE
+	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 */
+tsb_reload:
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
@@ -240,10 +335,9 @@ tsb_flush:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1:	TSB register value
-	 * %o2:	TSB virtual address
-	 * %o3:	TSB mapping locked PTE
-	 * %o4:	Hypervisor TSB descriptor physical address
+	 * %o1:	TSB base config pointer
+	 * %o2:	TSB huge config pointer, or NULL if none
+	 * %o3:	Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -253,63 +347,79 @@ tsb_flush:
 	.globl	__tsb_context_switch
 	.type	__tsb_context_switch,#function
 __tsb_context_switch:
-	rdpr	%pstate, %o5
-	wrpr	%o5, PSTATE_IE, %pstate
+	rdpr	%pstate, %g1
+	wrpr	%g1, PSTATE_IE, %pstate
+
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
-	ldub	[%g6 + TI_CPU], %g1
-	sethi	%hi(trap_block), %g2
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
-	or	%g2, %lo(trap_block), %g2
-	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	sethi	%hi(tlb_type), %g1
-	lduw	[%g1 + %lo(tlb_type)], %g1
-	cmp	%g1, 3
-	bne,pt	%icc, 1f
+	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
+	brz,pt	%o2, 1f
+	 mov	-1, %g3
+
+	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+	sethi	%hi(tlb_type), %g2
+	lduw	[%g2 + %lo(tlb_type)], %g2
+	cmp	%g2, 3
+	bne,pt	%icc, 50f
 	 nop
 
 	/* Hypervisor TSB switch. */
-	mov	SCRATCHPAD_UTSBREG1, %g1
-	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	mov	-1, %g2
-	mov	SCRATCHPAD_UTSBREG2, %g1
-	stxa	%g2, [%g1] ASI_SCRATCHPAD
-
-	/* Save away %o5's %pstate, we have to use %o5 for
-	 * the hypervisor call.
-	 */
-	mov	%o5, %g1
+	mov	SCRATCHPAD_UTSBREG1, %o5
+	stxa	%o0, [%o5] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG2, %o5
+	stxa	%g3, [%o5] ASI_SCRATCHPAD
+
+	mov	2, %o0
+	cmp	%g3, -1
+	move	%xcc, 1, %o0
 
 	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
-	mov	1, %o0
-	mov	%o4, %o1
+	mov	%o3, %o1
 	ta	HV_FAST_TRAP
 
-	/* Finish up and restore %o5.  */
+	/* Finish up.  */
 	ba,pt	%xcc, 9f
-	 mov	%g1, %o5
+	 nop
 
 	/* SUN4U TSB switch.  */
-1:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
+50:	mov	TSB_REG, %o5
+	stxa	%o0, [%o5] ASI_DMMU
 	membar	#Sync
-	stxa	%o1, [%g1] ASI_IMMU
+	stxa	%o0, [%o5] ASI_IMMU
 	membar	#Sync
 
-2:	brz	%o2, 9f
-	 nop
+2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
+	brz	%o4, 9f
+	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
 
 	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
-	mov	TLB_TAG_ACCESS, %g1
+	mov	TLB_TAG_ACCESS, %g3
 	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o4, [%g3] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
-	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+	brz,pt	%o2, 9f
+	 nop
+
+	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
+	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
+	mov	TLB_TAG_ACCESS, %g3
+	stxa	%o4, [%g3] ASI_DMMU
+	membar	#Sync
+	sub	%g2, (1 << 3), %g2
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
 	membar	#Sync
+
 9:
-	wrpr	%o5, %pstate
+	wrpr	%g1, %pstate
 
 	retl
 	 nop
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 63b6cc0cd5d..d21ff3230c0 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -410,9 +410,18 @@ good_area:
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-	if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
-		tsb_grow(mm, mm_rss);
-
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss = mm->context.huge_pte_count;
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
 	return;
 
 	/*
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index a7a24869d04..0a1d4cd24cd 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -199,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
-		pud = pud_offset(pgd, addr);
-		if (pud) {
-			pmd = pmd_alloc(mm, pud, addr);
-			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
-		}
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -231,13 +229,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
 
+	if (!pte_present(*ptep) && pte_present(entry))
+		mm->context.huge_pte_count++;
+
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		set_pte_at(mm, addr, ptep, entry);
 		ptep++;
@@ -253,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	int i;
 
 	entry = *ptep;
+	if (pte_present(entry))
+		mm->context.huge_pte_count--;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		pte_clear(mm, addr, ptep);
@@ -302,6 +303,15 @@ static void context_reload(void *__data)
 
 void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
 	/* On UltraSPARC-III+ and later, configure the second half of
 	 * the Data-TLB for huge pages.
 	 */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index c2b556106fc..16d231703d6 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -283,6 +283,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
+	unsigned long tsb_index, tsb_hash_shift;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 	mm = vma->vm_mm;
 
+	tsb_index = MM_TSB_BASE;
+	tsb_hash_shift = PAGE_SHIFT;
+
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-			       (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+		if ((tlb_type == hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+		    (tlb_type != hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+			tsb_index = MM_TSB_HUGE;
+			tsb_hash_shift = HPAGE_SHIFT;
+		}
+	}
+#endif
+
+	tsb = mm->context.tsb_block[tsb_index].tsb;
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, pte_val(pte));
 
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index b2064e2a44d..beaa02810f0 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -15,9 +15,9 @@
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
 {
-	vaddr >>= PAGE_SHIFT;
+	vaddr >>= hash_shift;
 	return vaddr & (nentries - 1);
 }
 
@@ -36,7 +36,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+					      KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
 {
-	struct mm_struct *mm = mp->mm;
-	unsigned long nentries, base, flags;
-	struct tsb *tsb;
-	int i;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	tsb = mm->context.tsb;
-	nentries = mm->context.tsb_nentries;
+	unsigned long i;
 
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(tsb);
-	else
-		base = (unsigned long) tsb;
-	
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		hash = tsb_hash(v, nentries);
-		ent = base + (hash * sizeof(struct tsb));
+		hash = tsb_hash(v, hash_shift, nentries);
+		ent = tsb + (hash * sizeof(struct tsb));
 		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
+}
+
+void flush_tsb_user(struct mmu_gather *mp)
+{
+	struct mm_struct *mm = mp->mm;
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
 
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+	}
+#endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
 {
 	unsigned long tsb_reg, base, tsb_paddr;
 	unsigned long page_sz, tte;
 
-	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+	mm->context.tsb_block[tsb_idx].tsb_nentries =
+		tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
-	tsb_paddr = __pa(mm->context.tsb);
+	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
@@ -147,61 +190,49 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = 0;
-		mm->context.tsb_map_pte = 0;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
 	} else {
 		tsb_reg |= base;
 		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
 		tte |= (tsb_paddr & ~(page_sz - 1UL));
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = base;
-		mm->context.tsb_map_pte = tte;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
 	}
 
 	/* Setup the Hypervisor TSB descriptor.  */
 	if (tlb_type == hypervisor) {
-		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
 
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_idx = HV_PGSZ_IDX_8K;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 			break;
-
-		case 64 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->assoc = 1;
 		hp->num_ttes = tsb_bytes / 16;
 		hp->ctx_idx = 0;
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_mask = HV_PGSZ_MASK_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_512K;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
 			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->tsb_base = tsb_paddr;
 		hp->resv = 0;
@@ -241,11 +272,11 @@ void __init tsb_cache_init(void)
 	}
 }
 
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
  *
  * When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
  * will not trigger any longer.
  *
  * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@ void __init tsb_cache_init(void)
  * the number of entries that the current TSB can hold at once.  Currently,
  * we trigger when the RSS hits 3/4 of the TSB capacity.
  */
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
 	unsigned long new_size, old_size, flags;
@@ -297,7 +328,8 @@ retry_tsb_alloc:
 		 * down to a 0-order allocation and force no TSB
 		 * growing for this address space.
 		 */
-		if (mm->context.tsb == NULL && new_cache_index > 0) {
+		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+		    new_cache_index > 0) {
 			new_cache_index = 0;
 			new_size = 8192;
 			new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@ retry_tsb_alloc:
 		/* If we failed on a TSB grow, we are under serious
 		 * memory pressure so don't try to grow any more.
 		 */
-		if (mm->context.tsb != NULL)
-			mm->context.tsb_rss_limit = ~0UL;
+		if (mm->context.tsb_block[tsb_index].tsb != NULL)
+			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
 		return;
 	}
 
@@ -339,23 +371,26 @@ retry_tsb_alloc:
 	 */
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	old_tsb = mm->context.tsb;
-	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+	old_tsb = mm->context.tsb_block[tsb_index].tsb;
+	old_cache_index =
+		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+		    sizeof(struct tsb));
 
 
 	/* Handle multiple threads trying to grow the TSB at the same time.
 	 * One will get in here first, and bump the size and the RSS limit.
 	 * The others will get in here next and hit this check.
 	 */
-	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+	if (unlikely(old_tsb &&
+		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
 
-	mm->context.tsb_rss_limit = new_rss_limit;
+	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
 
 	if (old_tsb) {
 		extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@ retry_tsb_alloc:
 		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
 	}
 
-	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, new_size);
+	mm->context.tsb_block[tsb_index].tsb = new_tsb;
+	setup_tsb_params(mm, tsb_index, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 
@@ -394,40 +429,65 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	unsigned long huge_pte_count;
+#endif
+	unsigned int i;
+
 	spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/* We reset it to zero because the fork() page copying
+	 * will re-increment the counters as the parent PTEs are
+	 * copied into the child address space.
+	 */
+	huge_pte_count = mm->context.huge_pte_count;
+	mm->context.huge_pte_count = 0;
+#endif
+
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
 	 */
-	mm->context.tsb = NULL;
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		mm->context.tsb_block[i].tsb = NULL;
 
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-	if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+	if (unlikely(huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
 		return -ENOMEM;
 
 	return 0;
 }
 
-void destroy_context(struct mm_struct *mm)
+static void tsb_destroy_one(struct tsb_config *tp)
 {
-	unsigned long flags, cache_index;
+	unsigned long cache_index;
 
-	cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
+	if (!tp->tsb)
+		return;
+	cache_index = tp->tsb_reg_val & 0x7UL;
+	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+	tp->tsb = NULL;
+	tp->tsb_reg_val = 0UL;
+}
 
-	/* We can remove these later, but for now it's useful
-	 * to catch any bogus post-destroy_context() references
-	 * to the TSB.
-	 */
-	mm->context.tsb = NULL;
-	mm->context.tsb_reg_val = 0UL;
+void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags, i;
+
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		tsb_destroy_one(&mm->context.tsb_block[i]);
 
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
 
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index c66a81bbc84..9d6a6dbaf12 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -71,7 +71,8 @@ struct trap_per_cpu {
 /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list.  */
 	unsigned long		cpu_mondo_block_pa;
 	unsigned long		cpu_list_pa;
-	unsigned long		__pad1[2];
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
 
 /* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size.  */
 	unsigned long		__pad2[4];
@@ -116,6 +117,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_FAULT_INFO		0x40
 #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
 
 #define TRAP_BLOCK_SZ_SHIFT		8
 
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 230ba678d3b..2d4f2ea9568 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,18 +90,39 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 extern void tsb_init(struct tsb *tsb, unsigned long size);
 
-typedef struct {
-	spinlock_t		lock;
-	unsigned long		sparc64_ctx_val;
+struct tsb_config {
 	struct tsb		*tsb;
 	unsigned long		tsb_rss_limit;
 	unsigned long		tsb_nentries;
 	unsigned long		tsb_reg_val;
 	unsigned long		tsb_map_vaddr;
 	unsigned long		tsb_map_pte;
-	struct hv_tsb_descr	tsb_descr;
+};
+
+#define MM_TSB_BASE	0
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define MM_TSB_HUGE	1
+#define MM_NUM_TSBS	2
+#else
+#define MM_NUM_TSBS	1
+#endif
+
+typedef struct {
+	spinlock_t		lock;
+	unsigned long		sparc64_ctx_val;
+	unsigned long		huge_pte_count;
+	struct tsb_config	tsb_block[MM_NUM_TSBS];
+	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
 
+#define TSB_CONFIG_TSB		0x00
+#define TSB_CONFIG_RSS_LIMIT	0x08
+#define TSB_CONFIG_NENTRIES	0x10
+#define TSB_CONFIG_REG_VAL	0x18
+#define TSB_CONFIG_MAP_VADDR	0x20
+#define TSB_CONFIG_MAP_PTE	0x28
+
 #endif /* __MMU_H */
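
The TSB_CONFIG_* constants above hard-code struct tsb_config's field
offsets for the assembly consumers; a hedged compile-time cross-check
one could imagine (not part of this patch; assumes BUILD_BUG_ON() and
offsetof() are usable in this context):

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */

/* Each tsb_config field is 8 bytes, so the offsets step by 0x08. */
static inline void tsb_config_offsets_check(void)
{
	BUILD_BUG_ON(offsetof(struct tsb_config, tsb) != TSB_CONFIG_TSB);
	BUILD_BUG_ON(offsetof(struct tsb_config, tsb_rss_limit) !=
		     TSB_CONFIG_RSS_LIMIT);
	BUILD_BUG_ON(offsetof(struct tsb_config, tsb_map_pte) !=
		     TSB_CONFIG_MAP_PTE);
}
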
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index e7974321d05..2337eb48771 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -29,20 +29,25 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
 extern void __tsb_context_switch(unsigned long pgd_pa,
-				 unsigned long tsb_reg,
-				 unsigned long tsb_vaddr,
-				 unsigned long tsb_pte,
+				 struct tsb_config *tsb_base,
+				 struct tsb_config *tsb_huge,
 				 unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
-	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
-			     mm->context.tsb_map_vaddr,
-			     mm->context.tsb_map_pte,
-			     __pa(&mm->context.tsb_descr));
+	__tsb_context_switch(__pa(mm->pgd),
+			     &mm->context.tsb_block[0],
+#ifdef CONFIG_HUGETLB_PAGE
+			     (mm->context.tsb_block[1].tsb ?
+			      &mm->context.tsb_block[1] :
+			      NULL)
+#else
+			     NULL
+#endif
+			     , __pa(&mm->context.tsb_descr[0]));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
+extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
 #ifdef CONFIG_SMP
 extern void smp_tsb_sync(struct mm_struct *mm);
 #else
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index fcb2812265f..66fe4ac59fd 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -30,6 +30,23 @@
 
 #ifdef __KERNEL__
 
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HPAGE_SHIFT		22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HPAGE_SHIFT		19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT		16
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern void _clear_page(void *page);
@@ -90,23 +107,6 @@ typedef unsigned long pgprot_t;
 
 #endif /* (STRICT_MM_TYPECHECKS) */
 
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define HPAGE_SHIFT		22
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define HPAGE_SHIFT		19
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define HPAGE_SHIFT		16
-#endif
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
-#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
 				 (_AC(0x0000000070000000,UL)) : \
 				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index ed4124edf83..c44e7466534 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -105,6 +105,7 @@
 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
 #define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
 #define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
 #define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
@@ -150,6 +151,7 @@
 #define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
 #define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
 #define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
+#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */
 
 #if PAGE_SHIFT == 13
 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
-- 
cgit v1.2.3-70-g09d2
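
As an aside on the sizing policy that the tsb_grow() comment above
describes: a minimal standalone sketch, assuming the 16-byte TSB entry
implied by the hp->num_ttes = tsb_bytes / 16 computation in
setup_tsb_params(); the helper name and constants are illustrative and
not part of the patch.

/* Pick the smallest TSB, from 8K to 1MB in powers of two, whose
 * 3/4-capacity grow threshold still covers the given RSS.
 */
#define TSB_ENTRY_BYTES	16UL		/* sizeof(struct tsb) */
#define TSB_MIN_BYTES	(8UL * 1024)
#define TSB_MAX_BYTES	(1024UL * 1024)

static unsigned long tsb_bytes_for_rss(unsigned long rss)
{
	unsigned long bytes;

	for (bytes = TSB_MIN_BYTES; bytes < TSB_MAX_BYTES; bytes <<= 1) {
		unsigned long nentries = bytes / TSB_ENTRY_BYTES;

		/* the rss_limit is 3/4 of the entry count at this size */
		if (rss < nentries - (nentries >> 2))
			break;
	}
	return bytes;
}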


From 804f1594cc3deb161e531a43d90c501f0db2635a Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@parisc-linux.org>
Date: Thu, 23 Mar 2006 03:00:16 -0800
Subject: [PATCH] Move read_mostly definition to asm/cache.h

Seems like needless clutter having a bunch of #if defined(CONFIG_$ARCH) in
include/linux/cache.h.  Move the per-architecture section definition to
asm/cache.h, and keep the if-not-defined dummy case in linux/cache.h to
catch architectures which don't implement the section.

Verified that symbols still go in .data.read_mostly on parisc,
and the compile doesn't break.

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 include/asm-i386/cache.h    | 2 ++
 include/asm-ia64/cache.h    | 2 ++
 include/asm-parisc/cache.h  | 2 ++
 include/asm-sparc64/cache.h | 2 ++
 include/asm-x86_64/cache.h  | 2 ++
 include/linux/cache.h       | 4 +---
 6 files changed, 11 insertions(+), 3 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 615911e5bd2..ca15c9c665c 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,4 +10,6 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
index 40dd25195d6..f0a104db8f2 100644
--- a/include/asm-ia64/cache.h
+++ b/include/asm-ia64/cache.h
@@ -25,4 +25,6 @@
 # define SMP_CACHE_BYTES	(1 << 3)
 #endif
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif /* _ASM_IA64_CACHE_H */
diff --git a/include/asm-parisc/cache.h b/include/asm-parisc/cache.h
index 93f179f13ce..ae50f8e12ee 100644
--- a/include/asm-parisc/cache.h
+++ b/include/asm-parisc/cache.h
@@ -29,6 +29,8 @@
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 extern void flush_data_cache_local(void *);  /* flushes local data-cache only */
 extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
 #ifdef CONFIG_SMP
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index f7d35a2ae9b..e9df17acedd 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -13,4 +13,6 @@
 #define        SMP_CACHE_BYTES_SHIFT	6
 #define        SMP_CACHE_BYTES		(1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 263f0a211ed..c8043a16152 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -20,6 +20,8 @@
        __attribute__((__section__(".data.page_aligned")))
 #endif
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif
 
 #endif
diff --git a/include/linux/cache.h b/include/linux/cache.h
index d22e632f41f..cc4b3aafad9 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,9 +13,7 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
 
-#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-#else
+#ifndef __read_mostly
 #define __read_mostly
 #endif
 
-- 
cgit v1.2.3-70-g09d2
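
For context on how the relocated annotation is consumed, a hedged
example (the flag variable is invented for illustration):

/* A value read on hot paths but written rarely lands in
 * .data.read_mostly, away from frequently-written data that would
 * otherwise share, and ping-pong, its cache line.
 */
#include <linux/cache.h>

static int example_feature_enabled __read_mostly = 1;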


From 0b2fcfdb8b4e7e379192f24ea2203163ddf5df1d Mon Sep 17 00:00:00 2001
From: Nick Piggin <npiggin@suse.de>
Date: Thu, 23 Mar 2006 03:01:02 -0800
Subject: [PATCH] atomic: add_unless cmpxchg optimise

Without branch hints, the gcc-4 versions I have tested unroll the very
unlikely retry path taken when the cmpxchg fails.

Improve this for architectures with a native cas/cmpxchg.  llsc archs
should try to implement this natively.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Andi Kleen <ak@muc.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 include/asm-i386/atomic.h    |  8 +++++++-
 include/asm-ia64/atomic.h    |  8 +++++++-
 include/asm-m68k/atomic.h    |  8 +++++++-
 include/asm-s390/atomic.h    | 18 ++++++++++++++----
 include/asm-sparc64/atomic.h | 10 ++++++++--
 include/asm-x86_64/atomic.h  |  8 +++++++-
 6 files changed, 50 insertions(+), 10 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 78b0032d1f2..22d80ece95c 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -225,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index d3e0dfa99e1..569ec7574ba 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 862e497c264..732d696d31a 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -175,8 +175,14 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index be6fefe223d..de1d9926aa6 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -89,10 +89,15 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
-
 	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic_cmpxchg(v, c, c + a);
+		if (likely(old == c))
+			break;
 		c = old;
+	}
 	return c != u;
 }
 
@@ -167,10 +172,15 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 					  long long a, long long u)
 {
 	long long c, old;
-
 	c = atomic64_read(v);
-	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic64_cmpxchg(v, c, c + a);
+		if (likely(old == c))
+			break;
 		c = old;
+	}
 	return c != u;
 }
 
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 25256bdc8aa..468eb48d814 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -78,9 +78,15 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
-	c != (u);						\
+	}							\
+	likely(c != (u));					\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e77..cecbf7baa6a 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-- 
cgit v1.2.3-70-g09d2
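
The loop shape this patch introduces, written out once as plain C (a
sketch mirroring the s390 version above; the function name is
illustrative):

#include <linux/compiler.h>	/* likely()/unlikely() */
#include <asm/atomic.h>

/* Add @a to @v unless @v equals @u; return nonzero if the add
 * happened.  The explicit for (;;) with branch hints marks both the
 * u-match exit and the cmpxchg-failure retry as cold paths, so the
 * compiler no longer unrolls the retry.
 */
static inline int sketch_atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))		/* hit the excluded value */
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))		/* uncontended: done */
			break;
		c = old;			/* lost a race: retry */
	}
	return c != u;
}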


From 394e3902c55e667945f6f1c2bdbc59842cce70f7 Mon Sep 17 00:00:00 2001
From: Andrew Morton <akpm@osdl.org>
Date: Thu, 23 Mar 2006 03:01:05 -0800
Subject: [PATCH] more for_each_cpu() conversions

When we stop allocating percpu memory for not-possible CPUs we must not touch
the percpu data for not-possible CPUs at all.  The correct way of doing this
is to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very
few instances of this bug, if any.  But the patch converts lots of open-coded
tests to use the preferred helper macros.

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 arch/cris/kernel/irq.c                     | 10 ++++------
 arch/frv/kernel/irq.c                      | 10 ++++------
 arch/i386/kernel/cpu/cpufreq/powernow-k8.c |  4 +---
 arch/i386/kernel/io_apic.c                 | 22 +++++++++-------------
 arch/i386/kernel/nmi.c                     |  4 ++--
 arch/i386/oprofile/nmi_int.c               |  7 ++-----
 arch/m32r/kernel/irq.c                     | 10 ++++------
 arch/mips/kernel/irq.c                     | 10 ++++------
 arch/mips/kernel/smp.c                     |  4 ++--
 arch/mips/sgi-ip27/ip27-irq.c              |  5 +----
 arch/parisc/kernel/smp.c                   | 25 ++++++++++---------------
 arch/powerpc/kernel/irq.c                  |  5 ++---
 arch/powerpc/kernel/setup-common.c         |  5 ++---
 arch/powerpc/kernel/setup_32.c             |  5 ++---
 arch/powerpc/platforms/powermac/smp.c      |  4 +---
 arch/ppc/kernel/setup.c                    | 10 ++++------
 arch/s390/kernel/smp.c                     |  4 +---
 arch/sh/kernel/irq.c                       |  5 ++---
 arch/sh/kernel/setup.c                     |  5 ++---
 arch/sh64/kernel/irq.c                     |  5 ++---
 arch/sparc/kernel/irq.c                    |  5 ++---
 arch/sparc/kernel/smp.c                    | 24 ++++++++++--------------
 arch/sparc/kernel/sun4d_irq.c              |  8 +++-----
 arch/sparc/kernel/sun4d_smp.c              |  8 +++-----
 arch/sparc/kernel/sun4m_smp.c              |  6 ++----
 arch/sparc64/kernel/irq.c                  |  4 +---
 arch/sparc64/kernel/smp.c                  | 30 ++++++++++++------------------
 arch/x86_64/kernel/irq.c                   | 21 ++++++++-------------
 arch/x86_64/kernel/nmi.c                   |  4 +---
 arch/xtensa/kernel/irq.c                   | 15 ++++++---------
 drivers/net/loopback.c                     |  4 +---
 drivers/oprofile/cpu_buffer.c              |  3 +--
 fs/xfs/linux-2.6/xfs_stats.c               |  7 ++-----
 fs/xfs/linux-2.6/xfs_sysctl.c              |  3 +--
 include/asm-alpha/mmu_context.h            |  5 ++---
 include/asm-alpha/topology.h               |  4 ++--
 include/asm-generic/percpu.h               |  7 +++----
 include/asm-powerpc/percpu.h               |  7 +++----
 include/asm-s390/percpu.h                  |  7 +++----
 include/asm-sparc64/percpu.h               |  7 +++----
 include/asm-x86_64/percpu.h                |  7 +++----
 include/linux/genhd.h                      | 14 ++++----------
 42 files changed, 137 insertions(+), 222 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 30deaf1b728..b504def3e34 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,9 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -67,9 +66,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, "  %s", action->name);
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 27ab4c30aac..11fa326a8f6 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -75,9 +75,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	switch (i) {
 	case 0:
 		seq_printf(p, "           ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 
 		seq_putc(p, '\n');
 		break;
@@ -100,9 +99,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
 #endif
 
 		level = group->sources[ix]->level - frv_irq_levels;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e11a09207ec..3d5110b65cc 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0;
 
-	for (i=0; i<NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i) {
 		if (check_supported_cpu(i))
 			supported_cpus++;
 	}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fd1c60cfd29..311b4e7266f 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ?  */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i)) 
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else 
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1db34effdd8..9074818b947 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		alert_counter[i] = 0;
 
 	/*
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 0493e8b8ec4..1accce50c2c 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
 static void free_msrs(void)
 {
 	int i;
-	for (i = 0; i < NR_CPUS; ++i) {
+	for_each_cpu(i) {
 		kfree(cpu_msrs[i].counters);
 		cpu_msrs[i].counters = NULL;
 		kfree(cpu_msrs[i].controls);
@@ -138,10 +138,7 @@ static int allocate_msrs(void)
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
 	int i;
-	for (i = 0; i < NR_CPUS; ++i) {
-		if (!cpu_online(i))
-			continue;
-
+	for_each_online_cpu(i) {
 		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
 		if (!cpu_msrs[i].counters) {
 			success = 0;
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 1ce63926a3c..a4634b06f67 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, "  %s", action->name);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7d93992e462..3dd76b3d296 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -68,9 +68,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -83,9 +82,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, "  %s", action->name);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 06ed9075242..78d171bfa33 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != cpu)
+	for_each_online_cpu(i)
+		if (i != cpu)
 			core_send_ipi(i, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 73e5e52781d..2854ac4c9be 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -88,12 +88,9 @@ static inline int find_level(cpuid_t *cpunum, int irq)
 {
 	int cpu, i;
 
-	for (cpu = 0; cpu <= NR_CPUS; cpu++) {
+	for_each_online_cpu(cpu) {
 		struct slice_data *si = cpu_data[cpu].data;
 
-		if (!cpu_online(cpu))
-			continue;
-
 		for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
 			if (si->level_to_irq[i] == irq) {
 				*cpunum = cpu;
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 25564b7ca6b..d6ac1c60a47 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op)
 {
 	int i;
 	
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i) && i != smp_processor_id())
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -643,14 +643,13 @@ int sys_cpus(int argc, char **argv)
 	if ( argc == 1 ){
 	
 #ifdef DUMP_MORE_STATE
-		for(i=0; i<NR_CPUS; i++) {
+		for_each_online_cpu(i) {
 			int cpus_per_line = 4;
-			if(cpu_online(i)) {
-				if (j++ % cpus_per_line)
-					printk(" %3d",i);
-				else
-					printk("\n %3d",i);
-			}
+
+			if (j++ % cpus_per_line)
+				printk(" %3d",i);
+			else
+				printk("\n %3d",i);
 		}
 		printk("\n"); 
 #else
@@ -659,9 +658,7 @@ int sys_cpus(int argc, char **argv)
 	} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
 		printk("\nCPUSTATE  TASK CPUNUM CPUID HARDCPU(HPA)\n");
 #ifdef DUMP_MORE_STATE
-		for(i=0;i<NR_CPUS;i++) {
-			if (!cpu_online(i))
-				continue;
+		for_each_online_cpu(i) {
 			if (cpu_data[i].cpuid != NO_PROC_ID) {
 				switch(cpu_data[i].state) {
 					case STATE_RENDEZVOUS:
@@ -695,9 +692,7 @@ int sys_cpus(int argc, char **argv)
 	} else if ((argc==2) && !(strcmp(argv[1],"-s"))) { 
 #ifdef DUMP_MORE_STATE
      		printk("\nCPUSTATE   CPUID\n");
-		for (i=0;i<NR_CPUS;i++) {
-			if (!cpu_online(i))
-				continue;
+		for_each_online_cpu(i) {
 			if (cpu_data[i].cpuid != NO_PROC_ID) {
 				switch(cpu_data[i].state) {
 					case STATE_RENDEZVOUS:
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 24dc8117b82..771a59cbd21 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -135,9 +135,8 @@ skip:
 #ifdef CONFIG_TAU_INT
 		if (tau_initialized){
 			seq_puts(p, "TAU: ");
-			for (j = 0; j < NR_CPUS; j++)
-				if (cpu_online(j))
-					seq_printf(p, "%10u ", tau_interrupts(j));
+			for_each_online_cpu(j)
+				seq_printf(p, "%10u ", tau_interrupts(j));
 			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
 		}
 #endif
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index be12041c0fc..c1d62bf11f2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -162,9 +162,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
 		unsigned long bogosum = 0;
 		int i;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i))
-				bogosum += loops_per_jiffy;
+		for_each_online_cpu(i)
+			bogosum += loops_per_jiffy;
 		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
 			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
 #endif /* CONFIG_SMP && CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index db72a92943b..dc2770df25b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -272,9 +272,8 @@ int __init ppc_init(void)
 	if ( ppc_md.progress ) ppc_md.progress("             ", 0xffff);
 
 	/* register CPU devices */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			register_cpu(&cpu_devices[i], i, NULL);
+	for_each_cpu(i)
+		register_cpu(&cpu_devices[i], i, NULL);
 
 	/* call platform init */
 	if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6d64a9bf347..1065d87fc27 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -191,9 +191,7 @@ static void smp_psurge_message_pass(int target, int msg)
 	if (num_online_cpus() < 2)
 		return;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (target == MSG_ALL
 		    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
 		    || target == i) {
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index c08ab432e95..53e9deacee8 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -168,9 +168,8 @@ int show_cpuinfo(struct seq_file *m, void *v)
 		/* Show summary information */
 #ifdef CONFIG_SMP
 		unsigned long bogosum = 0;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i))
-				bogosum += cpu_data[i].loops_per_jiffy;
+		for_each_online_cpu(i)
+			bogosum += cpu_data[i].loops_per_jiffy;
 		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
 			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
 #endif /* CONFIG_SMP */
@@ -712,9 +711,8 @@ int __init ppc_init(void)
 	if ( ppc_md.progress ) ppc_md.progress("             ", 0xffff);
 
 	/* register CPU devices */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			register_cpu(&cpu_devices[i], i, NULL);
+	for_each_cpu(i)
+		register_cpu(&cpu_devices[i], i, NULL);
 
 	/* call platform init */
 	if (ppc_md.init != NULL) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7dbe00c76c6..d52d6d211d9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -799,9 +799,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
          */
 	print_cpu_info(&S390_lowcore.cpu_data);
 
-        for(i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+        for_each_cpu(i) {
 		lowcore_ptr[i] = (struct _lowcore *)
 			__get_free_pages(GFP_KERNEL|GFP_DMA, 
 					sizeof(void*) == 8 ? 1 : 0);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 6883c00728c..b56e79632f2 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -35,9 +35,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_puts(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index a067a34e0b6..c0e79843f58 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -404,9 +404,8 @@ static int __init topology_init(void)
 {
 	int cpu_id;
 
-	for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
-		if (cpu_possible(cpu_id))
-			register_cpu(&cpu[cpu_id], cpu_id, NULL);
+	for_each_cpu(cpu_id)
+		register_cpu(&cpu[cpu_id], cpu_id, NULL);
 
 	return 0;
 }
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
index 9fc2b71dbd8..d69879c0e06 100644
--- a/arch/sh64/kernel/irq.c
+++ b/arch/sh64/kernel/irq.c
@@ -53,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_puts(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 410b9a72aba..4c60a6ef54a 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -184,9 +184,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++) {
-			if (cpu_online(j))
-				seq_printf(p, "%10u ",
+		for_each_online_cpu(j) {
+			seq_printf(p, "%10u ",
 				    kstat_cpu(cpu_logical_map(j)).irqs[i]);
 		}
 #endif
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index c6e721d8f47..ea5682ce703 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -243,9 +243,8 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for(i = 0; i < NR_CPUS; i++) {
-		if (cpu_possible(i))
-			load_profile_irq(i, lvl14_resolution / multiplier);
+	for_each_cpu(i) {
+		load_profile_irq(i, lvl14_resolution / multiplier);
 		prof_multiplier(i) = multiplier;
 	}
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -273,13 +272,12 @@ void smp_bogo(struct seq_file *m)
 {
 	int i;
 	
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m,
-				   "Cpu%dBogo\t: %lu.%02lu\n", 
-				   i,
-				   cpu_data(i).udelay_val/(500000/HZ),
-				   (cpu_data(i).udelay_val/(5000/HZ))%100);
+	for_each_online_cpu(i) {
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n",
+			   i,
+			   cpu_data(i).udelay_val/(500000/HZ),
+			   (cpu_data(i).udelay_val/(5000/HZ))%100);
 	}
 }
 
@@ -288,8 +286,6 @@ void smp_info(struct seq_file *m)
 	int i;
 
 	seq_printf(m, "State:\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m, "CPU%d\t\t: online\n", i);
-	}
+	for_each_online_cpu(i)
+		seq_printf(m, "CPU%d\t\t: online\n", i);
 }
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 52621348a56..cea7fc6fc6e 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -103,11 +103,9 @@ found_it:	seq_printf(p, "%3d: ", i);
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (x = 0; x < NR_CPUS; x++) {
-			if (cpu_online(x))
-				seq_printf(p, "%10u ",
-				       kstat_cpu(cpu_logical_map(x)).irqs[i]);
-		}
+		for_each_online_cpu(x)
+			seq_printf(p, "%10u ",
+			       kstat_cpu(cpu_logical_map(x)).irqs[i]);
 #endif
 		seq_printf(p, "%c %s",
 			(action->flags & SA_INTERRUPT) ? '+' : ' ',
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 4219dd2ce3a..41bb9596be4 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -249,11 +249,9 @@ void __init smp4d_boot_cpus(void)
 	} else {
 		unsigned long bogosum = 0;
 		
-		for(i = 0; i < NR_CPUS; i++) {
-			if (cpu_isset(i, cpu_present_map)) {
-				bogosum += cpu_data(i).udelay_val;
-				smp_highest_cpu = i;
-			}
+		for_each_present_cpu(i) {
+			bogosum += cpu_data(i).udelay_val;
+			smp_highest_cpu = i;
 		}
 		SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
 		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index fbbd8a474c4..1dde312eebd 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -218,10 +218,8 @@ void __init smp4m_boot_cpus(void)
 		cpu_present_map = cpumask_of_cpu(smp_processor_id());
 	} else {
 		unsigned long bogosum = 0;
-		for(i = 0; i < NR_CPUS; i++) {
-			if (cpu_isset(i, cpu_present_map))
-				bogosum += cpu_data(i).udelay_val;
-		}
+		for_each_present_cpu(i)
+			bogosum += cpu_data(i).udelay_val;
 		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
 		       cpucount + 1,
 		       bogosum/(500000/HZ),
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 8c93ba655b3..e505a4125e3 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -117,9 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++) {
-			if (!cpu_online(j))
-				continue;
+		for_each_online_cpu(j) {
 			seq_printf(p, "%10u ",
 				   kstat_cpu(j).irqs[i]);
 		}
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 373a701c90a..1b6e2ade100 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -57,25 +57,21 @@ void smp_info(struct seq_file *m)
 	int i;
 	
 	seq_printf(m, "State:\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m,
-				   "CPU%d:\t\tonline\n", i);
-	}
+	for_each_online_cpu(i)
+		seq_printf(m, "CPU%d:\t\tonline\n", i);
 }
 
 void smp_bogo(struct seq_file *m)
 {
 	int i;
 	
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			seq_printf(m,
-				   "Cpu%dBogo\t: %lu.%02lu\n"
-				   "Cpu%dClkTck\t: %016lx\n",
-				   i, cpu_data(i).udelay_val / (500000/HZ),
-				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
-				   i, cpu_data(i).clock_tick);
+	for_each_online_cpu(i)
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n"
+			   "Cpu%dClkTck\t: %016lx\n",
+			   i, cpu_data(i).udelay_val / (500000/HZ),
+			   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
+			   i, cpu_data(i).clock_tick);
 }
 
 void __init smp_store_cpu_info(int id)
@@ -1282,7 +1278,7 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1384,10 +1380,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	unsigned long bogosum = 0;
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			bogosum += cpu_data(i).udelay_val;
-	}
+	for_each_online_cpu(i)
+		bogosum += cpu_data(i).udelay_val;
 	printk("Total of %ld processors activated "
 	       "(%lu.%02lu BogoMIPS).\n",
 	       (long) num_online_cpus(),
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 30d2a1e545f..d8bd0b345b1 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-			seq_printf(p, "%10u ",
-				kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 
@@ -68,15 +65,13 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
 		seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
 		seq_printf(p, "LOC: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
 		seq_putc(p, '\n');
 #endif
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5bf17e41cd2..66c009e10ba 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -162,9 +162,7 @@ int __init check_nmi_watchdog (void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
+	for_each_online_cpu(cpu) {
 		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
 			endflag = 1;
 			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4cbf6d91571..51f9bed455f 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -83,9 +83,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -98,9 +97,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, "  %s", action->name);
@@ -113,9 +111,8 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", nmi_count(j));
 		seq_putc(p, '\n');
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 690a1aae0b3..0c13795dca3 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -172,11 +172,9 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 
 	memset(stats, 0, sizeof(struct net_device_stats));
 
-	for (i=0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct net_device_stats *lb_stats;
 
-		if (!cpu_possible(i)) 
-			continue;
 		lb_stats = &per_cpu(loopback_stats, i);
 		stats->rx_bytes   += lb_stats->rx_bytes;
 		stats->tx_bytes   += lb_stats->tx_bytes;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 78193e4bbdb..330d3869b41 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -38,9 +38,8 @@ void free_cpu_buffers(void)
 {
 	int i;
  
-	for_each_online_cpu(i) {
+	for_each_online_cpu(i)
 		vfree(cpu_buffer[i].buffer);
-	}
 }
 
 int alloc_cpu_buffers(void)
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 8955720a2c6..713e6a7505d 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -62,18 +62,15 @@ xfs_read_xfsstats(
 		while (j < xstats[i].endpoint) {
 			val = 0;
 			/* sum over all cpus */
-			for (c = 0; c < NR_CPUS; c++) {
-				if (!cpu_possible(c)) continue;
+			for_each_cpu(c)
 				val += *(((__u32*)&per_cpu(xfsstats, c) + j));
-			}
 			len += sprintf(buffer + len, " %u", val);
 			j++;
 		}
 		buffer[len++] = '\n';
 	}
 	/* extra precision counters */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i)) continue;
+	for_each_cpu(i) {
 		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
 		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
 		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index a0256497242..7079cc83721 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler(
 
 	if (!ret && write && *valp) {
 		printk("XFS Clearing xfsstats\n");
-		for (c = 0; c < NR_CPUS; c++) {
-			if (!cpu_possible(c)) continue;
+		for_each_cpu(c) {
 			preempt_disable();
 			/* save vn_active, it's a universal truth! */
 			vn_active = per_cpu(xfsstats, c).vn_active;
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6f92482cc96..0c017fc181c 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -231,9 +231,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			mm->context[i] = 0;
+	for_each_online_cpu(i)
+		mm->context[i] = 0;
 	if (tsk != current)
 		task_thread_info(tsk)->pcb.ptbr
 		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
index eb740e280d9..420ccde6b91 100644
--- a/include/asm-alpha/topology.h
+++ b/include/asm-alpha/topology.h
@@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node)
 	cpumask_t node_cpu_mask = CPU_MASK_NONE;
 	int cpu;
 
-	for(cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
+	for_each_online_cpu(cpu) {
+		if (cpu_to_node(cpu) == node)
 			cpu_set(cpu, node_cpu_mask);
 	}
 
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 9044aeb3782..78cf45547e3 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -19,10 +19,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for (__i = 0; __i < NR_CPUS; __i++)			\
-		if (cpu_possible(__i))				\
-			memcpy((pcpudst)+__per_cpu_offset[__i],	\
-			       (src), (size));			\
+	for_each_cpu(__i)					\
+		memcpy((pcpudst)+__per_cpu_offset[__i],		\
+		       (src), (size));				\
 } while (0)
 #else /* ! SMP */
 
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index e31922c50e5..464301cd0d0 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -27,10 +27,9 @@
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for (__i = 0; __i < NR_CPUS; __i++)			\
-		if (cpu_possible(__i))				\
-			memcpy((pcpudst)+__per_cpu_offset(__i),	\
-			       (src), (size));			\
+	for_each_cpu(__i)					\
+		memcpy((pcpudst)+__per_cpu_offset(__i),		\
+		       (src), (size));				\
 } while (0)
 
 extern void setup_per_cpu_areas(void);
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 123fcaca295..e10ed87094f 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -46,10 +46,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for (__i = 0; __i < NR_CPUS; __i++)			\
-		if (cpu_possible(__i))				\
-			memcpy((pcpudst)+__per_cpu_offset[__i],	\
-			       (src), (size));			\
+	for_each_cpu(__i)					\
+		memcpy((pcpudst)+__per_cpu_offset[__i],		\
+		       (src), (size));				\
 } while (0)
 
 #else /* ! SMP */
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index aea4e51e7cd..82032e159a7 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -26,10 +26,9 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for (__i = 0; __i < NR_CPUS; __i++)			\
-		if (cpu_possible(__i))				\
-			memcpy((pcpudst)+__per_cpu_offset(__i),	\
-			       (src), (size));			\
+	for_each_cpu(__i)					\
+		memcpy((pcpudst)+__per_cpu_offset(__i),		\
+		       (src), (size));				\
 } while (0)
 #else /* ! SMP */
 
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 29a6b0408f7..4405b4adeab 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,10 +26,9 @@
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for (__i = 0; __i < NR_CPUS; __i++)			\
-		if (cpu_possible(__i))				\
-			memcpy((pcpudst)+__per_cpu_offset(__i),	\
-			       (src), (size));			\
+	for_each_cpu(__i)					\
+		memcpy((pcpudst)+__per_cpu_offset(__i),		\
+		       (src), (size));				\
 } while (0)
 
 extern void setup_per_cpu_areas(void);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index eef5ccdcd73..fd647fde5ec 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -149,22 +149,16 @@ struct disk_attribute {
 ({									\
 	typeof(gendiskp->dkstats->field) res = 0;			\
 	int i;								\
-	for (i=0; i < NR_CPUS; i++) {					\
-		if (!cpu_possible(i))					\
-			continue;					\
+	for_each_cpu(i)							\
 		res += per_cpu_ptr(gendiskp->dkstats, i)->field;	\
-	}								\
 	res;								\
 })
 
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value)	{
 	int i;
-	for (i=0; i < NR_CPUS; i++) {
-		if (cpu_possible(i)) {
-			memset(per_cpu_ptr(gendiskp->dkstats, i), value,	
-					sizeof (struct disk_stats));
-		}
-	}
+	for_each_cpu(i)
+		memset(per_cpu_ptr(gendiskp->dkstats, i), value,
+				sizeof (struct disk_stats));
 }		
 				
 #else
-- 
cgit v1.2.3-70-g09d2
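
The conversion pattern of this sweep, distilled into one hedged sketch
(the per-cpu counter and function are invented for illustration; in
this era for_each_cpu() walks possible CPUs and for_each_online_cpu()
walks online ones):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

/* was:	for (i = 0; i < NR_CPUS; i++) {
 *		if (!cpu_possible(i))
 *			continue;
 *		total += per_cpu(example_counter, i);
 *	}
 */
static unsigned long sum_example_counters(void)
{
	unsigned long total = 0;
	int i;

	for_each_cpu(i)			/* possible CPUs only */
		total += per_cpu(example_counter, i);
	return total;
}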


From f348d70a324e15afc701a494f32ec468abb7d1eb Mon Sep 17 00:00:00 2001
From: Davide Libenzi <davidel@xmailserver.org>
Date: Sat, 25 Mar 2006 03:07:39 -0800
Subject: [PATCH] POLLRDHUP/EPOLLRDHUP handling for half-closed devices
 notifications

Implement the half-closed device notification by adding a new POLLRDHUP
(and its alias EPOLLRDHUP) bit to the existing poll/select sets.  Since the
existing POLLHUP handling, that does not report correctly half-closed
devices, was feared to be changed, this implementation leaves the current
POLLHUP reporting unchanged and simply add a new bit that is set in the few
places where it makes sense.  The same thing was discussed and conceptually
agreed quite some time ago:

http://lkml.org/lkml/2003/7/12/116

Since this new event bit is added to the existing Linux poll
infrastructure, even the existing poll/select system calls will be able
to use it.  As for the existing POLLHUP handling, the patch leaves it as
is.  The pollrdhup-2.6.16.rc5-0.10.diff defines POLLRDHUP for all the
existing archs and sets the bit in the six relevant files.  The other
attached diff is the simple change required to sys/epoll.h to add the
EPOLLRDHUP definition.

There is "a stupid program" to test POLLRDHUP delivery here:

 http://www.xmailserver.org/pollrdhup-test.c

It tests poll(2), but since the delivery is the same, epoll(2) will work equally well.
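
For illustration, a minimal epoll-based sketch of how a caller might
detect a peer's half-close with the new bit (error handling omitted;
assumes a connected TCP socket descriptor "sk" and headers updated with
the EPOLLRDHUP definition):

	#include <sys/epoll.h>

	int ep = epoll_create(1);
	struct epoll_event ev = { .events = EPOLLIN | EPOLLRDHUP };

	ev.data.fd = sk;
	epoll_ctl(ep, EPOLL_CTL_ADD, sk, &ev);
	epoll_wait(ep, &ev, 1, -1);
	if (ev.events & EPOLLRDHUP) {
		/* peer did shutdown(SHUT_WR): drain any remaining
		 * data, then close our side */
	}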

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Michael Kerrisk <mtk-manpages@gmx.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 fs/eventpoll.c               | 4 ++--
 include/asm-alpha/poll.h     | 2 ++
 include/asm-arm/poll.h       | 1 +
 include/asm-arm26/poll.h     | 1 +
 include/asm-cris/poll.h      | 1 +
 include/asm-frv/poll.h       | 1 +
 include/asm-h8300/poll.h     | 1 +
 include/asm-i386/poll.h      | 1 +
 include/asm-ia64/poll.h      | 1 +
 include/asm-m32r/poll.h      | 1 +
 include/asm-m68k/poll.h      | 1 +
 include/asm-mips/poll.h      | 1 +
 include/asm-parisc/poll.h    | 1 +
 include/asm-powerpc/poll.h   | 1 +
 include/asm-s390/poll.h      | 1 +
 include/asm-sh/poll.h        | 1 +
 include/asm-sh64/poll.h      | 1 +
 include/asm-sparc/poll.h     | 1 +
 include/asm-sparc64/poll.h   | 1 +
 include/asm-v850/poll.h      | 1 +
 include/asm-x86_64/poll.h    | 1 +
 include/asm-xtensa/poll.h    | 1 +
 net/bluetooth/af_bluetooth.c | 3 +++
 net/core/datagram.c          | 2 ++
 net/dccp/proto.c             | 2 +-
 net/ipv4/tcp.c               | 2 +-
 net/sctp/socket.c            | 2 ++
 net/unix/af_unix.c           | 2 ++
 28 files changed, 35 insertions(+), 4 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1c2b16fda13..a0f682cdd03 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -599,7 +599,7 @@ sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
 	switch (op) {
 	case EPOLL_CTL_ADD:
 		if (!epi) {
-			epds.events |= POLLERR | POLLHUP;
+			epds.events |= POLLERR | POLLHUP | POLLRDHUP;
 
 			error = ep_insert(ep, &epds, tfile, fd);
 		} else
@@ -613,7 +613,7 @@ sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
 		break;
 	case EPOLL_CTL_MOD:
 		if (epi) {
-			epds.events |= POLLERR | POLLHUP;
+			epds.events |= POLLERR | POLLHUP | POLLRDHUP;
 			error = ep_modify(ep, epi, &epds);
 		} else
 			error = -ENOENT;
diff --git a/include/asm-alpha/poll.h b/include/asm-alpha/poll.h
index 34f333b762a..95707182b3e 100644
--- a/include/asm-alpha/poll.h
+++ b/include/asm-alpha/poll.h
@@ -13,6 +13,8 @@
 #define POLLWRBAND	(1 << 9)
 #define POLLMSG		(1 << 10)
 #define POLLREMOVE	(1 << 11)
+#define POLLRDHUP       (1 << 12)
+
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-arm/poll.h b/include/asm-arm/poll.h
index 2744ca831f5..5030b2b232a 100644
--- a/include/asm-arm/poll.h
+++ b/include/asm-arm/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-arm26/poll.h b/include/asm-arm26/poll.h
index fdfdab064a6..9ccb7f4190c 100644
--- a/include/asm-arm26/poll.h
+++ b/include/asm-arm26/poll.h
@@ -15,6 +15,7 @@
 #define POLLWRNORM	0x0100
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-cris/poll.h b/include/asm-cris/poll.h
index 1c0efc3e4be..1b25d4cf498 100644
--- a/include/asm-cris/poll.h
+++ b/include/asm-cris/poll.h
@@ -15,6 +15,7 @@
 #define POLLWRBAND	512
 #define POLLMSG		1024
 #define POLLREMOVE	4096
+#define POLLRDHUP       8192
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-frv/poll.h b/include/asm-frv/poll.h
index 8cbcd60e334..c8fe8801d07 100644
--- a/include/asm-frv/poll.h
+++ b/include/asm-frv/poll.h
@@ -12,6 +12,7 @@
 #define POLLRDBAND	128
 #define POLLWRBAND	256
 #define POLLMSG		0x0400
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-h8300/poll.h b/include/asm-h8300/poll.h
index bf49ab8ad6d..fc52103b276 100644
--- a/include/asm-h8300/poll.h
+++ b/include/asm-h8300/poll.h
@@ -12,6 +12,7 @@
 #define POLLRDBAND	128
 #define POLLWRBAND	256
 #define POLLMSG		0x0400
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h
index aecc80a15d3..2cd4929abd4 100644
--- a/include/asm-i386/poll.h
+++ b/include/asm-i386/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-ia64/poll.h b/include/asm-ia64/poll.h
index 160258a0528..bcaf9f14024 100644
--- a/include/asm-ia64/poll.h
+++ b/include/asm-ia64/poll.h
@@ -21,6 +21,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-m32r/poll.h b/include/asm-m32r/poll.h
index 43b7acf732d..9e0e700e727 100644
--- a/include/asm-m32r/poll.h
+++ b/include/asm-m32r/poll.h
@@ -21,6 +21,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-m68k/poll.h b/include/asm-m68k/poll.h
index c4b69c4a87e..0fb8843647f 100644
--- a/include/asm-m68k/poll.h
+++ b/include/asm-m68k/poll.h
@@ -13,6 +13,7 @@
 #define POLLWRBAND	256
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-mips/poll.h b/include/asm-mips/poll.h
index a000f1f789e..70881f8c5c5 100644
--- a/include/asm-mips/poll.h
+++ b/include/asm-mips/poll.h
@@ -17,6 +17,7 @@
 /* These seem to be more or less nonstandard ...  */
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-parisc/poll.h b/include/asm-parisc/poll.h
index 1c1da86934c..20e4d03c74c 100644
--- a/include/asm-parisc/poll.h
+++ b/include/asm-parisc/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-powerpc/poll.h b/include/asm-powerpc/poll.h
index edd2054da86..9c7d1263103 100644
--- a/include/asm-powerpc/poll.h
+++ b/include/asm-powerpc/poll.h
@@ -13,6 +13,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-s390/poll.h b/include/asm-s390/poll.h
index e90a5ca4206..6f7f65ac7d2 100644
--- a/include/asm-s390/poll.h
+++ b/include/asm-s390/poll.h
@@ -24,6 +24,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-sh/poll.h b/include/asm-sh/poll.h
index 52f95b9188d..dbca9b32f4a 100644
--- a/include/asm-sh/poll.h
+++ b/include/asm-sh/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-sh64/poll.h b/include/asm-sh64/poll.h
index a420d14eb70..3a6cbad08d2 100644
--- a/include/asm-sh64/poll.h
+++ b/include/asm-sh64/poll.h
@@ -26,6 +26,7 @@
 #define POLLWRNORM	0x0100
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-sparc/poll.h b/include/asm-sparc/poll.h
index 3ddcc6481f0..26f13fb3549 100644
--- a/include/asm-sparc/poll.h
+++ b/include/asm-sparc/poll.h
@@ -13,6 +13,7 @@
 #define POLLWRBAND	256
 #define POLLMSG		512
 #define POLLREMOVE	1024
+#define POLLRDHUP       2048
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-sparc64/poll.h b/include/asm-sparc64/poll.h
index 31b611aa746..ab6b0d1bb4a 100644
--- a/include/asm-sparc64/poll.h
+++ b/include/asm-sparc64/poll.h
@@ -13,6 +13,7 @@
 #define POLLWRBAND	256
 #define POLLMSG		512
 #define POLLREMOVE	1024
+#define POLLRDHUP       2048
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-v850/poll.h b/include/asm-v850/poll.h
index 0369562c7e1..c10176c2c28 100644
--- a/include/asm-v850/poll.h
+++ b/include/asm-v850/poll.h
@@ -13,6 +13,7 @@
 #define POLLWRBAND	0x0100
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-x86_64/poll.h b/include/asm-x86_64/poll.h
index c43cbba3191..c0475a9d8bb 100644
--- a/include/asm-x86_64/poll.h
+++ b/include/asm-x86_64/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-xtensa/poll.h b/include/asm-xtensa/poll.h
index dffe447534e..6fd94773e86 100644
--- a/include/asm-xtensa/poll.h
+++ b/include/asm-xtensa/poll.h
@@ -27,6 +27,7 @@
 
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x0800
+#define POLLRDHUP       0x2000
 
 struct pollfd {
 	int fd;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index fb031fe9be9..469eda0f0df 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -238,6 +238,9 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
 
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		mask |= POLLRDHUP;
+
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b8ce6bf8118..aecddcc3040 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -500,6 +500,8 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
 	/* exceptional events? */
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		mask |= POLLRDHUP;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index d4b293e1628..1ff7328b0e1 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -350,7 +350,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
 	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
 		mask |= POLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLIN | POLLRDNORM;
+		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 
 	/* Connected? */
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4b0272c92d6..19ea5c0b094 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -365,7 +365,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
 		mask |= POLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLIN | POLLRDNORM;
+		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 
 	/* Connected? */
 	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0ea947eb681..b6e4b89539b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4894,6 +4894,8 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	/* Is there any exceptional events?  */
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		mask |= POLLRDHUP;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2b4cc2eea5b..d901465ce01 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1878,6 +1878,8 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl
 		mask |= POLLERR;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		mask |= POLLRDHUP;
 
 	/* readable? */
 	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-- 
cgit v1.2.3-70-g09d2


From 67b0ad574b5ee90f8ea58196ff8a7f3780b75365 Mon Sep 17 00:00:00 2001
From: Akinobu Mita <mita@miraclelinux.com>
Date: Sun, 26 Mar 2006 01:39:05 -0800
Subject: [PATCH] bitops: use non atomic operations for minix_*_bit() and
 ext2_*_bit()

Bitmap functions for the minix filesystem and the ext2 filesystem,
except ext2_set_bit_atomic() and ext2_clear_bit_atomic(), do not require
atomicity guarantees.

But on several architectures (cris, frv, h8300, ia64, m32r, m68k,
m68knommu, mips, s390, sh, sh64, sparc, sparc64, v850, and xtensa) they
are defined using atomic bit operations.

This patch switches them to non-atomic bit operations.
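
For reference, the non-atomic variant is just a plain read-modify-write
on the bitmap word - roughly the following sketch (mirroring the
asm-generic-style definitions; the exact per-arch code differs):

	static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
	{
		unsigned long mask = 1UL << (nr % BITS_PER_LONG);
		unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
		unsigned long old = *p;

		*p = old | mask;		/* no lock, no barrier */
		return (old & mask) != 0;
	}

This works because the minix and ext2 bitmap updates are already
serialized by higher-level locks; only the ext2 *_atomic() variants keep
the atomic operations.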

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 include/asm-cris/bitops.h      |  8 ++++----
 include/asm-frv/bitops.h       | 14 +++++++-------
 include/asm-h8300/bitops.h     |  6 +++---
 include/asm-ia64/bitops.h      | 10 +++++-----
 include/asm-m32r/bitops.h      |  2 +-
 include/asm-m68k/bitops.h      | 10 +++++-----
 include/asm-m68knommu/bitops.h |  6 +++---
 include/asm-mips/bitops.h      |  6 +++---
 include/asm-s390/bitops.h      | 10 +++++-----
 include/asm-sh/bitops.h        | 16 +++++-----------
 include/asm-sh64/bitops.h      | 16 +++++-----------
 include/asm-sparc/bitops.h     |  6 +++---
 include/asm-sparc64/bitops.h   |  6 +++---
 include/asm-v850/bitops.h      | 10 +++++-----
 include/asm-xtensa/bitops.h    |  6 +++---
 15 files changed, 60 insertions(+), 72 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index f8a67476931..b1fca1fd8a5 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -352,17 +352,17 @@ found_middle:
 #define find_first_bit(addr, size) \
         find_next_bit((addr), (size), 0)
 
-#define ext2_set_bit                 test_and_set_bit
+#define ext2_set_bit                 __test_and_set_bit
 #define ext2_set_bit_atomic(l,n,a)   test_and_set_bit(n,a)
-#define ext2_clear_bit               test_and_clear_bit
+#define ext2_clear_bit               __test_and_clear_bit
 #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
 #define ext2_test_bit                test_bit
 #define ext2_find_first_zero_bit     find_first_zero_bit
 #define ext2_find_next_zero_bit      find_next_zero_bit
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index f686b519878..9c5db5c34c1 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -259,11 +259,11 @@ static inline int sched_find_first_bit(const unsigned long *b)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
 
-#define ext2_set_bit(nr, addr)		test_and_set_bit  ((nr) ^ 0x18, (addr))
-#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 0x18, (addr))
+#define ext2_set_bit(nr, addr)		__test_and_set_bit  ((nr) ^ 0x18, (addr))
+#define ext2_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 0x18, (addr))
 
-#define ext2_set_bit_atomic(lock,nr,addr)	ext2_set_bit((nr), addr)
-#define ext2_clear_bit_atomic(lock,nr,addr)	ext2_clear_bit((nr), addr)
+#define ext2_set_bit_atomic(lock,nr,addr)	test_and_set_bit  ((nr) ^ 0x18, (addr))
+#define ext2_clear_bit_atomic(lock,nr,addr)	test_and_clear_bit((nr) ^ 0x18, (addr))
 
 static inline int ext2_test_bit(int nr, const volatile void * addr)
 {
@@ -331,9 +331,9 @@ found_middle:
 }
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr)		ext2_set_bit(nr,addr)
-#define minix_set_bit(nr,addr)			ext2_set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr)	ext2_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr)		__test_and_set_bit  ((nr) ^ 0x18, (addr))
+#define minix_set_bit(nr,addr)			__set_bit((nr) ^ 0x18, (addr))
+#define minix_test_and_clear_bit(nr,addr)	__test_and_clear_bit((nr) ^ 0x18, (addr))
 #define minix_test_bit(nr,addr)			ext2_test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size)	ext2_find_first_zero_bit(addr,size)
 
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h
index ff7c2b72159..af95f914e51 100644
--- a/include/asm-h8300/bitops.h
+++ b/include/asm-h8300/bitops.h
@@ -397,9 +397,9 @@ found_middle:
 }
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 36d0fb95ea8..eccb01c79c1 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -394,18 +394,18 @@ extern int __find_next_bit(const void *addr, unsigned long size,
 
 #define __clear_bit(nr, addr)		clear_bit(nr, addr)
 
-#define ext2_set_bit			test_and_set_bit
+#define ext2_set_bit			__test_and_set_bit
 #define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
-#define ext2_clear_bit			test_and_clear_bit
+#define ext2_clear_bit			__test_and_clear_bit
 #define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
 #define ext2_test_bit			test_bit
 #define ext2_find_first_zero_bit	find_first_zero_bit
 #define ext2_find_next_zero_bit		find_next_zero_bit
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr)			set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr)		__test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr)			__set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr)	__test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr)			test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
 
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
index abea2fdd868..f8e993e0bbc 100644
--- a/include/asm-m32r/bitops.h
+++ b/include/asm-m32r/bitops.h
@@ -575,7 +575,7 @@ found_middle:
  */
 
 #ifdef __LITTLE_ENDIAN__
-#define ext2_set_bit			test_and_set_bit
+#define ext2_set_bit			__test_and_set_bit
 #define ext2_clear_bit			__test_and_clear_bit
 #define ext2_test_bit			test_bit
 #define ext2_find_first_zero_bit	find_first_zero_bit
diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h
index 13f4c004846..b7955b39d96 100644
--- a/include/asm-m68k/bitops.h
+++ b/include/asm-m68k/bitops.h
@@ -365,9 +365,9 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
 	return ((p - addr) << 4) + (res ^ 31);
 }
 
-#define minix_test_and_set_bit(nr, addr)	test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
-#define minix_set_bit(nr,addr)			set_bit((nr) ^ 16, (unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_test_and_set_bit(nr, addr)	__test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_set_bit(nr,addr)			__set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
 
 static inline int minix_test_bit(int nr, const void *vaddr)
 {
@@ -377,9 +377,9 @@ static inline int minix_test_bit(int nr, const void *vaddr)
 
 /* Bitmap functions for the ext2 filesystem. */
 
-#define ext2_set_bit(nr, addr)			test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
+#define ext2_set_bit(nr, addr)			__test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
 #define ext2_set_bit_atomic(lock, nr, addr)	test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
-#define ext2_clear_bit(nr, addr)		test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
+#define ext2_clear_bit(nr, addr)		__test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
 #define ext2_clear_bit_atomic(lock, nr, addr)	test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
 
 static inline int ext2_test_bit(int nr, const void *vaddr)
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h
index 25d8a3cfef9..00c4a2548e6 100644
--- a/include/asm-m68knommu/bitops.h
+++ b/include/asm-m68knommu/bitops.h
@@ -476,9 +476,9 @@ found_middle:
 }
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 8e802059fe6..0e83abc829d 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -962,9 +962,9 @@ found_middle:
  * FIXME: These assume that Minix uses the native byte/bitorder.
  * This limits the Minix filesystem's value for data exchange very much.
  */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 3628899f48b..6b6a01dfc8d 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -871,11 +871,11 @@ static inline int sched_find_first_bit(unsigned long *b)
  */
 
 #define ext2_set_bit(nr, addr)       \
-	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
+	__test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_set_bit_atomic(lock, nr, addr)       \
 	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_clear_bit(nr, addr)     \
-	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
+	__test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_clear_bit_atomic(lock, nr, addr)     \
 	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
 #define ext2_test_bit(nr, addr)      \
@@ -1014,11 +1014,11 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
 /* Bitmap functions for the minix filesystem.  */
 /* FIXME !!! */
 #define minix_test_and_set_bit(nr,addr) \
-	test_and_set_bit(nr,(unsigned long *)addr)
+	__test_and_set_bit(nr,(unsigned long *)addr)
 #define minix_set_bit(nr,addr) \
-	set_bit(nr,(unsigned long *)addr)
+	__set_bit(nr,(unsigned long *)addr)
 #define minix_test_and_clear_bit(nr,addr) \
-	test_and_clear_bit(nr,(unsigned long *)addr)
+	__test_and_clear_bit(nr,(unsigned long *)addr)
 #define minix_test_bit(nr,addr) \
 	test_bit(nr,(unsigned long *)addr)
 #define minix_find_first_zero_bit(addr,size) \
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index 1c526086004..f8d504e7d9d 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -339,8 +339,8 @@ static inline int sched_find_first_bit(const unsigned long *b)
 }
 
 #ifdef __LITTLE_ENDIAN__
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
+#define ext2_set_bit(nr, addr) __test_and_set_bit((nr), (addr))
+#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr))
 #define ext2_test_bit(nr, addr) test_bit((nr), (addr))
 #define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
 #define ext2_find_next_zero_bit(addr, size, offset) \
@@ -349,30 +349,24 @@ static inline int sched_find_first_bit(const unsigned long *b)
 static __inline__ int ext2_set_bit(int nr, volatile void * addr)
 {
 	int		mask, retval;
-	unsigned long	flags;
 	volatile unsigned char	*ADDR = (unsigned char *) addr;
 
 	ADDR += nr >> 3;
 	mask = 1 << (nr & 0x07);
-	local_irq_save(flags);
 	retval = (mask & *ADDR) != 0;
 	*ADDR |= mask;
-	local_irq_restore(flags);
 	return retval;
 }
 
 static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
 {
 	int		mask, retval;
-	unsigned long	flags;
 	volatile unsigned char	*ADDR = (unsigned char *) addr;
 
 	ADDR += nr >> 3;
 	mask = 1 << (nr & 0x07);
-	local_irq_save(flags);
 	retval = (mask & *ADDR) != 0;
 	*ADDR &= ~mask;
-	local_irq_restore(flags);
 	return retval;
 }
 
@@ -459,9 +453,9 @@ found_middle:
 	})
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
index ce9c3ad45fe..5622b1a50cb 100644
--- a/include/asm-sh64/bitops.h
+++ b/include/asm-sh64/bitops.h
@@ -382,8 +382,8 @@ static inline int sched_find_first_bit(unsigned long *b)
 #define hweight8(x) generic_hweight8(x)
 
 #ifdef __LITTLE_ENDIAN__
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
+#define ext2_set_bit(nr, addr) __test_and_set_bit((nr), (addr))
+#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr))
 #define ext2_test_bit(nr, addr) test_bit((nr), (addr))
 #define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
 #define ext2_find_next_zero_bit(addr, size, offset) \
@@ -392,30 +392,24 @@ static inline int sched_find_first_bit(unsigned long *b)
 static __inline__ int ext2_set_bit(int nr, volatile void * addr)
 {
 	int		mask, retval;
-	unsigned long	flags;
 	volatile unsigned char	*ADDR = (unsigned char *) addr;
 
 	ADDR += nr >> 3;
 	mask = 1 << (nr & 0x07);
-	local_irq_save(flags);
 	retval = (mask & *ADDR) != 0;
 	*ADDR |= mask;
-	local_irq_restore(flags);
 	return retval;
 }
 
 static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
 {
 	int		mask, retval;
-	unsigned long	flags;
 	volatile unsigned char	*ADDR = (unsigned char *) addr;
 
 	ADDR += nr >> 3;
 	mask = 1 << (nr & 0x07);
-	local_irq_save(flags);
 	retval = (mask & *ADDR) != 0;
 	*ADDR &= ~mask;
-	local_irq_restore(flags);
 	return retval;
 }
 
@@ -502,9 +496,9 @@ found_middle:
 	})
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 41722b5e45e..f25109d6203 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -523,11 +523,11 @@ found_middle:
 
 /* Bitmap functions for the minix filesystem.  */
 #define minix_test_and_set_bit(nr,addr)	\
-	test_and_set_bit((nr),(unsigned long *)(addr))
+	__test_and_set_bit((nr),(unsigned long *)(addr))
 #define minix_set_bit(nr,addr)		\
-	set_bit((nr),(unsigned long *)(addr))
+	__set_bit((nr),(unsigned long *)(addr))
 #define minix_test_and_clear_bit(nr,addr) \
-	test_and_clear_bit((nr),(unsigned long *)(addr))
+	__test_and_clear_bit((nr),(unsigned long *)(addr))
 #define minix_test_bit(nr,addr)		\
 	test_bit((nr),(unsigned long *)(addr))
 #define minix_find_first_zero_bit(addr,size) \
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 6efc0162fb0..2361f873649 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -280,11 +280,11 @@ extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsig
 
 /* Bitmap functions for the minix filesystem.  */
 #define minix_test_and_set_bit(nr,addr)	\
-	test_and_set_bit((nr),(unsigned long *)(addr))
+	__test_and_set_bit((nr),(unsigned long *)(addr))
 #define minix_set_bit(nr,addr)	\
-	set_bit((nr),(unsigned long *)(addr))
+	__set_bit((nr),(unsigned long *)(addr))
 #define minix_test_and_clear_bit(nr,addr) \
-	test_and_clear_bit((nr),(unsigned long *)(addr))
+	__test_and_clear_bit((nr),(unsigned long *)(addr))
 #define minix_test_bit(nr,addr)	\
 	test_bit((nr),(unsigned long *)(addr))
 #define minix_find_first_zero_bit(addr,size) \
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
index 609b9e87222..44d596e792e 100644
--- a/include/asm-v850/bitops.h
+++ b/include/asm-v850/bitops.h
@@ -336,18 +336,18 @@ static inline int sched_find_first_bit(unsigned long *b)
 #define hweight16(x) 			generic_hweight16 (x)
 #define hweight8(x) 			generic_hweight8 (x)
 
-#define ext2_set_bit			test_and_set_bit
+#define ext2_set_bit			__test_and_set_bit
 #define ext2_set_bit_atomic(l,n,a)      test_and_set_bit(n,a)
-#define ext2_clear_bit			test_and_clear_bit
+#define ext2_clear_bit			__test_and_clear_bit
 #define ext2_clear_bit_atomic(l,n,a)    test_and_clear_bit(n,a)
 #define ext2_test_bit			test_bit
 #define ext2_find_first_zero_bit	find_first_zero_bit
 #define ext2_find_next_zero_bit		find_next_zero_bit
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit		test_and_set_bit
-#define minix_set_bit			set_bit
-#define minix_test_and_clear_bit	test_and_clear_bit
+#define minix_test_and_set_bit		__test_and_set_bit
+#define minix_set_bit			__set_bit
+#define minix_test_and_clear_bit	__test_and_clear_bit
 #define minix_test_bit 			test_bit
 #define minix_find_first_zero_bit 	find_first_zero_bit
 
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index 0a2065f1a37..50b83726a49 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -436,9 +436,9 @@ static inline int sched_find_first_bit(const unsigned long *b)
 
 /* Bitmap functions for the minix filesystem.  */
 
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
-- 
cgit v1.2.3-70-g09d2


From 2d78d4beb64eb07d50665432867971c481192ebf Mon Sep 17 00:00:00 2001
From: Akinobu Mita <mita@miraclelinux.com>
Date: Sun, 26 Mar 2006 01:39:40 -0800
Subject: [PATCH] bitops: sparc64: use generic bitops

- remove __{,test_and_}{set,clear,change}_bit() and test_bit()
- remove ffz()
- remove __ffs()
- remove generic_fls()
- remove generic_fls64()
- remove sched_find_first_bit()
- remove ffs()

- unless defined(ULTRA_HAS_POPULATION_COUNT)

  - remove generic_hweight{64,32,16,8}()

- remove find_{next,first}{,_zero}_bit()
- remove ext2_{set,clear,test,find_first_zero,find_next_zero}_bit()
- remove minix_{test_and_set,set,test_and_clear,test,find_first_zero}_bit()

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 arch/sparc64/Kconfig                |   8 ++
 arch/sparc64/kernel/sparc64_ksyms.c |   5 -
 arch/sparc64/lib/Makefile           |   2 +-
 arch/sparc64/lib/find_bit.c         | 127 ---------------------
 include/asm-sparc64/bitops.h        | 219 +++---------------------------------
 5 files changed, 22 insertions(+), 339 deletions(-)
 delete mode 100644 arch/sparc64/lib/find_bit.c

(limited to 'include/asm-sparc64')

diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 267afddf63c..d1e2fc56648 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -162,6 +162,14 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
+config GENERIC_FIND_NEXT_BIT
+	bool
+	default y
+
+config GENERIC_HWEIGHT
+	bool
+	default y if !ULTRA_HAS_POPULATION_COUNT
+
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9914a17651b..c7fbbcfce82 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -175,11 +175,6 @@ EXPORT_SYMBOL(set_bit);
 EXPORT_SYMBOL(clear_bit);
 EXPORT_SYMBOL(change_bit);
 
-/* Bit searching */
-EXPORT_SYMBOL(find_next_bit);
-EXPORT_SYMBOL(find_next_zero_bit);
-EXPORT_SYMBOL(find_next_zero_le_bit);
-
 EXPORT_SYMBOL(ivector_table);
 EXPORT_SYMBOL(enable_irq);
 EXPORT_SYMBOL(disable_irq);
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 8812ded19f0..4a725d8985f 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
 	 NGpage.o NGbzero.o \
 	 copy_in_user.o user_fixup.o memmove.o \
-	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+	 mcount.o ipcsum.o rwsem.o xor.o delay.o
 
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c
deleted file mode 100644
index 6059557067b..00000000000
--- a/arch/sparc64/lib/find_bit.c
+++ /dev/null
@@ -1,127 +0,0 @@
-#include <linux/bitops.h>
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-				unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> 6);
-	unsigned long result = offset & ~63UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 63UL;
-	if (offset) {
-		tmp = *(p++);
-		tmp &= (~0UL << offset);
-		if (size < 64)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= 64;
-		result += 64;
-	}
-	while (size & ~63UL) {
-		if ((tmp = *(p++)))
-			goto found_middle;
-		result += 64;
-		size -= 64;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= (~0UL >> (64 - size));
-	if (tmp == 0UL)        /* Are any bits set? */
-		return result + size; /* Nope. */
-found_middle:
-	return result + __ffs(tmp);
-}
-
-/* find_next_zero_bit() finds the first zero bit in a bit string of length
- * 'size' bits, starting the search at bit 'offset'. This is largely based
- * on Linus's ALPHA routines, which are pretty portable BTW.
- */
-
-unsigned long find_next_zero_bit(const unsigned long *addr,
-			unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> 6);
-	unsigned long result = offset & ~63UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 63UL;
-	if (offset) {
-		tmp = *(p++);
-		tmp |= ~0UL >> (64-offset);
-		if (size < 64)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= 64;
-		result += 64;
-	}
-	while (size & ~63UL) {
-		if (~(tmp = *(p++)))
-			goto found_middle;
-		result += 64;
-		size -= 64;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)        /* Are any bits zero? */
-		return result + size; /* Nope. */
-found_middle:
-	return result + ffz(tmp);
-}
-
-unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
-{
-	unsigned long *p = addr + (offset >> 6);
-	unsigned long result = offset & ~63UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 63UL;
-	if(offset) {
-		tmp = __swab64p(p++);
-		tmp |= (~0UL >> (64-offset));
-		if(size < 64)
-			goto found_first;
-		if(~tmp)
-			goto found_middle;
-		size -= 64;
-		result += 64;
-	}
-	while(size & ~63) {
-		if(~(tmp = __swab64p(p++)))
-			goto found_middle;
-		result += 64;
-		size -= 64;
-	}
-	if(!size)
-		return result;
-	tmp = __swab64p(p);
-found_first:
-	tmp |= (~0UL << size);
-	if (tmp == ~0UL)        /* Are any bits zero? */
-		return result + size; /* Nope. */
-found_middle:
-	return result + ffz(tmp);
-}
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 2361f873649..71944b0f09d 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -18,58 +18,7 @@ extern void set_bit(unsigned long nr, volatile unsigned long *addr);
 extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
 extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
-/* "non-atomic" versions... */
-
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-
-	*m |= (1UL << (nr & 63));
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-
-	*m &= ~(1UL << (nr & 63));
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-
-	*m ^= (1UL << (nr & 63));
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-	unsigned long old = *m;
-	unsigned long mask = (1UL << (nr & 63));
-
-	*m = (old | mask);
-	return ((old & mask) != 0);
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-	unsigned long old = *m;
-	unsigned long mask = (1UL << (nr & 63));
-
-	*m = (old & ~mask);
-	return ((old & mask) != 0);
-}
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-	unsigned long old = *m;
-	unsigned long mask = (1UL << (nr & 63));
-
-	*m = (old ^ mask);
-	return ((old & mask) != 0);
-}
+#include <asm-generic/bitops/non-atomic.h>
 
 #ifdef CONFIG_SMP
 #define smp_mb__before_clear_bit()	membar_storeload_loadload()
@@ -79,78 +28,15 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 #define smp_mb__after_clear_bit()	barrier()
 #endif
 
-static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
-{
-	return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
-}
-
-/* The easy/cheese version for now. */
-static inline unsigned long ffz(unsigned long word)
-{
-	unsigned long result;
-
-	result = 0;
-	while(word & 1) {
-		result++;
-		word >>= 1;
-	}
-	return result;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-	unsigned long result = 0;
-
-	while (!(word & 1UL)) {
-		result++;
-		word >>= 1;
-	}
-	return result;
-}
-
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
-#define fls64(x)   generic_fls64(x)
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
 
 #ifdef __KERNEL__
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(((unsigned int)b[1])))
-		return __ffs(b[1]) + 64;
-	if (b[1] >> 32)
-		return __ffs(b[1] >> 32) + 96;
-	return __ffs(b[2]) + 128;
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
-	if (!x)
-		return 0;
-	return __ffs((unsigned long)x) + 1;
-}
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
 
 /*
  * hweightN: returns the hamming weight (i.e. the number
@@ -193,102 +79,23 @@ static inline unsigned int hweight8(unsigned int w)
 
 #else
 
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
 
 #endif
 #endif /* __KERNEL__ */
 
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-extern unsigned long find_next_bit(const unsigned long *, unsigned long,
-					unsigned long);
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
-
-/* find_next_zero_bit() finds the first zero bit in a bit string of length
- * 'size' bits, starting the search at bit 'offset'. This is largely based
- * on Linus's ALPHA routines, which are pretty portable BTW.
- */
-
-extern unsigned long find_next_zero_bit(const unsigned long *,
-					unsigned long, unsigned long);
-
-#define find_first_zero_bit(addr, size) \
-        find_next_zero_bit((addr), (size), 0)
-
-#define test_and_set_le_bit(nr,addr)	\
-	test_and_set_bit((nr) ^ 0x38, (addr))
-#define test_and_clear_le_bit(nr,addr)	\
-	test_and_clear_bit((nr) ^ 0x38, (addr))
-
-static inline int test_le_bit(int nr, __const__ unsigned long * addr)
-{
-	int			mask;
-	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	return ((mask & *ADDR) != 0);
-}
-
-#define find_first_zero_le_bit(addr, size) \
-        find_next_zero_le_bit((addr), (size), 0)
-
-extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsigned long);
+#include <asm-generic/bitops/find.h>
 
 #ifdef __KERNEL__
 
-#define __set_le_bit(nr, addr) \
-	__set_bit((nr) ^ 0x38, (addr))
-#define __clear_le_bit(nr, addr) \
-	__clear_bit((nr) ^ 0x38, (addr))
-#define __test_and_clear_le_bit(nr, addr) \
-	__test_and_clear_bit((nr) ^ 0x38, (addr))
-#define __test_and_set_le_bit(nr, addr) \
-	__test_and_set_bit((nr) ^ 0x38, (addr))
+#include <asm-generic/bitops/ext2-non-atomic.h>
 
-#define ext2_set_bit(nr,addr)	\
-	__test_and_set_le_bit((nr),(unsigned long *)(addr))
 #define ext2_set_bit_atomic(lock,nr,addr) \
-	test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define ext2_clear_bit(nr,addr)	\
-	__test_and_clear_le_bit((nr),(unsigned long *)(addr))
+	test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
 #define ext2_clear_bit_atomic(lock,nr,addr) \
-	test_and_clear_le_bit((nr),(unsigned long *)(addr))
-#define ext2_test_bit(nr,addr)	\
-	test_le_bit((nr),(unsigned long *)(addr))
-#define ext2_find_first_zero_bit(addr, size) \
-	find_first_zero_le_bit((unsigned long *)(addr), (size))
-#define ext2_find_next_zero_bit(addr, size, off) \
-	find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
+	test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
 
-/* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr)	\
-	__test_and_set_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr)	\
-	__set_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
-	__test_and_clear_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr)	\
-	test_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
-	find_first_zero_bit((unsigned long *)(addr),(size))
+#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
-- 
cgit v1.2.3-70-g09d2


From a117e66ed45ac0569c039ea60bd7a9a61e031858 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Date: Mon, 27 Mar 2006 01:15:25 -0800
Subject: [PATCH] unify pfn_to_page: generic functions

There are 3 memory models: FLATMEM, DISCONTIGMEM, and SPARSEMEM.
Each arch has its own page_to_pfn() and pfn_to_page() for each model,
but most of them can use the same arithmetic.

This patch adds asm-generic/memory_model.h, which includes generic
page_to_pfn() and pfn_to_page() definitions for each memory model.

When CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y, out-of-line functions are used
instead of macros.  This is enabled by some archs and reduces text
size.
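
Whatever the model, the generated definitions preserve the usual
round-trip invariant, e.g. (illustrative only, assuming pfn_valid(pfn)):

	struct page *page = pfn_to_page(pfn);

	BUG_ON(page_to_pfn(page) != pfn);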

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ian Molton <spyro@f2s.com>
Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Hirokazu Takata <takata.hirokazu@renesas.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Kazumoto Kojima <kkojima@rr.iij4u.or.jp>
Cc: Richard Curnow <rc@rc0.org.uk>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Miles Bader <uclinux-v850@lsi.nec.co.jp>
Cc: Chris Zankel <chris@zankel.net>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 include/asm-generic/memory_model.h | 77 ++++++++++++++++++++++++++++++++++++++
 include/asm-sparc64/page.h         |  2 +
 include/linux/mmzone.h             | 11 ------
 mm/page_alloc.c                    | 42 +++++++++++++++++++++
 4 files changed, 121 insertions(+), 11 deletions(-)
 create mode 100644 include/asm-generic/memory_model.h

(limited to 'include/asm-sparc64')

diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
new file mode 100644
index 00000000000..a7bb4978e80
--- /dev/null
+++ b/include/asm-generic/memory_model.h
@@ -0,0 +1,77 @@
+#ifndef __ASM_MEMORY_MODEL_H
+#define __ASM_MEMORY_MODEL_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_FLATMEM)
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET		(0UL)
+#endif
+
+#elif defined(CONFIG_DISCONTIGMEM)
+
+#ifndef arch_pfn_to_nid
+#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
+#endif
+
+#ifndef arch_local_page_offset
+#define arch_local_page_offset(pfn, nid)	\
+	((pfn) - NODE_DATA(nid)->node_start_pfn)
+#endif
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
+struct page;
+/* this is useful when inlined pfn_to_page is too big */
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+#else
+/*
+ * supports 3 memory models.
+ */
+#if defined(CONFIG_FLATMEM)
+
+#define pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
+				 ARCH_PFN_OFFSET)
+#elif defined(CONFIG_DISCONTIGMEM)
+
+#define pfn_to_page(pfn)			\
+({	unsigned long __pfn = (pfn);		\
+	unsigned long __nid = arch_pfn_to_nid(pfn);  \
+	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
+})
+
+#define page_to_pfn(pg)			\
+({	struct page *__pg = (pg);		\
+	struct zone *__zone = page_zone(__pg);	\
+	(unsigned long)(__pg - __zone->zone_mem_map) +	\
+	 __zone->zone_start_pfn;			\
+})
+
+#elif defined(CONFIG_SPARSEMEM)
+/*
+ * Note: section's mem_map is encoded to reflect its start_pfn.
+ * section[i].section_mem_map == mem_map's address - start_pfn;
+ */
+#define page_to_pfn(pg)					\
+({	struct page *__pg = (pg);				\
+	int __sec = page_to_section(__pg);			\
+	__pg - __section_mem_map_addr(__nr_to_section(__sec));	\
+})
+
+#define pfn_to_page(pfn)				\
+({	unsigned long __pfn = (pfn);			\
+	struct mem_section *__sec = __pfn_to_section(__pfn);	\
+	__section_mem_map_addr(__sec) + __pfn;		\
+})
+#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 66fe4ac59fd..aabb2190672 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -111,6 +111,8 @@ typedef unsigned long pgprot_t;
 				 (_AC(0x0000000070000000,UL)) : \
 				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
 
+#include <asm-generic/memory_model.h>
+
 #endif /* !(__ASSEMBLY__) */
 
 /* to align the pointer to the (next) page boundary */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ebfc238cc24..0c1c0c0cce6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -602,17 +602,6 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
 	return __nr_to_section(pfn_to_section_nr(pfn));
 }
 
-#define pfn_to_page(pfn) 						\
-({ 									\
-	unsigned long __pfn = (pfn);					\
-	__section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn;	\
-})
-#define page_to_pfn(page)						\
-({									\
-	page - __section_mem_map_addr(__nr_to_section(			\
-		page_to_section(page)));				\
-})
-
 static inline int pfn_valid(unsigned long pfn)
 {
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 338a02bb004..349b328763b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2745,3 +2745,45 @@ void *__init alloc_large_system_hash(const char *tablename,
 
 	return table;
 }
+
+#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
+/*
+ * pfn <-> page translation. out-of-line version.
+ * (see asm-generic/memory_model.h)
+ */
+#if defined(CONFIG_FLATMEM)
+struct page *pfn_to_page(unsigned long pfn)
+{
+	return mem_map + (pfn - ARCH_PFN_OFFSET);
+}
+unsigned long page_to_pfn(struct page *page)
+{
+	return (page - mem_map) + ARCH_PFN_OFFSET;
+}
+#elif defined(CONFIG_DISCONTIGMEM)
+struct page *pfn_to_page(unsigned long pfn)
+{
+	int nid = arch_pfn_to_nid(pfn);
+	return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid);
+}
+unsigned long page_to_pfn(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+	return (page - zone->zone_mem_map) + zone->zone_start_pfn;
+
+}
+#elif defined(CONFIG_SPARSEMEM)
+struct page *pfn_to_page(unsigned long pfn)
+{
+	return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
+}
+
+unsigned long page_to_pfn(struct page *page)
+{
+	long section_id = page_to_section(page);
+	return page - __section_mem_map_addr(__nr_to_section(section_id));
+}
+#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+EXPORT_SYMBOL(pfn_to_page);
+EXPORT_SYMBOL(page_to_pfn);
+#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
-- 
cgit v1.2.3-70-g09d2


From e9056f13bfcdd054a0c3d730e4e096748d8a363a Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Mon, 27 Mar 2006 01:16:21 -0800
Subject: [PATCH] lightweight robust futexes: arch defaults

This patchset provides a new (written from scratch) implementation of robust
futexes, called "lightweight robust futexes".  We believe this new
implementation is faster and simpler than the vma-based robust futex solutions
presented before, and we'd like this patchset to be adopted in the upstream
kernel.  This is version 1 of the patchset.

  Background
  ----------

What are robust futexes?  To answer that, we first need to understand what
futexes are: normal futexes are special types of locks that in the
noncontended case can be acquired/released from userspace without having to
enter the kernel.

A futex is in essence a user-space address, e.g.  a 32-bit lock variable
field.  If userspace notices contention (the lock is already owned and someone
else wants to grab it too) then the lock is marked with a value that says
"there's a waiter pending", and the sys_futex(FUTEX_WAIT) syscall is used to
wait for the other guy to release it.  The kernel creates a 'futex queue'
internally, so that it can later on match up the waiter with the waker -
without them having to know about each other.  When the owner thread releases
the futex, it notices (via the variable value) that there were waiter(s)
pending, and does the sys_futex(FUTEX_WAKE) syscall to wake them up.  Once all
waiters have taken and released the lock, the futex is again back to
'uncontended' state, and there's no in-kernel state associated with it.  The
kernel completely forgets that there ever was a futex at that address.  This
method makes futexes very lightweight and scalable.
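
As a concrete userspace-side illustration of this protocol, here is a
minimal futex-based mutex sketch - not glibc's actual implementation;
0 means unlocked, 1 locked, 2 locked with waiters:

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int futex(int *uaddr, int op, int val)
	{
		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
	}

	static void lock(int *f)
	{
		int c = __sync_val_compare_and_swap(f, 0, 1);

		while (c != 0) {	/* contended path */
			if (c == 2 || __sync_val_compare_and_swap(f, 1, 2) != 0)
				futex(f, FUTEX_WAIT, 2);	/* sleep in the kernel */
			c = __sync_val_compare_and_swap(f, 0, 2);
		}
	}

	static void unlock(int *f)
	{
		if (__sync_fetch_and_sub(f, 1) != 1) {	/* waiters pending */
			*f = 0;
			futex(f, FUTEX_WAKE, 1);	/* wake one waiter */
		}
	}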

"Robustness" is about dealing with crashes while holding a lock: if a process
exits prematurely while holding a pthread_mutex_t lock that is also shared
with some other process (e.g.  yum segfaults while holding a pthread_mutex_t,
or yum is kill -9-ed), then waiters for that lock need to be notified that the
last owner of the lock exited in some irregular way.

To solve such types of problems, "robust mutex" userspace APIs were created:
pthread_mutex_lock() returns an error value if the owner exits prematurely -
and the new owner can decide whether the data protected by the lock can be
recovered safely.

There is a big conceptual problem with futex-based mutexes though: it is the
kernel that destroys the owner task (e.g.  due to a SEGFAULT), but the kernel
cannot help with the cleanup: if there is no 'futex queue' (and in most cases
there is none, futexes being fast lightweight locks) then the kernel has no
information to clean up after the held lock!  Userspace has no chance to clean
up after the lock either - userspace is the one that crashes, so it has no
opportunity to clean up.  Catch-22.

In practice, when e.g.  yum is kill -9-ed (or segfaults), a system reboot is
needed to release that futex-based lock.  This is one of the leading
bug reports against yum.

To solve this problem, 'Robust Futex' patches were created and presented on
lkml: the one written by Todd Kneisel and David Singleton is the most advanced
at the moment.  These patches all tried to extend the futex abstraction by
registering futex-based locks in the kernel - and thus give the kernel a
chance to clean up.

E.g.  in David Singleton's robust-futex-6.patch, there are 3 new syscall
variants to sys_futex(): FUTEX_REGISTER, FUTEX_DEREGISTER and FUTEX_RECOVER.
The kernel attaches such robust futexes to vmas (via
vma->vm_file->f_mapping->robust_head), and at do_exit() time, all vmas are
searched to see whether they have a robust_head set.

Lots of work went into the vma-based robust-futex patch, and recently it has
improved significantly, but unfortunately it still has two fundamental
problems left:

 - they have quite complex locking and race scenarios.  The vma-based
   patches had been pending for years, but they are still not completely
   reliable.

 - they have to scan _every_ vma at sys_exit() time, per thread!

The second disadvantage is a real killer: pthread_exit() takes around 1
microsecond on Linux, but with thousands (or tens of thousands) of vmas every
pthread_exit() takes a millisecond or more, also totally destroying the CPU's
L1 and L2 caches!

This is quite noticeable even for normal process sys_exit_group() calls:
the kernel has to do the vma scanning unconditionally!  (this is because the
kernel has no knowledge about how many robust futexes there are to be cleaned
up, because a robust futex might have been registered in another task, and the
futex variable might have been simply mmap()-ed into this process's address
space).

This huge overhead forced the creation of CONFIG_FUTEX_ROBUST, but worse than
that: the overhead makes robust futexes impractical for any type of generic
Linux distribution.

So it became clear to us, something had to be done.  Last week, when Thomas
Gleixner tried to fix up the vma-based robust futex patch in the -rt tree, he
found a handful of new races and we were talking about it and were analyzing
the situation.  At that point a fundamentally different solution occurred to
me.  This patchset (written in the past couple of days) implements that new
solution.  Be warned though - the patchset does things we normally don't do in
Linux, so some might find the approach disturbing.  Parental advice
recommended ;-)

  New approach to robust futexes
  ------------------------------

At the heart of this new approach there is a per-thread private list of robust
locks that userspace is holding (maintained by glibc) - this userspace list
is registered with the kernel via a new syscall [the registration happens at
most once per thread lifetime].  At do_exit() time, the kernel checks this
user-space list: are there any robust futex locks to be cleaned up?

In the common case, at do_exit() time, there is no list registered, so the
cost of robust futexes is just a simple current->robust_list != NULL
comparison.  If the thread has registered a list, then normally the list is
empty.  If the thread/process crashed or terminated in some incorrect way then
the list might be non-empty: in this case the kernel carefully walks the list
[not trusting it], and marks all locks that are owned by this thread with the
FUTEX_OWNER_DIED bit, and wakes up one waiter (if any).
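
In simplified pseudo-C, the exit-time walk amounts to the following sketch.
The helper names, walk limit and wakeup call are illustrative; the real code
must treat every pointer as untrusted user memory, and the atomic marking
step is exactly what the futex_atomic_cmpxchg_inuser() helper introduced at
the end of this patch is for:

	/* sketch: called from do_exit() if curr->robust_list != NULL */
	void exit_robust_list(struct task_struct *curr)
	{
		struct robust_list_head __user *head = curr->robust_list;
		struct robust_list __user *entry;
		int limit = ROBUST_LIST_LIMIT;	/* never trust the list length */

		if (get_user(entry, &head->list.next))
			return;
		while (entry != &head->list && limit-- > 0) {
			int __user *uaddr = futex_uaddr(entry, head);
			int uval;

			if (get_user(uval, uaddr))
				return;
			if ((uval & FUTEX_TID_MASK) == curr->pid) {
				/* atomically mark it dead, then wake one waiter */
				if (futex_atomic_cmpxchg_inuser(uaddr, uval,
						uval | FUTEX_OWNER_DIED) == uval)
					futex_wake(uaddr, 1);
			}
			if (get_user(entry, &entry->next))
				return;
		}
	}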

The list is guaranteed to be private and per-thread, so it's lockless.  There
is one race possible though: since adding to and removing from the list is
done after the futex is acquired by glibc, there is a window of a few
instructions in which the thread (or process) can die, leaving the futex
hung.  To protect
against this possibility, userspace (glibc) also maintains a simple per-thread
'list_op_pending' field, to allow the kernel to clean up if the thread dies
after acquiring the lock, but just before it could have added itself to the
list.  Glibc sets this list_op_pending field before it tries to acquire the
futex, and clears it after the list-add (or list-remove) has finished.
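
The resulting ordering on the glibc side is small enough to sketch in full.
The field and helper names here are hypothetical; only the position of the
list_op_pending updates relative to the futex operation matters:

	/* robust lock path */
	self->list_op_pending = &mutex->robust_node;	/* announce intent */
	lock_futex(&mutex->futex);	/* cmpxchg our TID into the word */
	enqueue(&self->robust_list, &mutex->robust_node);
	self->list_op_pending = NULL;	/* now findable via the list */

	/* robust unlock path mirrors it */
	self->list_op_pending = &mutex->robust_node;
	dequeue(&self->robust_list, &mutex->robust_node);
	unlock_futex(&mutex->futex);
	self->list_op_pending = NULL;

Whichever instant the thread dies at, a held robust futex is thus reachable
either through the list or through list_op_pending.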

That's all that is needed - all the rest of robust-futex cleanup is done in
userspace [just like with the previous patches].

Ulrich Drepper has implemented the necessary glibc support for this new
mechanism, which fully enables robust mutexes.  (Ulrich plans to commit these
changes to glibc-HEAD later today.)

Key differences of this userspace-list based approach, compared to the
vma-based method:

 - it's much, much faster: at thread exit time, there's no need to loop
   over every vma (!), which the VM-based method has to do.  Only a very
   simple 'is the list empty' op is done.

 - no VM changes are needed - 'struct address_space' is left alone.

 - no registration of individual locks is needed: robust mutexes don't need
   any extra per-lock syscalls.  Robust mutexes thus become a very lightweight
   primitive - so they don't force the application designer to make a hard
   choice between performance and robustness - robust mutexes are just as fast.

 - no per-lock kernel allocation happens.

 - no resource limits are needed.

 - no kernel-space recovery call (FUTEX_RECOVER) is needed.

 - the implementation and the locking are "obvious", and there are no
   interactions with the VM.

  Performance
  -----------

I have benchmarked the time needed for the kernel to process a list of 1
million (!) held locks, using the new method [on a 2GHz CPU]:

 - with FUTEX_WAIT set [contended mutex]: 130 msecs
 - without FUTEX_WAIT set [uncontended mutex]: 30 msecs

I have also measured an approach where glibc does the lock notification [which
it currently does for !pshared robust mutexes], and that took 256 msecs -
clearly slower, due to the 1 million FUTEX_WAKE syscalls userspace had to do.

(1 million held locks are unheard of - we expect at most a handful of locks to
be held at a time.  Nevertheless it's nice to know that this approach scales
nicely.)

  Implementation details
  ----------------------

The patch adds two new syscalls: one to register the userspace list, and one
to query the registered list pointer:

 asmlinkage long
 sys_set_robust_list(struct robust_list_head __user *head,
                     size_t len);

 asmlinkage long
 sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
                     size_t __user *len_ptr);

List registration is very fast: the pointer is simply stored in
current->robust_list.  [Note that in the future, if robust futexes become
widespread, we could extend sys_clone() to register a robust-list head for new
threads, without the need for another syscall.]
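
For reference, the registered structure ends up shaped roughly like this in
the rest of the series (a sketch of the layout shared between userspace and
the kernel):

	struct robust_list {
		struct robust_list __user *next;
	};

	struct robust_list_head {
		/* circular list of this thread's held robust locks */
		struct robust_list list;
		/* offset from a list entry to the futex word it guards */
		long futex_offset;
		/* covers the add/remove race window described above */
		struct robust_list __user *list_op_pending;
	};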

So there is virtually zero overhead for tasks not using robust futexes, and
even for robust futex users, there is only one extra syscall per thread
lifetime, and the cleanup operation, if it happens, is fast and
straightforward.  The kernel doesn't have any internal distinction between
robust and normal futexes.

If a futex is found to be held at exit time, the kernel sets the
FUTEX_OWNER_DIED bit of the futex word:

	#define FUTEX_OWNER_DIED        0x40000000

and wakes up the next futex waiter (if any). User-space does the rest of
the cleanup.

Otherwise, robust futexes are acquired by glibc by putting the TID into the
futex field atomically.  Waiters set the FUTEX_WAITERS bit:

	#define FUTEX_WAITERS           0x80000000

and the remaining bits are for the TID.
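
Putting the two flag bits together, the futex word of a held robust lock
decomposes as sketched below.  The cmpxchg() helper and the TID mask value
are shown for illustration only:

	#define FUTEX_TID_MASK	0x3fffffff	/* low 30 bits: owner TID */

	/* uncontended acquire - no kernel entry */
	if (cmpxchg(&lock->futex, 0, gettid()) == 0)
		return 0;

	/* uncontended release - no kernel entry either */
	if (cmpxchg(&lock->futex, gettid(), 0) == gettid())
		return 0;
	/* otherwise FUTEX_WAITERS (or FUTEX_OWNER_DIED) is set:
	   fall back to sys_futex() */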

  Testing, architecture support
  -----------------------------

I've tested the new syscalls on x86 and x86_64, and have made sure the parsing
of the userspace list is robust [ ;-) ] even if the list is deliberately
corrupted.

i386 and x86_64 syscalls are wired up at the moment, and Ulrich has tested the
new glibc code (on x86_64 and i386), and it works for his robust-mutex
testcases.

All other architectures should build just fine too - but they won't have the
new syscalls yet.

Architectures need to implement the new futex_atomic_cmpxchg_inuser() inline
function before wiring up the syscalls (that function returns -ENOSYS right
now).
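
The helper's semantics are a plain compare-and-exchange on a user-space
address.  The following is semantics-only pseudo-C - a real implementation
has to go through the architecture's guarded user-access machinery with
exception fixups, never a direct dereference:

	/* semantics only - NOT a valid implementation */
	static inline int
	futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
	{
		int val = *uaddr;	/* really: guarded, atomic user access */

		if (val == oldval)
			*uaddr = newval;
		return val;	/* old value, or -EFAULT on a bad address */
	}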

This patch:

Add placeholder futex_atomic_cmpxchg_inuser() implementations to every
architecture that supports futexes.  It returns -ENOSYS.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Acked-by: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 include/asm-frv/futex.h     | 6 ++++++
 include/asm-generic/futex.h | 6 ++++++
 include/asm-i386/futex.h    | 6 ++++++
 include/asm-mips/futex.h    | 6 ++++++
 include/asm-powerpc/futex.h | 6 ++++++
 include/asm-sparc64/futex.h | 6 ++++++
 include/asm-x86_64/futex.h  | 6 ++++++
 7 files changed, 42 insertions(+)

diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h
index fca9d90e32c..9a0e9026ba5 100644
--- a/include/asm-frv/futex.h
+++ b/include/asm-frv/futex.h
@@ -9,5 +9,11 @@
 
 extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
 
+static inline int
+futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+{
+	return -ENOSYS;
+}
+
 #endif
 #endif
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 3ae2c734754..514bd401cd7 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -49,5 +49,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+{
+	return -ENOSYS;
+}
+
 #endif
 #endif
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 44b9db80647..1f39ad9d52a 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -104,5 +104,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+{
+	return -ENOSYS;
+}
+
 #endif
 #endif
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 2454c44a8f5..c5fb2d6d918 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -99,5 +99,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+{
+	return -ENOSYS;
+}
+
 #endif
 #endif
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 39e85f320a7..80ed9854e42 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -81,5 +81,11 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+{
+	return -ENOSYS;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_FUTEX_H */
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 34c4b43d3f9..cd340a23315 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -83,4 +83,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+{
+	return -ENOSYS;
+}
+
 #endif /* !(_SPARC64_FUTEX_H) */
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 8602c09bf89..4f4cb3410d0 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -94,5 +94,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+{
+	return -ENOSYS;
+}
+
 #endif
 #endif
-- 
cgit v1.2.3-70-g09d2


From e041c683412d5bf44dc2b109053e3b837b71742d Mon Sep 17 00:00:00 2001
From: Alan Stern <stern@rowland.harvard.edu>
Date: Mon, 27 Mar 2006 01:16:30 -0800
Subject: [PATCH] Notifier chain update: API changes

The kernel's implementation of notifier chains is unsafe.  There is no
protection against entries being added to or removed from a chain while the
chain is in use.  The issues were discussed in this thread:

    http://marc.theaimsgroup.com/?l=linux-kernel&m=113018709002036&w=2

We noticed that notifier chains in the kernel fall into two basic usage
classes:

	"Blocking" chains are always called from a process context
	and the callout routines are allowed to sleep;

	"Atomic" chains can be called from an atomic context and
	the callout routines are not allowed to sleep.

We decided to codify this distinction and make it part of the API.  Therefore
this set of patches introduces three new, parallel APIs: one for blocking
notifiers, one for atomic notifiers, and one for "raw" notifiers (which is
really just the old API under a new name).  New kinds of data structures are
used for the heads of the chains, and new routines are defined for
registration, unregistration, and calling a chain.  The three APIs are
explained in include/linux/notifier.h and their implementation is in
kernel/sys.c.

With atomic and blocking chains, the implementation guarantees that the chain
links will not be corrupted and that chain callers will not get messed up by
entries being added or removed.  For raw chains the implementation provides no
guarantees at all; users of this API must provide their own protections.  (The
idea was that situations may come up where the assumptions of the atomic and
blocking APIs are not appropriate, so it should be possible for users to
handle these things in their own way.)
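
In before/after terms the conversion is mechanical.  The chain head, the
handler and the event name below are made up for illustration; the API
calls are the ones introduced by this patch:

	/* old style: bare pointer, locking left to the caller */
	static struct notifier_block *my_chain;

	/* new style: the head embeds its own protection */
	static ATOMIC_NOTIFIER_HEAD(my_atomic_chain);
	static BLOCKING_NOTIFIER_HEAD(my_blocking_chain);

	static int my_handler(struct notifier_block *nb,
			      unsigned long event, void *data)
	{
		/* on an atomic chain this routine must not sleep */
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_handler,
	};

	/* register/unregister: process context only */
	atomic_notifier_chain_register(&my_atomic_chain, &my_nb);
	atomic_notifier_chain_unregister(&my_atomic_chain, &my_nb);

	/* calling: any context for atomic chains */
	atomic_notifier_call_chain(&my_atomic_chain, MY_EVENT, NULL);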

There are some limitations, which should not be too hard to live with.  For
atomic/blocking chains, registration and unregistration must always be done in
a process context since the chain is protected by a mutex/rwsem.  Also, a
callout routine for a non-raw chain must not try to register or unregister
entries on its own chain.  (This did happen in a couple of places and the code
had to be changed to avoid it.)

Since atomic chains may be called from within an NMI handler, they cannot use
spinlocks for synchronization.  Instead we use RCU.  The overhead falls almost
entirely in the unregister routine, which is okay since unregistration is much
less frequent than calling a chain.

Here is the list of chains that we adjusted and their classifications.  None
of them use the raw API, so for the moment it is only a placeholder.

  ATOMIC CHAINS
  -------------
arch/i386/kernel/traps.c:		i386die_chain
arch/ia64/kernel/traps.c:		ia64die_chain
arch/powerpc/kernel/traps.c:		powerpc_die_chain
arch/sparc64/kernel/traps.c:		sparc64die_chain
arch/x86_64/kernel/traps.c:		die_chain
drivers/char/ipmi/ipmi_si_intf.c:	xaction_notifier_list
kernel/panic.c:				panic_notifier_list
kernel/profile.c:			task_free_notifier
net/bluetooth/hci_core.c:		hci_notifier
net/ipv4/netfilter/ip_conntrack_core.c:	ip_conntrack_chain
net/ipv4/netfilter/ip_conntrack_core.c:	ip_conntrack_expect_chain
net/ipv6/addrconf.c:			inet6addr_chain
net/netfilter/nf_conntrack_core.c:	nf_conntrack_chain
net/netfilter/nf_conntrack_core.c:	nf_conntrack_expect_chain
net/netlink/af_netlink.c:		netlink_chain

  BLOCKING CHAINS
  ---------------
arch/powerpc/platforms/pseries/reconfig.c:	pSeries_reconfig_chain
arch/s390/kernel/process.c:		idle_chain
arch/x86_64/kernel/process.c:		idle_notifier
drivers/base/memory.c:			memory_chain
drivers/cpufreq/cpufreq.c:		cpufreq_policy_notifier_list
drivers/cpufreq/cpufreq.c:		cpufreq_transition_notifier_list
drivers/macintosh/adb.c:		adb_client_list
drivers/macintosh/via-pmu.c:		sleep_notifier_list
drivers/macintosh/via-pmu68k.c:		sleep_notifier_list
drivers/macintosh/windfarm_core.c:	wf_client_list
drivers/usb/core/notify.c:		usb_notifier_list
drivers/video/fbmem.c:			fb_notifier_list
kernel/cpu.c:				cpu_chain
kernel/module.c:			module_notify_list
kernel/profile.c:			munmap_notifier
kernel/profile.c:			task_exit_notifier
kernel/sys.c:				reboot_notifier_list
net/core/dev.c:				netdev_chain
net/decnet/dn_dev.c:			dnaddr_chain
net/ipv4/devinet.c:			inetaddr_chain

It's possible that some of these classifications are wrong.  If they are,
please let us know or submit a patch to fix them.  Note that any chain that
gets called very frequently should be atomic, because the rwsem read-locking
used for blocking chains is very likely to incur cache misses on SMP systems.
(However, if the chain's callout routines may sleep then the chain cannot be
atomic.)

The patch set was written by Alan Stern and Chandra Seetharaman, incorporating
material written by Keith Owens and suggestions from Paul McKenney and Andrew
Morton.

[jes@sgi.com: restructure the notifier chain initialization macros]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 arch/alpha/kernel/setup.c                   |   5 +-
 arch/arm/mach-omap1/board-netstar.c         |   2 +-
 arch/arm/mach-omap1/board-voiceblue.c       |   2 +-
 arch/i386/kernel/traps.c                    |  17 +-
 arch/ia64/kernel/traps.c                    |   6 +-
 arch/mips/lasat/setup.c                     |   3 +-
 arch/mips/sgi-ip22/ip22-reset.c             |   2 +-
 arch/mips/sgi-ip32/ip32-reset.c             |   2 +-
 arch/parisc/kernel/pdc_chassis.c            |   3 +-
 arch/powerpc/kernel/setup_64.c              |   3 +-
 arch/powerpc/kernel/traps.c                 |  16 +-
 arch/powerpc/platforms/pseries/reconfig.c   |  10 +-
 arch/ppc/platforms/prep_setup.c             |   2 +-
 arch/s390/kernel/process.c                  |  11 +-
 arch/sparc64/kernel/traps.c                 |  17 +-
 arch/um/drivers/mconsole_kern.c             |   3 +-
 arch/um/kernel/um_arch.c                    |   3 +-
 arch/x86_64/kernel/process.c                |  17 +-
 arch/x86_64/kernel/traps.c                  |  18 +-
 arch/xtensa/platform-iss/setup.c            |   2 +-
 drivers/base/memory.c                       |   8 +-
 drivers/char/ipmi/ipmi_msghandler.c         |   4 +-
 drivers/char/ipmi/ipmi_si_intf.c            |   7 +-
 drivers/char/ipmi/ipmi_watchdog.c           |   6 +-
 drivers/cpufreq/cpufreq.c                   |  61 +++---
 drivers/firmware/dcdbas.c                   |  19 +-
 drivers/macintosh/adb.c                     |  11 +-
 drivers/macintosh/adbhid.c                  |   3 +-
 drivers/macintosh/via-pmu.c                 |   2 +-
 drivers/macintosh/via-pmu68k.c              |   7 +-
 drivers/macintosh/windfarm_core.c           |   8 +-
 drivers/misc/ibmasm/heartbeat.c             |   5 +-
 drivers/net/bonding/bond_main.c             |   2 +-
 drivers/parisc/led.c                        |  14 +-
 drivers/parisc/power.c                      |   6 +-
 drivers/scsi/gdth.c                         |   9 +-
 drivers/usb/core/notify.c                   |  65 +-----
 drivers/video/fbmem.c                       |  31 +--
 include/asm-i386/kdebug.h                   |  10 +-
 include/asm-ia64/kdebug.h                   |   4 +-
 include/asm-powerpc/kdebug.h                |  12 +-
 include/asm-sparc64/kdebug.h                |  11 +-
 include/asm-x86_64/kdebug.h                 |  23 +-
 include/linux/adb.h                         |   2 +-
 include/linux/kernel.h                      |   2 +-
 include/linux/memory.h                      |   1 -
 include/linux/netfilter_ipv4/ip_conntrack.h |  17 +-
 include/linux/notifier.h                    |  96 +++++++-
 include/net/netfilter/nf_conntrack.h        |  17 +-
 kernel/cpu.c                                |  29 +--
 kernel/module.c                             |  20 +-
 kernel/panic.c                              |   4 +-
 kernel/profile.c                            |  53 ++---
 kernel/softlockup.c                         |   2 +-
 kernel/sys.c                                | 327 ++++++++++++++++++++++------
 net/bluetooth/hci_core.c                    |   8 +-
 net/core/dev.c                              |  42 ++--
 net/decnet/dn_dev.c                         |  10 +-
 net/ipv4/devinet.c                          |  16 +-
 net/ipv4/netfilter/ip_conntrack_core.c      |   6 +-
 net/ipv6/addrconf.c                         |  10 +-
 net/netfilter/nf_conntrack_core.c           |   6 +-
 net/netlink/af_netlink.c                    |   9 +-
 63 files changed, 677 insertions(+), 472 deletions(-)

diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 9402624453c..dd876967059 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -43,7 +43,7 @@
 #include <asm/setup.h>
 #include <asm/io.h>
 
-extern struct notifier_block *panic_notifier_list;
+extern struct atomic_notifier_head panic_notifier_list;
 static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
 static struct notifier_block alpha_panic_block = {
 	alpha_panic_event,
@@ -500,7 +500,8 @@ setup_arch(char **cmdline_p)
 	}
 
 	/* Register a call for panic conditions. */
-	notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&alpha_panic_block);
 
 #ifdef CONFIG_ALPHA_GENERIC
 	/* Assume that we've booted from SRM if we haven't booted from MILO.
diff --git a/arch/arm/mach-omap1/board-netstar.c b/arch/arm/mach-omap1/board-netstar.c
index 60d5f8a3339..7520e602d7a 100644
--- a/arch/arm/mach-omap1/board-netstar.c
+++ b/arch/arm/mach-omap1/board-netstar.c
@@ -141,7 +141,7 @@ static int __init netstar_late_init(void)
 	/* TODO: Setup front panel switch here */
 
 	/* Setup panic notifier */
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index bfd5fdd1a87..52e4a9d6964 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -235,7 +235,7 @@ static struct notifier_block panic_block = {
 static int __init voiceblue_setup(void)
 {
 	/* Setup panic notifier */
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
 	return 0;
 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 4624f8ca245..6b63a5aa1e4 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -92,22 +92,21 @@ asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void machine_check(void);
 
 static int kstack_depth_to_print = 24;
-struct notifier_block *i386die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(i386die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
-
 	vmalloc_sync_all();
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&i386die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&i386die_chain, nb);
 }
 EXPORT_SYMBOL(register_die_notifier);
 
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&i386die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
 static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 {
 	return	p > (void *)tinfo &&
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index dabd6c32641..7c1ddc8ac44 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -30,19 +30,19 @@ extern spinlock_t timerlist_lock;
 fpswa_interface_t *fpswa_interface;
 EXPORT_SYMBOL(fpswa_interface);
 
-struct notifier_block *ia64die_chain;
+ATOMIC_NOTIFIER_HEAD(ia64die_chain);
 
 int
 register_die_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&ia64die_chain, nb);
+	return atomic_notifier_chain_register(&ia64die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
 
 int
 unregister_die_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&ia64die_chain, nb);
+	return atomic_notifier_chain_unregister(&ia64die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(unregister_die_notifier);
 
diff --git a/arch/mips/lasat/setup.c b/arch/mips/lasat/setup.c
index 83eb08b7a07..e9e9a89c674 100644
--- a/arch/mips/lasat/setup.c
+++ b/arch/mips/lasat/setup.c
@@ -165,7 +165,8 @@ void __init plat_setup(void)
 
 	/* Set up panic notifier */
 	for (i = 0; i < sizeof(lasat_panic_block) / sizeof(struct notifier_block); i++)
-		notifier_chain_register(&panic_notifier_list, &lasat_panic_block[i]);
+		atomic_notifier_chain_register(&panic_notifier_list,
+				&lasat_panic_block[i]);
 
 	lasat_reboot_setup();
 
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 92a3b3c15ed..a9c58e067b5 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -238,7 +238,7 @@ static int __init reboot_setup(void)
 	request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL);
 	init_timer(&blink_timer);
 	blink_timer.function = blink_timeout;
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
 	return 0;
 }
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 0c948008b02..ab9d9cef089 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -193,7 +193,7 @@ static __init int ip32_reboot_setup(void)
 
 	init_timer(&blink_timer);
 	blink_timer.function = blink_timeout;
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
 	request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL);
 
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index 2a01fe1bdc9..0cea6958f42 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -150,7 +150,8 @@ void __init parisc_pdc_chassis_init(void)
 
 		if (handle) {
 			/* initialize panic notifier chain */
-			notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+			atomic_notifier_chain_register(&panic_notifier_list,
+					&pdc_chassis_panic_block);
 
 			/* initialize reboot notifier chain */
 			register_reboot_notifier(&pdc_chassis_reboot_block);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2f3fdad3559..e20c1fae342 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -579,7 +579,8 @@ void __init setup_arch(char **cmdline_p)
 	panic_timeout = 180;
 
 	if (ppc_md.panic)
-		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
+		atomic_notifier_chain_register(&panic_notifier_list,
+				&ppc64_panic_block);
 
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) _etext;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 98660aedeeb..9763faab673 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -74,19 +74,19 @@ EXPORT_SYMBOL(__debugger_dabr_match);
 EXPORT_SYMBOL(__debugger_fault_handler);
 #endif
 
-struct notifier_block *powerpc_die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
+	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
 
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&powerpc_die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
 }
+EXPORT_SYMBOL(unregister_die_notifier);
 
 /*
  * Trap & Exception support
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 86cfa6ecdcf..5ad90676567 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -94,16 +94,16 @@ static struct device_node *derive_parent(const char *path)
 	return parent;
 }
 
-static struct notifier_block *pSeries_reconfig_chain;
+static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
 
 int pSeries_reconfig_notifier_register(struct notifier_block *nb)
 {
-	return notifier_chain_register(&pSeries_reconfig_chain, nb);
+	return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb);
 }
 
 void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
 {
-	notifier_chain_unregister(&pSeries_reconfig_chain, nb);
+	blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
 }
 
 static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
@@ -131,7 +131,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 		goto out_err;
 	}
 
-	err = notifier_call_chain(&pSeries_reconfig_chain,
+	err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
 				  PSERIES_RECONFIG_ADD, np);
 	if (err == NOTIFY_BAD) {
 		printk(KERN_ERR "Failed to add device node %s\n", path);
@@ -171,7 +171,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
 
 	remove_node_proc_entries(np);
 
-	notifier_call_chain(&pSeries_reconfig_chain,
+	blocking_notifier_call_chain(&pSeries_reconfig_chain,
 			    PSERIES_RECONFIG_REMOVE, np);
 	of_detach_node(np);
 
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index a0fc628ffb1..d95c05d9824 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -736,7 +736,7 @@ ibm_statusled_progress(char *s, unsigned short hex)
 		hex = 0xfff;
 		if (!notifier_installed) {
 			++notifier_installed;
-			notifier_chain_register(&panic_notifier_list,
+			atomic_notifier_chain_register(&panic_notifier_list,
 						&ibm_statusled_block);
 		}
 	}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 99182a415fe..4a0f5a1551e 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -76,17 +76,17 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 /*
  * Need to know about CPUs going idle?
  */
-static struct notifier_block *idle_chain;
+static ATOMIC_NOTIFIER_HEAD(idle_chain);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&idle_chain, nb);
+	return atomic_notifier_chain_register(&idle_chain, nb);
 }
 EXPORT_SYMBOL(register_idle_notifier);
 
 int unregister_idle_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&idle_chain, nb);
+	return atomic_notifier_chain_unregister(&idle_chain, nb);
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
@@ -95,7 +95,7 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
 	/* disable monitor call class 0 */
 	__ctl_clear_bit(8, 15);
 
-	notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
+	atomic_notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
 			    (void *)(long) smp_processor_id());
 }
 
@@ -116,7 +116,8 @@ static void default_idle(void)
 		return;
 	}
 
-	rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
+	rc = atomic_notifier_call_chain(&idle_chain,
+			CPU_IDLE, (void *)(long) cpu);
 	if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
 		BUG();
 	if (rc != NOTIFY_OK) {
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index df612e4f75f..ff090bb9734 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -43,18 +43,19 @@
 #include <linux/kmod.h>
 #endif
 
-struct notifier_block *sparc64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(sparc64die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&sparc64die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&sparc64die_chain, nb);
 }
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&sparc64die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 54388d10bcf..1488816588e 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -762,7 +762,8 @@ static struct notifier_block panic_exit_notifier = {
 
 static int add_notifier(void)
 {
-	notifier_chain_register(&panic_notifier_list, &panic_exit_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_exit_notifier);
 	return(0);
 }
 
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index bb1c87211ac..7d51dd7201c 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -477,7 +477,8 @@ static struct notifier_block panic_exit_notifier = {
 
 void __init setup_arch(char **cmdline_p)
 {
-	notifier_chain_register(&panic_notifier_list, &panic_exit_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_exit_notifier);
 	paging_init();
         strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
  	*cmdline_p = command_line;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 0370720515f..70dd8e5c688 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -66,24 +66,17 @@ EXPORT_SYMBOL(boot_option_idle_override);
 void (*pm_idle)(void);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
-static struct notifier_block *idle_notifier;
-static DEFINE_SPINLOCK(idle_notifier_lock);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_register(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_register(&idle_notifier, n);
 }
 EXPORT_SYMBOL_GPL(idle_notifier_register);
 
 void idle_notifier_unregister(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_unregister(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_unregister(&idle_notifier, n);
 }
 EXPORT_SYMBOL(idle_notifier_unregister);
 
@@ -93,13 +86,13 @@ static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
 void enter_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
 static void __exit_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 
 /* Called from interrupts to signify idle end */
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7b148309c52..edaa9fe654d 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -69,20 +69,20 @@ asmlinkage void alignment_check(void);
 asmlinkage void machine_check(void);
 asmlinkage void spurious_interrupt_bug(void);
 
-struct notifier_block *die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
-
 	vmalloc_sync_all();
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&die_chain, nb);
 }
+EXPORT_SYMBOL(unregister_die_notifier);
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
diff --git a/arch/xtensa/platform-iss/setup.c b/arch/xtensa/platform-iss/setup.c
index 2e6dcbf0cc0..23790a5610e 100644
--- a/arch/xtensa/platform-iss/setup.c
+++ b/arch/xtensa/platform-iss/setup.c
@@ -108,5 +108,5 @@ static struct notifier_block iss_panic_block = {
 
 void __init platform_setup(char **p_cmdline)
 {
-	notifier_chain_register(&panic_notifier_list, &iss_panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
 }
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 105a0d61eb1..dd547af4681 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -47,16 +47,16 @@ static struct kset_uevent_ops memory_uevent_ops = {
 	.uevent		= memory_uevent,
 };
 
-static struct notifier_block *memory_chain;
+static BLOCKING_NOTIFIER_HEAD(memory_chain);
 
 int register_memory_notifier(struct notifier_block *nb)
 {
-        return notifier_chain_register(&memory_chain, nb);
+        return blocking_notifier_chain_register(&memory_chain, nb);
 }
 
 void unregister_memory_notifier(struct notifier_block *nb)
 {
-        notifier_chain_unregister(&memory_chain, nb);
+        blocking_notifier_chain_unregister(&memory_chain, nb);
 }
 
 /*
@@ -140,7 +140,7 @@ static ssize_t show_mem_state(struct sys_device *dev, char *buf)
 
 static inline int memory_notify(unsigned long val, void *v)
 {
-	return notifier_call_chain(&memory_chain, val, v);
+	return blocking_notifier_call_chain(&memory_chain, val, v);
 }
 
 /*
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index b8fb87c6c29..40eb005b9d7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3744,7 +3744,7 @@ static int ipmi_init_msghandler(void)
 	ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
 	add_timer(&ipmi_timer);
 
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
 	initialized = 1;
 
@@ -3764,7 +3764,7 @@ static __exit void cleanup_ipmi(void)
 	if (!initialized)
 		return;
 
-	notifier_chain_unregister(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
 
 	/* This can't be called if any interfaces exist, so no worry about
 	   shutting down the interfaces. */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 12f858dc999..35fbd4d8ed4 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -237,10 +237,10 @@ struct smi_info
 
 static int try_smi_init(struct smi_info *smi);
 
-static struct notifier_block *xaction_notifier_list;
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block * nb)
 {
-	return notifier_chain_register(&xaction_notifier_list, nb);
+	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 }
 
 static void si_restart_short_timer(struct smi_info *smi_info);
@@ -302,7 +302,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 		do_gettimeofday(&t);
 		printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
-		err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
+		err = atomic_notifier_call_chain(&xaction_notifier_list,
+				0, smi_info);
 		if (err & NOTIFY_STOP_MASK) {
 			rv = SI_SM_CALL_WITHOUT_DELAY;
 			goto out;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 616539310d9..7ece9f3c8f7 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1158,7 +1158,8 @@ static int __init ipmi_wdog_init(void)
 	}
 
 	register_reboot_notifier(&wdog_reboot_notifier);
-	notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&wdog_panic_notifier);
 
 	printk(KERN_INFO PFX "driver initialized\n");
 
@@ -1176,7 +1177,8 @@ static __exit void ipmi_unregister_watchdog(void)
 		release_nmi(&ipmi_nmi_handler);
 #endif
 
-	notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+			&wdog_panic_notifier);
 	unregister_reboot_notifier(&wdog_reboot_notifier);
 
 	if (! watchdog_user)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index aed80e6aec6..9b6ae7dc8b8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -52,9 +52,8 @@ static void handle_update(void *data);
  * changes to devices when the CPU clock speed changes.
  * The mutex locks both lists.
  */
-static struct notifier_block *cpufreq_policy_notifier_list;
-static struct notifier_block *cpufreq_transition_notifier_list;
-static DECLARE_RWSEM (cpufreq_notifier_rwsem);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
 
 
 static LIST_HEAD(cpufreq_governor_list);
@@ -247,8 +246,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 	dprintk("notification %u of frequency transition to %u kHz\n",
 		state, freqs->new);
 
-	down_read(&cpufreq_notifier_rwsem);
-
 	policy = cpufreq_cpu_data[freqs->cpu];
 	switch (state) {
 
@@ -266,20 +263,19 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 				freqs->old = policy->cur;
 			}
 		}
-		notifier_call_chain(&cpufreq_transition_notifier_list,
-					CPUFREQ_PRECHANGE, freqs);
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+				CPUFREQ_PRECHANGE, freqs);
 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 		break;
 
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-		notifier_call_chain(&cpufreq_transition_notifier_list,
-					CPUFREQ_POSTCHANGE, freqs);
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+				CPUFREQ_POSTCHANGE, freqs);
 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
 			policy->cur = freqs->new;
 		break;
 	}
-	up_read(&cpufreq_notifier_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
@@ -1007,7 +1003,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
 		freqs.old = cpu_policy->cur;
 		freqs.new = cur_freq;
 
-		notifier_call_chain(&cpufreq_transition_notifier_list,
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
 				    CPUFREQ_SUSPENDCHANGE, &freqs);
 		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
 
@@ -1088,7 +1084,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
 			freqs.old = cpu_policy->cur;
 			freqs.new = cur_freq;
 
-			notifier_call_chain(&cpufreq_transition_notifier_list,
+			blocking_notifier_call_chain(
+					&cpufreq_transition_notifier_list,
 					CPUFREQ_RESUMECHANGE, &freqs);
 			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
 
@@ -1125,24 +1122,24 @@ static struct sysdev_driver cpufreq_sysdev_driver = {
  *      changes in cpufreq policy.
  *
  *	This function may sleep, and has the same return conditions as
- *	notifier_chain_register.
+ *	blocking_notifier_chain_register.
  */
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
-	down_write(&cpufreq_notifier_rwsem);
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
-		ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb);
+		ret = blocking_notifier_chain_register(
+				&cpufreq_transition_notifier_list, nb);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
-		ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb);
+		ret = blocking_notifier_chain_register(
+				&cpufreq_policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
 	}
-	up_write(&cpufreq_notifier_rwsem);
 
 	return ret;
 }
@@ -1157,24 +1154,24 @@ EXPORT_SYMBOL(cpufreq_register_notifier);
  *	Remove a driver from the CPU frequency notifier list.
  *
  *	This function may sleep, and has the same return conditions as
- *	notifier_chain_unregister.
+ *	blocking_notifier_chain_unregister.
  */
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
-	down_write(&cpufreq_notifier_rwsem);
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
-		ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb);
+		ret = blocking_notifier_chain_unregister(
+				&cpufreq_transition_notifier_list, nb);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
-		ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb);
+		ret = blocking_notifier_chain_unregister(
+				&cpufreq_policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
 	}
-	up_write(&cpufreq_notifier_rwsem);
 
 	return ret;
 }
@@ -1346,29 +1343,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
 	if (ret)
 		goto error_out;
 
-	down_read(&cpufreq_notifier_rwsem);
-
 	/* adjust if necessary - all reasons */
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
-			    policy);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_ADJUST, policy);
 
 	/* adjust if necessary - hardware incompatibility*/
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
-			    policy);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_INCOMPATIBLE, policy);
 
 	/* verify the cpu speed can be set within this limit,
 	   which might be different to the first one */
 	ret = cpufreq_driver->verify(policy);
-	if (ret) {
-		up_read(&cpufreq_notifier_rwsem);
+	if (ret)
 		goto error_out;
-	}
 
 	/* notification of the new policy */
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
-			    policy);
-
-	up_read(&cpufreq_notifier_rwsem);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_NOTIFY, policy);
 
 	data->min = policy->min;
 	data->max = policy->max;
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index d6543fc4a92..339f405ff70 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -484,26 +484,15 @@ static void dcdbas_host_control(void)
 static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
 				void *unused)
 {
-	static unsigned int notify_cnt = 0;
-
 	switch (code) {
 	case SYS_DOWN:
 	case SYS_HALT:
 	case SYS_POWER_OFF:
 		if (host_control_on_shutdown) {
 			/* firmware is going to perform host control action */
-			if (++notify_cnt == 2) {
-				printk(KERN_WARNING
-				       "Please wait for shutdown "
-				       "action to complete...\n");
-				dcdbas_host_control();
-			}
-			/*
-			 * register again and initiate the host control
-			 * action on the second notification to allow
-			 * everyone that registered to be notified
-			 */
-			register_reboot_notifier(nb);
+			printk(KERN_WARNING "Please wait for shutdown "
+			       "action to complete...\n");
+			dcdbas_host_control();
 		}
 		break;
 	}
@@ -514,7 +503,7 @@ static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
 static struct notifier_block dcdbas_reboot_nb = {
 	.notifier_call = dcdbas_reboot_notify,
 	.next = NULL,
-	.priority = 0
+	.priority = INT_MIN
 };
 
 static DCDBAS_BIN_ATTR_RW(smi_data);
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index d2ead1776c1..34fcabac5fd 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -80,7 +80,7 @@ static struct adb_driver *adb_driver_list[] = {
 static struct class *adb_dev_class;
 
 struct adb_driver *adb_controller;
-struct notifier_block *adb_client_list = NULL;
+BLOCKING_NOTIFIER_HEAD(adb_client_list);
 static int adb_got_sleep;
 static int adb_inited;
 static pid_t adb_probe_task_pid;
@@ -354,7 +354,8 @@ adb_notify_sleep(struct pmu_sleep_notifier *self, int when)
 		/* Stop autopoll */
 		if (adb_controller->autopoll)
 			adb_controller->autopoll(0);
-		ret = notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL);
+		ret = blocking_notifier_call_chain(&adb_client_list,
+				ADB_MSG_POWERDOWN, NULL);
 		if (ret & NOTIFY_STOP_MASK) {
 			up(&adb_probe_mutex);
 			return PBOOK_SLEEP_REFUSE;
@@ -391,7 +392,8 @@ do_adb_reset_bus(void)
 	if (adb_controller->autopoll)
 		adb_controller->autopoll(0);
 
-	nret = notifier_call_chain(&adb_client_list, ADB_MSG_PRE_RESET, NULL);
+	nret = blocking_notifier_call_chain(&adb_client_list,
+			ADB_MSG_PRE_RESET, NULL);
 	if (nret & NOTIFY_STOP_MASK) {
 		if (adb_controller->autopoll)
 			adb_controller->autopoll(autopoll_devs);
@@ -426,7 +428,8 @@ do_adb_reset_bus(void)
 	}
 	up(&adb_handler_sem);
 
-	nret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL);
+	nret = blocking_notifier_call_chain(&adb_client_list,
+			ADB_MSG_POST_RESET, NULL);
 	if (nret & NOTIFY_STOP_MASK)
 		return -EBUSY;
 	
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index c0b46bceb5d..f5779a73184 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -1214,7 +1214,8 @@ static int __init adbhid_init(void)
 
 	adbhid_probe();
 
-	notifier_chain_register(&adb_client_list, &adbhid_adb_notifier);
+	blocking_notifier_chain_register(&adb_client_list,
+			&adbhid_adb_notifier);
 
 	return 0;
 }
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 4f5f3abc9cb..0b5ff553e39 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -187,7 +187,7 @@ extern int disable_kernel_backlight;
 
 int __fake_sleep;
 int asleep;
-struct notifier_block *sleep_notifier_list;
+BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
 
 #ifdef CONFIG_ADB
 static int adb_dev_map = 0;
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index f08e52f2107..35b70323e7e 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -102,7 +102,7 @@ static int pmu_kind = PMU_UNKNOWN;
 static int pmu_fully_inited = 0;
 
 int asleep;
-struct notifier_block *sleep_notifier_list;
+BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
 
 static int pmu_probe(void);
 static int pmu_init(void);
@@ -913,7 +913,8 @@ int powerbook_sleep(void)
 	struct adb_request sleep_req;
 
 	/* Notify device drivers */
-	ret = notifier_call_chain(&sleep_notifier_list, PBOOK_SLEEP, NULL);
+	ret = blocking_notifier_call_chain(&sleep_notifier_list,
+			PBOOK_SLEEP, NULL);
 	if (ret & NOTIFY_STOP_MASK)
 		return -EBUSY;
 
@@ -984,7 +985,7 @@ int powerbook_sleep(void)
 			enable_irq(i);
 
 	/* Notify drivers */
-	notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL);
+	blocking_notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL);
 
 	/* reenable ADB autopoll */
 	pmu_adb_autopoll(adb_dev_map);
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 6c0ba04bc57..ab3faa702d5 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -52,7 +52,7 @@
 static LIST_HEAD(wf_controls);
 static LIST_HEAD(wf_sensors);
 static DEFINE_MUTEX(wf_lock);
-static struct notifier_block *wf_client_list;
+static BLOCKING_NOTIFIER_HEAD(wf_client_list);
 static int wf_client_count;
 static unsigned int wf_overtemp;
 static unsigned int wf_overtemp_counter;
@@ -68,7 +68,7 @@ static struct platform_device wf_platform_device = {
 
 static inline void wf_notify(int event, void *param)
 {
-	notifier_call_chain(&wf_client_list, event, param);
+	blocking_notifier_call_chain(&wf_client_list, event, param);
 }
 
 int wf_critical_overtemp(void)
@@ -398,7 +398,7 @@ int wf_register_client(struct notifier_block *nb)
 	struct wf_sensor *sr;
 
 	mutex_lock(&wf_lock);
-	rc = notifier_chain_register(&wf_client_list, nb);
+	rc = blocking_notifier_chain_register(&wf_client_list, nb);
 	if (rc != 0)
 		goto bail;
 	wf_client_count++;
@@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(wf_register_client);
 int wf_unregister_client(struct notifier_block *nb)
 {
 	mutex_lock(&wf_lock);
-	notifier_chain_unregister(&wf_client_list, nb);
+	blocking_notifier_chain_unregister(&wf_client_list, nb);
 	wf_client_count++;
 	if (wf_client_count == 0)
 		wf_stop_thread();
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index f295401fac2..7fd7a43e38d 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -52,12 +52,13 @@ static struct notifier_block panic_notifier = { panic_happened, NULL, 1 };
 
 void ibmasm_register_panic_notifier(void)
 {
-	notifier_chain_register(&panic_notifier_list, &panic_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
 }
 
 void ibmasm_unregister_panic_notifier(void)
 {
-	notifier_chain_unregister(&panic_notifier_list, &panic_notifier);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+			&panic_notifier);
 }
 
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2d0ac169a86..f13a539dc16 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3159,7 +3159,7 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
  * bond_netdev_event: handle netdev notifier chain events.
  *
  * This function receives events for the netdev chain.  The caller (an
- * ioctl handler calling notifier_call_chain) holds the necessary
+ * ioctl handler calling blocking_notifier_call_chain) holds the necessary
  * locks for us to safely manipulate the slave devices (RTNL lock,
  * dev_probe_lock).
  */
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 3627a2d7f79..298f2ddb2c1 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -499,11 +499,16 @@ static int led_halt(struct notifier_block *, unsigned long, void *);
 static struct notifier_block led_notifier = {
 	.notifier_call = led_halt,
 };
+static int notifier_disabled = 0;
 
 static int led_halt(struct notifier_block *nb, unsigned long event, void *buf) 
 {
 	char *txt;
-	
+
+	if (notifier_disabled)
+		return NOTIFY_OK;
+
+	notifier_disabled = 1;
 	switch (event) {
 	case SYS_RESTART:	txt = "SYSTEM RESTART";
 				break;
@@ -527,7 +532,6 @@ static int led_halt(struct notifier_block *nb, unsigned long event, void *buf)
 		if (led_func_ptr)
 			led_func_ptr(0xff); /* turn all LEDs ON */
 	
-	unregister_reboot_notifier(&led_notifier);
 	return NOTIFY_OK;
 }
 
@@ -758,6 +762,12 @@ not_found:
 	return 1;
 }
 
+static void __exit led_exit(void)
+{
+	unregister_reboot_notifier(&led_notifier);
+	return;
+}
+
 #ifdef CONFIG_PROC_FS
 module_init(led_create_procfs)
 #endif
diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
index 54b2b7f20b9..0bcab83b408 100644
--- a/drivers/parisc/power.c
+++ b/drivers/parisc/power.c
@@ -251,7 +251,8 @@ static int __init power_init(void)
 	}
 
 	/* Register a call for panic conditions. */
-	notifier_chain_register(&panic_notifier_list, &parisc_panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&parisc_panic_block);
 
 	tasklet_enable(&power_tasklet);
 
@@ -264,7 +265,8 @@ static void __exit power_exit(void)
 		return;
 
 	tasklet_disable(&power_tasklet);
-	notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+			&parisc_panic_block);
 	power_tasklet.func = NULL;
 	pdc_soft_power_button(0);
 }
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 62e3cda859a..7f7013e80a8 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -671,7 +671,7 @@ static struct file_operations gdth_fops = {
 static struct notifier_block gdth_notifier = {
     gdth_halt, NULL, 0
 };
-
+static int notifier_disabled = 0;
 
 static void gdth_delay(int milliseconds)
 {
@@ -4595,13 +4595,13 @@ static int __init gdth_detect(struct scsi_host_template *shtp)
         add_timer(&gdth_timer);
 #endif
         major = register_chrdev(0,"gdth",&gdth_fops);
+        notifier_disabled = 0;
         register_reboot_notifier(&gdth_notifier);
     }
     gdth_polling = FALSE;
     return gdth_ctr_vcount;
 }
 
-
 static int gdth_release(struct Scsi_Host *shp)
 {
     int hanum;
@@ -5632,10 +5632,14 @@ static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
     char            cmnd[MAX_COMMAND_SIZE];   
 #endif
 
+    if (notifier_disabled)
+    	return NOTIFY_OK;
+
     TRACE2(("gdth_halt() event %d\n",(int)event));
     if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
         return NOTIFY_DONE;
 
+    notifier_disabled = 1;
     printk("GDT-HA: Flushing all host drives .. ");
     for (hanum = 0; hanum < gdth_ctr_count; ++hanum) {
         gdth_flush(hanum);
@@ -5679,7 +5683,6 @@ static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
 #ifdef GDTH_STATISTICS
     del_timer(&gdth_timer);
 #endif
-    unregister_reboot_notifier(&gdth_notifier);
     return NOTIFY_OK;
 }
 
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 4b55285de9a..fe0ed54fa0a 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -16,57 +16,7 @@
 #include <linux/mutex.h>
 #include "usb.h"
 
-
-static struct notifier_block *usb_notifier_list;
-static DEFINE_MUTEX(usb_notifier_lock);
-
-static void usb_notifier_chain_register(struct notifier_block **list,
-					struct notifier_block *n)
-{
-	mutex_lock(&usb_notifier_lock);
-	while (*list) {
-		if (n->priority > (*list)->priority)
-			break;
-		list = &((*list)->next);
-	}
-	n->next = *list;
-	*list = n;
-	mutex_unlock(&usb_notifier_lock);
-}
-
-static void usb_notifier_chain_unregister(struct notifier_block **nl,
-				   struct notifier_block *n)
-{
-	mutex_lock(&usb_notifier_lock);
-	while ((*nl)!=NULL) {
-		if ((*nl)==n) {
-			*nl = n->next;
-			goto exit;
-		}
-		nl=&((*nl)->next);
-	}
-exit:
-	mutex_unlock(&usb_notifier_lock);
-}
-
-static int usb_notifier_call_chain(struct notifier_block **n,
-				   unsigned long val, void *v)
-{
-	int ret=NOTIFY_DONE;
-	struct notifier_block *nb = *n;
-
-	mutex_lock(&usb_notifier_lock);
-	while (nb) {
-		ret = nb->notifier_call(nb,val,v);
-		if (ret&NOTIFY_STOP_MASK) {
-			goto exit;
-		}
-		nb = nb->next;
-	}
-exit:
-	mutex_unlock(&usb_notifier_lock);
-	return ret;
-}
+static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
 
 /**
  * usb_register_notify - register a notifier callback whenever a usb change happens
@@ -76,7 +26,7 @@ exit:
  */
 void usb_register_notify(struct notifier_block *nb)
 {
-	usb_notifier_chain_register(&usb_notifier_list, nb);
+	blocking_notifier_chain_register(&usb_notifier_list, nb);
 }
 EXPORT_SYMBOL_GPL(usb_register_notify);
 
@@ -89,27 +39,28 @@ EXPORT_SYMBOL_GPL(usb_register_notify);
  */
 void usb_unregister_notify(struct notifier_block *nb)
 {
-	usb_notifier_chain_unregister(&usb_notifier_list, nb);
+	blocking_notifier_chain_unregister(&usb_notifier_list, nb);
 }
 EXPORT_SYMBOL_GPL(usb_unregister_notify);
 
 
 void usb_notify_add_device(struct usb_device *udev)
 {
-	usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
+	blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
 }
 
 void usb_notify_remove_device(struct usb_device *udev)
 {
-	usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_REMOVE, udev);
+	blocking_notifier_call_chain(&usb_notifier_list,
+			USB_DEVICE_REMOVE, udev);
 }
 
 void usb_notify_add_bus(struct usb_bus *ubus)
 {
-	usb_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
+	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
 }
 
 void usb_notify_remove_bus(struct usb_bus *ubus)
 {
-	usb_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
+	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
 }
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 07d882b1439..b1a8dca7643 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -55,7 +55,7 @@
 
 #define FBPIXMAPSIZE	(1024 * 8)
 
-static struct notifier_block *fb_notifier_list;
+static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
 struct fb_info *registered_fb[FB_MAX];
 int num_registered_fb;
 
@@ -784,7 +784,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
 
 		    event.info = info;
 		    event.data = &mode1;
-		    ret = notifier_call_chain(&fb_notifier_list,
+		    ret = blocking_notifier_call_chain(&fb_notifier_list,
 					      FB_EVENT_MODE_DELETE, &event);
 		}
 
@@ -830,8 +830,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
 
 				info->flags &= ~FBINFO_MISC_USEREVENT;
 				event.info = info;
-				notifier_call_chain(&fb_notifier_list, evnt,
-						    &event);
+				blocking_notifier_call_chain(&fb_notifier_list,
+						evnt, &event);
 			}
 		}
 	}
@@ -854,7 +854,8 @@ fb_blank(struct fb_info *info, int blank)
 
 		event.info = info;
 		event.data = &blank;
-		notifier_call_chain(&fb_notifier_list, FB_EVENT_BLANK, &event);
+		blocking_notifier_call_chain(&fb_notifier_list,
+				FB_EVENT_BLANK, &event);
 	}
 
  	return ret;
@@ -925,7 +926,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 		con2fb.framebuffer = -1;
 		event.info = info;
 		event.data = &con2fb;
-		notifier_call_chain(&fb_notifier_list,
+		blocking_notifier_call_chain(&fb_notifier_list,
 				    FB_EVENT_GET_CONSOLE_MAP, &event);
 		return copy_to_user(argp, &con2fb,
 				    sizeof(con2fb)) ? -EFAULT : 0;
@@ -944,7 +945,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 		    return -EINVAL;
 		event.info = info;
 		event.data = &con2fb;
-		return notifier_call_chain(&fb_notifier_list,
+		return blocking_notifier_call_chain(&fb_notifier_list,
 					   FB_EVENT_SET_CONSOLE_MAP,
 					   &event);
 	case FBIOBLANK:
@@ -1324,7 +1325,7 @@ register_framebuffer(struct fb_info *fb_info)
 	devfs_mk_cdev(MKDEV(FB_MAJOR, i),
 			S_IFCHR | S_IRUGO | S_IWUGO, "fb/%d", i);
 	event.info = fb_info;
-	notifier_call_chain(&fb_notifier_list,
+	blocking_notifier_call_chain(&fb_notifier_list,
 			    FB_EVENT_FB_REGISTERED, &event);
 	return 0;
 }
@@ -1366,7 +1367,7 @@ unregister_framebuffer(struct fb_info *fb_info)
  */
 int fb_register_client(struct notifier_block *nb)
 {
-	return notifier_chain_register(&fb_notifier_list, nb);
+	return blocking_notifier_chain_register(&fb_notifier_list, nb);
 }
 
 /**
@@ -1375,7 +1376,7 @@ int fb_register_client(struct notifier_block *nb)
  */
 int fb_unregister_client(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&fb_notifier_list, nb);
+	return blocking_notifier_chain_unregister(&fb_notifier_list, nb);
 }
 
 /**
@@ -1393,11 +1394,13 @@ void fb_set_suspend(struct fb_info *info, int state)
 
 	event.info = info;
 	if (state) {
-		notifier_call_chain(&fb_notifier_list, FB_EVENT_SUSPEND, &event);
+		blocking_notifier_call_chain(&fb_notifier_list,
+				FB_EVENT_SUSPEND, &event);
 		info->state = FBINFO_STATE_SUSPENDED;
 	} else {
 		info->state = FBINFO_STATE_RUNNING;
-		notifier_call_chain(&fb_notifier_list, FB_EVENT_RESUME, &event);
+		blocking_notifier_call_chain(&fb_notifier_list,
+				FB_EVENT_RESUME, &event);
 	}
 }
 
@@ -1469,7 +1472,7 @@ int fb_new_modelist(struct fb_info *info)
 
 	if (!list_empty(&info->modelist)) {
 		event.info = info;
-		err = notifier_call_chain(&fb_notifier_list,
+		err = blocking_notifier_call_chain(&fb_notifier_list,
 					   FB_EVENT_NEW_MODELIST,
 					   &event);
 	}
@@ -1495,7 +1498,7 @@ int fb_con_duit(struct fb_info *info, int event, void *data)
 	evnt.info = info;
 	evnt.data = data;
 
-	return notifier_call_chain(&fb_notifier_list, event, &evnt);
+	return blocking_notifier_call_chain(&fb_notifier_list, event, &evnt);
 }
 EXPORT_SYMBOL(fb_con_duit);
 
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index 316138e8991..96d0828ce09 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -17,11 +17,9 @@ struct die_args {
 	int signr;
 };
 
-/* Note - you should never unregister because that can race with NMIs.
-   If you really want to do it first unregister - then synchronize_sched - then free.
-  */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *i386die_chain;
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head i386die_chain;
 
 
 /* Grossly misnamed. */
@@ -51,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str,
 		.trapnr = trap,
 		.signr = sig
 	};
-	return notifier_call_chain(&i386die_chain, val, &args);
+	return atomic_notifier_call_chain(&i386die_chain, val, &args);
 }
 
 #endif
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index 8b01a083dde..218c458ab60 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -40,7 +40,7 @@ struct die_args {
 
 extern int register_die_notifier(struct notifier_block *);
 extern int unregister_die_notifier(struct notifier_block *);
-extern struct notifier_block *ia64die_chain;
+extern struct atomic_notifier_head ia64die_chain;
 
 enum die_val {
 	DIE_BREAK = 1,
@@ -81,7 +81,7 @@ static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
 		.signr  = sig
 	};
 
-	return notifier_call_chain(&ia64die_chain, val, &args);
+	return atomic_notifier_call_chain(&ia64die_chain, val, &args);
 }
 
 #endif
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
index 7c16265568e..c01786ab5fa 100644
--- a/include/asm-powerpc/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -16,13 +16,9 @@ struct die_args {
 	int signr;
 };
 
-/*
-   Note - you should never unregister because that can race with NMIs.
-   If you really want to do it first unregister - then synchronize_sched -
-   then free.
- */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *powerpc_die_chain;
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head powerpc_die_chain;
 
 /* Grossly misnamed. */
 enum die_val {
@@ -37,7 +33,7 @@ enum die_val {
 static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
 {
 	struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
-	return notifier_call_chain(&powerpc_die_chain, val, &args);
+	return atomic_notifier_call_chain(&powerpc_die_chain, val, &args);
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 6321f5a0198..4040d127ac3 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -15,12 +15,9 @@ struct die_args {
 	int signr;
 };
 
-/* Note - you should never unregister because that can race with NMIs.
- * If you really want to do it first unregister - then synchronize_sched
- * - then free.
- */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *sparc64die_chain;
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head sparc64die_chain;
 
 extern void bad_trap(struct pt_regs *, long);
 
@@ -46,7 +43,7 @@ static inline int notify_die(enum die_val val,char *str, struct pt_regs *regs,
 				 .trapnr	= trap,
 				 .signr		= sig };
 
-	return notifier_call_chain(&sparc64die_chain, val, &args);
+	return atomic_notifier_call_chain(&sparc64die_chain, val, &args);
 }
 
 #endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b9ed4c0c878..cf795631d9b 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,21 +5,20 @@
 
 struct pt_regs;
 
-struct die_args { 
+struct die_args {
 	struct pt_regs *regs;
 	const char *str;
-	long err; 
+	long err;
 	int trapnr;
 	int signr;
-}; 
+};
+
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head die_chain;
 
-/* Note - you should never unregister because that can race with NMIs.
-   If you really want to do it first unregister - then synchronize_sched - then free.
-  */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *die_chain;
 /* Grossly misnamed. */
-enum die_val { 
+enum die_val {
 	DIE_OOPS = 1,
 	DIE_INT3,
 	DIE_DEBUG,
@@ -33,8 +32,8 @@ enum die_val {
 	DIE_CALL,
 	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
-}; 
-	
+};
+
 static inline int notify_die(enum die_val val, const char *str,
 			struct pt_regs *regs, long err, int trap, int sig)
 {
@@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str,
 		.trapnr = trap,
 		.signr = sig
 	};
-	return notifier_call_chain(&die_chain, val, &args); 
+	return atomic_notifier_call_chain(&die_chain, val, &args);
 } 
 
 extern int printk_address(unsigned long address);
diff --git a/include/linux/adb.h b/include/linux/adb.h
index e9fdc63483c..b7305b17827 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -85,7 +85,7 @@ enum adb_message {
     ADB_MSG_POST_RESET	/* Called after resetting the bus (re-do init & register) */
 };
 extern struct adb_driver *adb_controller;
-extern struct notifier_block *adb_client_list;
+extern struct blocking_notifier_head adb_client_list;
 
 int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
 		int flags, int nbytes, ...);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 03d6cfaa5b8..a3720f973ea 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -87,7 +87,7 @@ extern int cond_resched(void);
 		(__x < 0) ? -__x : __x;		\
 	})
 
-extern struct notifier_block *panic_notifier_list;
+extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
 	__attribute__ ((NORET_AND format (printf, 1, 2)));
diff --git a/include/linux/memory.h b/include/linux/memory.h
index e251dc43d0f..8f04143ca36 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -77,7 +77,6 @@ extern int remove_memory_block(unsigned long, struct mem_section *, int);
 
 #define CONFIG_MEM_BLOCK_SIZE	(PAGES_PER_SECTION<<PAGE_SHIFT)
 
-struct notifier_block;
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index f32d75c4f4c..d54d7b278e9 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -308,29 +308,30 @@ DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
 
 #define CONNTRACK_ECACHE(x)	(__get_cpu_var(ip_conntrack_ecache).x)
  
-extern struct notifier_block *ip_conntrack_chain;
-extern struct notifier_block *ip_conntrack_expect_chain;
+extern struct atomic_notifier_head ip_conntrack_chain;
+extern struct atomic_notifier_head ip_conntrack_expect_chain;
 
 static inline int ip_conntrack_register_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&ip_conntrack_chain, nb);
+	return atomic_notifier_chain_register(&ip_conntrack_chain, nb);
 }
 
 static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&ip_conntrack_chain, nb);
+	return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb);
 }
 
 static inline int 
 ip_conntrack_expect_register_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&ip_conntrack_expect_chain, nb);
+	return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb);
 }
 
 static inline int
 ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&ip_conntrack_expect_chain, nb);
+	return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain,
+			nb);
 }
 
 extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
@@ -355,14 +356,14 @@ static inline void ip_conntrack_event(enum ip_conntrack_events event,
 				      struct ip_conntrack *ct)
 {
 	if (is_confirmed(ct) && !is_dying(ct))
-		notifier_call_chain(&ip_conntrack_chain, event, ct);
+		atomic_notifier_call_chain(&ip_conntrack_chain, event, ct);
 }
 
 static inline void 
 ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
 			  struct ip_conntrack_expect *exp)
 {
-	notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
+	atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
 }
 #else /* CONFIG_IP_NF_CONNTRACK_EVENTS */
 static inline void ip_conntrack_event_cache(enum ip_conntrack_events event, 
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 5937dd6053c..51dbab9710c 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -10,25 +10,107 @@
 #ifndef _LINUX_NOTIFIER_H
 #define _LINUX_NOTIFIER_H
 #include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
 
-struct notifier_block
-{
-	int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
+/*
+ * Notifier chains are of three types:
+ *
+ *	Atomic notifier chains: Chain callbacks run in interrupt/atomic
+ *		context. Callouts are not allowed to block.
+ *	Blocking notifier chains: Chain callbacks run in process context.
+ *		Callouts are allowed to block.
+ *	Raw notifier chains: There are no restrictions on callbacks,
+ *		registration, or unregistration.  All locking and protection
+ *		must be provided by the caller.
+ *
+ * atomic_notifier_chain_register() may be called from an atomic context,
+ * but blocking_notifier_chain_register() must be called from a process
+ * context.  Ditto for the corresponding _unregister() routines.
+ *
+ * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister()
+ * _must not_ be called from within the call chain.
+ */
+
+struct notifier_block {
+	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
 	struct notifier_block *next;
 	int priority;
 };
 
+struct atomic_notifier_head {
+	spinlock_t lock;
+	struct notifier_block *head;
+};
+
+struct blocking_notifier_head {
+	struct rw_semaphore rwsem;
+	struct notifier_block *head;
+};
+
+struct raw_notifier_head {
+	struct notifier_block *head;
+};
+
+#define ATOMIC_INIT_NOTIFIER_HEAD(name) do {	\
+		spin_lock_init(&(name)->lock);	\
+		(name)->head = NULL;		\
+	} while (0)
+#define BLOCKING_INIT_NOTIFIER_HEAD(name) do {	\
+		init_rwsem(&(name)->rwsem);	\
+		(name)->head = NULL;		\
+	} while (0)
+#define RAW_INIT_NOTIFIER_HEAD(name) do {	\
+		(name)->head = NULL;		\
+	} while (0)
+
+#define ATOMIC_NOTIFIER_INIT(name) {				\
+		.lock = SPIN_LOCK_UNLOCKED,			\
+		.head = NULL }
+#define BLOCKING_NOTIFIER_INIT(name) {				\
+		.rwsem = __RWSEM_INITIALIZER((name).rwsem),	\
+		.head = NULL }
+#define RAW_NOTIFIER_INIT(name)	{				\
+		.head = NULL }
+
+#define ATOMIC_NOTIFIER_HEAD(name)				\
+	struct atomic_notifier_head name =			\
+		ATOMIC_NOTIFIER_INIT(name)
+#define BLOCKING_NOTIFIER_HEAD(name)				\
+	struct blocking_notifier_head name =			\
+		BLOCKING_NOTIFIER_INIT(name)
+#define RAW_NOTIFIER_HEAD(name)					\
+	struct raw_notifier_head name =				\
+		RAW_NOTIFIER_INIT(name)
 
 #ifdef __KERNEL__
 
-extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n);
-extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n);
-extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v);
+extern int atomic_notifier_chain_register(struct atomic_notifier_head *,
+		struct notifier_block *);
+extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
+		struct notifier_block *);
+extern int raw_notifier_chain_register(struct raw_notifier_head *,
+		struct notifier_block *);
+
+extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
+		struct notifier_block *);
+extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
+		struct notifier_block *);
+extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
+		struct notifier_block *);
+
+extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
+		unsigned long val, void *v);
+extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
+		unsigned long val, void *v);
+extern int raw_notifier_call_chain(struct raw_notifier_head *,
+		unsigned long val, void *v);
 
 #define NOTIFY_DONE		0x0000		/* Don't care */
 #define NOTIFY_OK		0x0001		/* Suits me */
 #define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
-#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)	/* Bad/Veto action	*/
+#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
+						/* Bad/Veto action */
 /*
  * Clean way to return from the notifier and stop further calls.
  */
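
The new heads are used much like the old bare pointers, but each chain now has a type that encodes its locking rules. A minimal sketch of a blocking-chain client, using only the API declared above (the callback, chain, and event names here are invented for illustration):

	static int my_callback(struct notifier_block *nb,
			       unsigned long event, void *data)
	{
		/* Runs once per event posted on the chain; may block. */
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call	= my_callback,
		.priority	= 0,	/* higher priority is called first */
	};

	static BLOCKING_NOTIFIER_HEAD(my_chain);	/* static initializer */

	/* Run-time initialization would instead be:
	 *	BLOCKING_INIT_NOTIFIER_HEAD(&my_chain);
	 */

	blocking_notifier_chain_register(&my_chain, &my_nb);
	blocking_notifier_call_chain(&my_chain, MY_EVENT, NULL);
	blocking_notifier_chain_unregister(&my_chain, &my_nb);

The same pattern applies to the atomic and raw variants via the corresponding atomic_* and raw_* routines.
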
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index b6f0905a4ee..916013ca4a5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -300,29 +300,30 @@ DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
 
 #define CONNTRACK_ECACHE(x)	(__get_cpu_var(nf_conntrack_ecache).x)
 
-extern struct notifier_block *nf_conntrack_chain;
-extern struct notifier_block *nf_conntrack_expect_chain;
+extern struct atomic_notifier_head nf_conntrack_chain;
+extern struct atomic_notifier_head nf_conntrack_expect_chain;
 
 static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&nf_conntrack_chain, nb);
+	return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
 }
 
 static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&nf_conntrack_chain, nb);
+	return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
 }
 
 static inline int
 nf_conntrack_expect_register_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&nf_conntrack_expect_chain, nb);
+	return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
 }
 
 static inline int
 nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&nf_conntrack_expect_chain, nb);
+	return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain,
+			nb);
 }
 
 extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
@@ -347,14 +348,14 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event,
 				      struct nf_conn *ct)
 {
 	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
-		notifier_call_chain(&nf_conntrack_chain, event, ct);
+		atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);
 }
 
 static inline void
 nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
 			  struct nf_conntrack_expect *exp)
 {
-	notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
+	atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
 }
 #else /* CONFIG_NF_CONNTRACK_EVENTS */
 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8be22bd8093..fe2b8d0bfe4 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -18,7 +18,7 @@
 /* This protects CPUs going up and down... */
 static DECLARE_MUTEX(cpucontrol);
 
-static struct notifier_block *cpu_chain;
+static BLOCKING_NOTIFIER_HEAD(cpu_chain);
 
 #ifdef CONFIG_HOTPLUG_CPU
 static struct task_struct *lock_cpu_hotplug_owner;
@@ -71,21 +71,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
-	int ret;
-
-	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
-		return ret;
-	ret = notifier_chain_register(&cpu_chain, nb);
-	unlock_cpu_hotplug();
-	return ret;
+	return blocking_notifier_chain_register(&cpu_chain, nb);
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	lock_cpu_hotplug();
-	notifier_chain_unregister(&cpu_chain, nb);
-	unlock_cpu_hotplug();
+	blocking_notifier_chain_unregister(&cpu_chain, nb);
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -141,7 +133,7 @@ int cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+	err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
 		printk("%s: attempt to take down CPU %u failed\n",
@@ -159,7 +151,7 @@ int cpu_down(unsigned int cpu)
 	p = __stop_machine_run(take_cpu_down, NULL, cpu);
 	if (IS_ERR(p)) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+		if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
 				(void *)(long)cpu) == NOTIFY_BAD)
 			BUG();
 
@@ -182,8 +174,8 @@ int cpu_down(unsigned int cpu)
 	put_cpu();
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
-	    == NOTIFY_BAD)
+	if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
+			(void *)(long)cpu) == NOTIFY_BAD)
 		BUG();
 
 	check_for_tasks(cpu);
@@ -211,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out;
 	}
 
-	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
@@ -226,11 +218,12 @@ int __devinit cpu_up(unsigned int cpu)
 	BUG_ON(!cpu_online(cpu));
 
 	/* Now call notifier in preparation. */
-	notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+	blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
 
 out_notify:
 	if (ret != 0)
-		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
+		blocking_notifier_call_chain(&cpu_chain,
+				CPU_UP_CANCELED, hcpu);
 out:
 	unlock_cpu_hotplug();
 	return ret;
diff --git a/kernel/module.c b/kernel/module.c
index ddfe45ac2fd..4fafd58038a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -64,26 +64,17 @@ static DEFINE_SPINLOCK(modlist_lock);
 static DEFINE_MUTEX(module_mutex);
 static LIST_HEAD(modules);
 
-static DEFINE_MUTEX(notify_mutex);
-static struct notifier_block * module_notify_list;
+static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
 int register_module_notifier(struct notifier_block * nb)
 {
-	int err;
-	mutex_lock(&notify_mutex);
-	err = notifier_chain_register(&module_notify_list, nb);
-	mutex_unlock(&notify_mutex);
-	return err;
+	return blocking_notifier_chain_register(&module_notify_list, nb);
 }
 EXPORT_SYMBOL(register_module_notifier);
 
 int unregister_module_notifier(struct notifier_block * nb)
 {
-	int err;
-	mutex_lock(&notify_mutex);
-	err = notifier_chain_unregister(&module_notify_list, nb);
-	mutex_unlock(&notify_mutex);
-	return err;
+	return blocking_notifier_chain_unregister(&module_notify_list, nb);
 }
 EXPORT_SYMBOL(unregister_module_notifier);
 
@@ -1816,9 +1807,8 @@ sys_init_module(void __user *umod,
 	/* Drop lock so they can recurse */
 	mutex_unlock(&module_mutex);
 
-	mutex_lock(&notify_mutex);
-	notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod);
-	mutex_unlock(&notify_mutex);
+	blocking_notifier_call_chain(&module_notify_list,
+			MODULE_STATE_COMING, mod);
 
 	/* Start the module */
 	if (mod->init != NULL)
diff --git a/kernel/panic.c b/kernel/panic.c
index acd95adddb9..f895c7c01d5 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(pause_on_oops_lock);
 int panic_timeout;
 EXPORT_SYMBOL(panic_timeout);
 
-struct notifier_block *panic_notifier_list;
+ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
 EXPORT_SYMBOL(panic_notifier_list);
 
@@ -97,7 +97,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 	smp_send_stop();
 #endif
 
-	notifier_call_chain(&panic_notifier_list, 0, buf);
+	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
 	if (!panic_blink)
 		panic_blink = no_blink;
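
The panic chain shows why the atomic variant exists: panic() can run in interrupt context with interrupts disabled, so its callouts must never sleep. A client of the converted chain registers roughly as follows (the handler body is hypothetical; the registration call matches the softlockup conversion below):

	static int my_panic_event(struct notifier_block *nb,
				  unsigned long event, void *buf)
	{
		/* buf points at the panic message; must not block here. */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_panic_block = {
		.notifier_call = my_panic_event,
	};

	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_block);
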
diff --git a/kernel/profile.c b/kernel/profile.c
index ad81f799a9b..5a730fdb1a2 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -87,72 +87,52 @@ void __init profile_init(void)
  
 #ifdef CONFIG_PROFILING
  
-static DECLARE_RWSEM(profile_rwsem);
-static DEFINE_RWLOCK(handoff_lock);
-static struct notifier_block * task_exit_notifier;
-static struct notifier_block * task_free_notifier;
-static struct notifier_block * munmap_notifier;
+static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
  
 void profile_task_exit(struct task_struct * task)
 {
-	down_read(&profile_rwsem);
-	notifier_call_chain(&task_exit_notifier, 0, task);
-	up_read(&profile_rwsem);
+	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
 }
  
 int profile_handoff_task(struct task_struct * task)
 {
 	int ret;
-	read_lock(&handoff_lock);
-	ret = notifier_call_chain(&task_free_notifier, 0, task);
-	read_unlock(&handoff_lock);
+	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
 	return (ret == NOTIFY_OK) ? 1 : 0;
 }
 
 void profile_munmap(unsigned long addr)
 {
-	down_read(&profile_rwsem);
-	notifier_call_chain(&munmap_notifier, 0, (void *)addr);
-	up_read(&profile_rwsem);
+	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
 }
 
 int task_handoff_register(struct notifier_block * n)
 {
-	int err = -EINVAL;
-
-	write_lock(&handoff_lock);
-	err = notifier_chain_register(&task_free_notifier, n);
-	write_unlock(&handoff_lock);
-	return err;
+	return atomic_notifier_chain_register(&task_free_notifier, n);
 }
 
 int task_handoff_unregister(struct notifier_block * n)
 {
-	int err = -EINVAL;
-
-	write_lock(&handoff_lock);
-	err = notifier_chain_unregister(&task_free_notifier, n);
-	write_unlock(&handoff_lock);
-	return err;
+	return atomic_notifier_chain_unregister(&task_free_notifier, n);
 }
 
 int profile_event_register(enum profile_type type, struct notifier_block * n)
 {
 	int err = -EINVAL;
  
-	down_write(&profile_rwsem);
- 
 	switch (type) {
 		case PROFILE_TASK_EXIT:
-			err = notifier_chain_register(&task_exit_notifier, n);
+			err = blocking_notifier_chain_register(
+					&task_exit_notifier, n);
 			break;
 		case PROFILE_MUNMAP:
-			err = notifier_chain_register(&munmap_notifier, n);
+			err = blocking_notifier_chain_register(
+					&munmap_notifier, n);
 			break;
 	}
  
-	up_write(&profile_rwsem);
- 
 	return err;
 }
 
@@ -161,18 +141,17 @@ int profile_event_unregister(enum profile_type type, struct notifier_block * n)
 {
 	int err = -EINVAL;
  
-	down_write(&profile_rwsem);
- 
 	switch (type) {
 		case PROFILE_TASK_EXIT:
-			err = notifier_chain_unregister(&task_exit_notifier, n);
+			err = blocking_notifier_chain_unregister(
+					&task_exit_notifier, n);
 			break;
 		case PROFILE_MUNMAP:
-			err = notifier_chain_unregister(&munmap_notifier, n);
+			err = blocking_notifier_chain_unregister(
+					&munmap_notifier, n);
 			break;
 	}
 
-	up_write(&profile_rwsem);
 	return err;
 }
 
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9b3d5847ed..ced91e1ff56 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -152,5 +152,5 @@ __init void spawn_softlockup_task(void)
 	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
 
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index 38bc73ede2b..c93d37f71ae 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -95,99 +95,304 @@ int cad_pid = 1;
  *	and the like. 
  */
 
-static struct notifier_block *reboot_notifier_list;
-static DEFINE_RWLOCK(notifier_lock);
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
+/*
+ *	Notifier chain core routines.  The exported routines below
+ *	are layered on top of these, with appropriate locking added.
+ */
+
+static int notifier_chain_register(struct notifier_block **nl,
+		struct notifier_block *n)
+{
+	while ((*nl) != NULL) {
+		if (n->priority > (*nl)->priority)
+			break;
+		nl = &((*nl)->next);
+	}
+	n->next = *nl;
+	rcu_assign_pointer(*nl, n);
+	return 0;
+}
+
+static int notifier_chain_unregister(struct notifier_block **nl,
+		struct notifier_block *n)
+{
+	while ((*nl) != NULL) {
+		if ((*nl) == n) {
+			rcu_assign_pointer(*nl, n->next);
+			return 0;
+		}
+		nl = &((*nl)->next);
+	}
+	return -ENOENT;
+}
+
+static int __kprobes notifier_call_chain(struct notifier_block **nl,
+		unsigned long val, void *v)
+{
+	int ret = NOTIFY_DONE;
+	struct notifier_block *nb;
+
+	nb = rcu_dereference(*nl);
+	while (nb) {
+		ret = nb->notifier_call(nb, val, v);
+		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+			break;
+		nb = rcu_dereference(nb->next);
+	}
+	return ret;
+}
+
+/*
+ *	Atomic notifier chain routines.  Registration and unregistration
+ *	use a mutex, and call_chain is synchronized by RCU (no locks).
+ */
 
 /**
- *	notifier_chain_register	- Add notifier to a notifier chain
- *	@list: Pointer to root list pointer
+ *	atomic_notifier_chain_register - Add notifier to an atomic notifier chain
+ *	@nh: Pointer to head of the atomic notifier chain
  *	@n: New entry in notifier chain
  *
- *	Adds a notifier to a notifier chain.
+ *	Adds a notifier to an atomic notifier chain.
  *
  *	Currently always returns zero.
  */
+
+int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+		struct notifier_block *n)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&nh->lock, flags);
+	ret = notifier_chain_register(&nh->head, n);
+	spin_unlock_irqrestore(&nh->lock, flags);
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+
+/**
+ *	atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
+ *	@nh: Pointer to head of the atomic notifier chain
+ *	@n: Entry to remove from notifier chain
+ *
+ *	Removes a notifier from an atomic notifier chain.
+ *
+ *	Returns zero on success or %-ENOENT on failure.
+ */
+int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+		struct notifier_block *n)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&nh->lock, flags);
+	ret = notifier_chain_unregister(&nh->head, n);
+	spin_unlock_irqrestore(&nh->lock, flags);
+	synchronize_rcu();
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+
+/**
+ *	atomic_notifier_call_chain - Call functions in an atomic notifier chain
+ *	@nh: Pointer to head of the atomic notifier chain
+ *	@val: Value passed unmodified to notifier function
+ *	@v: Pointer passed unmodified to notifier function
+ *
+ *	Calls each function in a notifier chain in turn.  The functions
+ *	run in an atomic context, so they must not block.
+ *	This routine uses RCU to synchronize with changes to the chain.
+ *
+ *	If the return value of the notifier can be and'ed
+ *	with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ *	will return immediately, with the return value of
+ *	the notifier function which halted execution.
+ *	Otherwise the return value is the return value
+ *	of the last notifier function called.
+ */
  
-int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+		unsigned long val, void *v)
 {
-	write_lock(&notifier_lock);
-	while(*list)
-	{
-		if(n->priority > (*list)->priority)
-			break;
-		list= &((*list)->next);
-	}
-	n->next = *list;
-	*list=n;
-	write_unlock(&notifier_lock);
-	return 0;
+	int ret;
+
+	rcu_read_lock();
+	ret = notifier_call_chain(&nh->head, val, v);
+	rcu_read_unlock();
+	return ret;
 }
 
-EXPORT_SYMBOL(notifier_chain_register);
+EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
+
+/*
+ *	Blocking notifier chain routines.  All access to the chain is
+ *	synchronized by an rwsem.
+ */
 
 /**
- *	notifier_chain_unregister - Remove notifier from a notifier chain
- *	@nl: Pointer to root list pointer
+ *	blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ *	@nh: Pointer to head of the blocking notifier chain
  *	@n: New entry in notifier chain
  *
- *	Removes a notifier from a notifier chain.
+ *	Adds a notifier to a blocking notifier chain.
+ *	Must be called in process context.
  *
- *	Returns zero on success, or %-ENOENT on failure.
+ *	Currently always returns zero.
  */
  
-int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+		struct notifier_block *n)
 {
-	write_lock(&notifier_lock);
-	while((*nl)!=NULL)
-	{
-		if((*nl)==n)
-		{
-			*nl=n->next;
-			write_unlock(&notifier_lock);
-			return 0;
-		}
-		nl=&((*nl)->next);
-	}
-	write_unlock(&notifier_lock);
-	return -ENOENT;
+	int ret;
+
+	/*
+	 * This code gets used during boot-up, when task switching is
+	 * not yet working and interrupts must remain disabled.  At
+	 * such times we must not call down_write().
+	 */
+	if (unlikely(system_state == SYSTEM_BOOTING))
+		return notifier_chain_register(&nh->head, n);
+
+	down_write(&nh->rwsem);
+	ret = notifier_chain_register(&nh->head, n);
+	up_write(&nh->rwsem);
+	return ret;
 }
 
-EXPORT_SYMBOL(notifier_chain_unregister);
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
 
 /**
- *	notifier_call_chain - Call functions in a notifier chain
- *	@n: Pointer to root pointer of notifier chain
+ *	blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
+ *	@nh: Pointer to head of the blocking notifier chain
+ *	@n: Entry to remove from notifier chain
+ *
+ *	Removes a notifier from a blocking notifier chain.
+ *	Must be called from process context.
+ *
+ *	Returns zero on success or %-ENOENT on failure.
+ */
+int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+		struct notifier_block *n)
+{
+	int ret;
+
+	/*
+	 * This code gets used during boot-up, when task switching is
+	 * not yet working and interrupts must remain disabled.  At
+	 * such times we must not call down_write().
+	 */
+	if (unlikely(system_state == SYSTEM_BOOTING))
+		return notifier_chain_unregister(&nh->head, n);
+
+	down_write(&nh->rwsem);
+	ret = notifier_chain_unregister(&nh->head, n);
+	up_write(&nh->rwsem);
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+
+/**
+ *	blocking_notifier_call_chain - Call functions in a blocking notifier chain
+ *	@nh: Pointer to head of the blocking notifier chain
  *	@val: Value passed unmodified to notifier function
  *	@v: Pointer passed unmodified to notifier function
  *
- *	Calls each function in a notifier chain in turn.
+ *	Calls each function in a notifier chain in turn.  The functions
+ *	run in a process context, so they are allowed to block.
  *
- *	If the return value of the notifier can be and'd
- *	with %NOTIFY_STOP_MASK, then notifier_call_chain
+ *	If the return value of the notifier can be and'ed
+ *	with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
  *	will return immediately, with the return value of
  *	the notifier function which halted execution.
- *	Otherwise, the return value is the return value
+ *	Otherwise the return value is the return value
  *	of the last notifier function called.
  */
  
-int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+		unsigned long val, void *v)
 {
-	int ret=NOTIFY_DONE;
-	struct notifier_block *nb = *n;
+	int ret;
 
-	while(nb)
-	{
-		ret=nb->notifier_call(nb,val,v);
-		if(ret&NOTIFY_STOP_MASK)
-		{
-			return ret;
-		}
-		nb=nb->next;
-	}
+	down_read(&nh->rwsem);
+	ret = notifier_call_chain(&nh->head, val, v);
+	up_read(&nh->rwsem);
 	return ret;
 }
 
-EXPORT_SYMBOL(notifier_call_chain);
+EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
+
+/*
+ *	Raw notifier chain routines.  There is no protection;
+ *	the caller must provide it.  Use at your own risk!
+ */
+
+/**
+ *	raw_notifier_chain_register - Add notifier to a raw notifier chain
+ *	@nh: Pointer to head of the raw notifier chain
+ *	@n: New entry in notifier chain
+ *
+ *	Adds a notifier to a raw notifier chain.
+ *	All locking must be provided by the caller.
+ *
+ *	Currently always returns zero.
+ */
+
+int raw_notifier_chain_register(struct raw_notifier_head *nh,
+		struct notifier_block *n)
+{
+	return notifier_chain_register(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
+
+/**
+ *	raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
+ *	@nh: Pointer to head of the raw notifier chain
+ *	@n: Entry to remove from notifier chain
+ *
+ *	Removes a notifier from a raw notifier chain.
+ *	All locking must be provided by the caller.
+ *
+ *	Returns zero on success or %-ENOENT on failure.
+ */
+int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+		struct notifier_block *n)
+{
+	return notifier_chain_unregister(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+
+/**
+ *	raw_notifier_call_chain - Call functions in a raw notifier chain
+ *	@nh: Pointer to head of the raw notifier chain
+ *	@val: Value passed unmodified to notifier function
+ *	@v: Pointer passed unmodified to notifier function
+ *
+ *	Calls each function in a notifier chain in turn.  The functions
+ *	run in an undefined context.
+ *	All locking must be provided by the caller.
+ *
+ *	If the return value of the notifier can be and'ed
+ *	with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ *	will return immediately, with the return value of
+ *	the notifier function which halted execution.
+ *	Otherwise the return value is the return value
+ *	of the last notifier function called.
+ */
+
+int raw_notifier_call_chain(struct raw_notifier_head *nh,
+		unsigned long val, void *v)
+{
+	return notifier_call_chain(&nh->head, val, v);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
 
 /**
  *	register_reboot_notifier - Register function to be called at reboot time
@@ -196,13 +401,13 @@ EXPORT_SYMBOL(notifier_call_chain);
  *	Registers a function with the list of functions
  *	to be called at reboot time.
  *
- *	Currently always returns zero, as notifier_chain_register
+ *	Currently always returns zero, as blocking_notifier_chain_register
  *	always returns zero.
  */
  
 int register_reboot_notifier(struct notifier_block * nb)
 {
-	return notifier_chain_register(&reboot_notifier_list, nb);
+	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
 }
 
 EXPORT_SYMBOL(register_reboot_notifier);
@@ -219,7 +424,7 @@ EXPORT_SYMBOL(register_reboot_notifier);
  
 int unregister_reboot_notifier(struct notifier_block * nb)
 {
-	return notifier_chain_unregister(&reboot_notifier_list, nb);
+	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
 }
 
 EXPORT_SYMBOL(unregister_reboot_notifier);
@@ -380,7 +585,7 @@ EXPORT_SYMBOL_GPL(emergency_restart);
 
 void kernel_restart_prepare(char *cmd)
 {
-	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
+	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;
 	device_shutdown();
 }
@@ -430,7 +635,7 @@ EXPORT_SYMBOL_GPL(kernel_kexec);
 
 void kernel_shutdown_prepare(enum system_states state)
 {
-	notifier_call_chain(&reboot_notifier_list,
+	blocking_notifier_call_chain(&reboot_notifier_list,
 		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
 	system_state = state;
 	device_shutdown();
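
The split between spinlocked updates and RCU-protected traversal is what makes it safe to unregister from an atomic chain while another CPU is walking it: notifier_chain_unregister() publishes the unlink with rcu_assign_pointer(), and atomic_notifier_chain_unregister() then blocks in synchronize_rcu() until every in-flight atomic_notifier_call_chain() has left its rcu_read_lock() section. A sketch of the freeing pattern this enables (chain and handler names hypothetical):

	struct notifier_block *nb = kmalloc(sizeof(*nb), GFP_KERNEL);

	if (!nb)
		return -ENOMEM;
	nb->notifier_call = my_handler;
	nb->priority = 0;
	atomic_notifier_chain_register(&my_atomic_chain, nb);

	/* ... later, on the teardown path ... */

	atomic_notifier_chain_unregister(&my_atomic_chain, nb);
	/*
	 * The unregister path already waited in synchronize_rcu(), so
	 * no CPU can still be traversing this block; freeing is safe.
	 */
	kfree(nb);

The caveat from the header still applies: neither unregister routine may be called from within a running call chain.
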
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9106354c781..a49a6975092 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -73,23 +73,23 @@ DEFINE_RWLOCK(hci_cb_list_lock);
 struct hci_proto *hci_proto[HCI_MAX_PROTO];
 
 /* HCI notifiers list */
-static struct notifier_block *hci_notifier;
+static ATOMIC_NOTIFIER_HEAD(hci_notifier);
 
 /* ---- HCI notifications ---- */
 
 int hci_register_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&hci_notifier, nb);
+	return atomic_notifier_chain_register(&hci_notifier, nb);
 }
 
 int hci_unregister_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&hci_notifier, nb);
+	return atomic_notifier_chain_unregister(&hci_notifier, nb);
 }
 
 static void hci_notify(struct hci_dev *hdev, int event)
 {
-	notifier_call_chain(&hci_notifier, event, hdev);
+	atomic_notifier_call_chain(&hci_notifier, event, hdev);
 }
 
 /* ---- HCI requests ---- */
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e1dc305122..a3ab11f3415 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -193,7 +193,7 @@ static inline struct hlist_head *dev_index_hash(int ifindex)
  *	Our notifier list
  */
 
-static struct notifier_block *netdev_chain;
+static BLOCKING_NOTIFIER_HEAD(netdev_chain);
 
 /*
  *	Device drivers call our routines to queue packets here. We empty the
@@ -736,7 +736,8 @@ int dev_change_name(struct net_device *dev, char *newname)
 	if (!err) {
 		hlist_del(&dev->name_hlist);
 		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
-		notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
+		blocking_notifier_call_chain(&netdev_chain,
+				NETDEV_CHANGENAME, dev);
 	}
 
 	return err;
@@ -750,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname)
  */
 void netdev_features_change(struct net_device *dev)
 {
-	notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
+	blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
 }
 EXPORT_SYMBOL(netdev_features_change);
 
@@ -765,7 +766,8 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
 	if (dev->flags & IFF_UP) {
-		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
+		blocking_notifier_call_chain(&netdev_chain,
+				NETDEV_CHANGE, dev);
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
 	}
 }
@@ -862,7 +864,7 @@ int dev_open(struct net_device *dev)
 		/*
 		 *	... and announce new interface.
 		 */
-		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+		blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
 	}
 	return ret;
 }
@@ -885,7 +887,7 @@ int dev_close(struct net_device *dev)
 	 *	Tell people we are going down, so that they can
 	 *	prepare to death, when device is still operating.
 	 */
-	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
+	blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
 
 	dev_deactivate(dev);
 
@@ -922,7 +924,7 @@ int dev_close(struct net_device *dev)
 	/*
 	 * Tell people we are down
 	 */
-	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+	blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
 
 	return 0;
 }
@@ -953,7 +955,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
 	int err;
 
 	rtnl_lock();
-	err = notifier_chain_register(&netdev_chain, nb);
+	err = blocking_notifier_chain_register(&netdev_chain, nb);
 	if (!err) {
 		for (dev = dev_base; dev; dev = dev->next) {
 			nb->notifier_call(nb, NETDEV_REGISTER, dev);
@@ -981,7 +983,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
 	int err;
 
 	rtnl_lock();
-	err = notifier_chain_unregister(&netdev_chain, nb);
+	err = blocking_notifier_chain_unregister(&netdev_chain, nb);
 	rtnl_unlock();
 	return err;
 }
@@ -992,12 +994,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
  *      @v:   pointer passed unmodified to notifier function
  *
  *	Call all network notifier blocks.  Parameters and return value
- *	are as for notifier_call_chain().
+ *	are as for blocking_notifier_call_chain().
  */
 
 int call_netdevice_notifiers(unsigned long val, void *v)
 {
-	return notifier_call_chain(&netdev_chain, val, v);
+	return blocking_notifier_call_chain(&netdev_chain, val, v);
 }
 
 /* When > 0 there are consumers of rx skb time stamps */
@@ -2242,7 +2244,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	if (dev->flags & IFF_UP &&
 	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
 					  IFF_VOLATILE)))
-		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
+		blocking_notifier_call_chain(&netdev_chain,
+				NETDEV_CHANGE, dev);
 
 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
 		int inc = (flags & IFF_PROMISC) ? +1 : -1;
@@ -2286,8 +2289,8 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
 	else
 		dev->mtu = new_mtu;
 	if (!err && dev->flags & IFF_UP)
-		notifier_call_chain(&netdev_chain,
-				    NETDEV_CHANGEMTU, dev);
+		blocking_notifier_call_chain(&netdev_chain,
+				NETDEV_CHANGEMTU, dev);
 	return err;
 }
 
@@ -2303,7 +2306,8 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
 		return -ENODEV;
 	err = dev->set_mac_address(dev, sa);
 	if (!err)
-		notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
+		blocking_notifier_call_chain(&netdev_chain,
+				NETDEV_CHANGEADDR, dev);
 	return err;
 }
 
@@ -2359,7 +2363,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 				return -EINVAL;
 			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-			notifier_call_chain(&netdev_chain,
+			blocking_notifier_call_chain(&netdev_chain,
 					    NETDEV_CHANGEADDR, dev);
 			return 0;
 
@@ -2813,7 +2817,7 @@ int register_netdevice(struct net_device *dev)
 	write_unlock_bh(&dev_base_lock);
 
 	/* Notify protocols, that a new device appeared. */
-	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
+	blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
 
 	/* Finish registration after unlock */
 	net_set_todo(dev);
@@ -2892,7 +2896,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 			rtnl_lock();
 
 			/* Rebroadcast unregister notification */
-			notifier_call_chain(&netdev_chain,
+			blocking_notifier_call_chain(&netdev_chain,
 					    NETDEV_UNREGISTER, dev);
 
 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
@@ -3148,7 +3152,7 @@ int unregister_netdevice(struct net_device *dev)
 	/* Notify protocols, that we are about to destroy
 	   this device. They should clean all the things.
 	*/
-	notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
+	blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
 	
 	/*
 	 *	Flush the multicast chain
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index cc7b9d9255e..d2ae9893ca1 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -68,7 +68,7 @@ __le16 decnet_address = 0;
 
 static DEFINE_RWLOCK(dndev_lock);
 static struct net_device *decnet_default_device;
-static struct notifier_block *dnaddr_chain;
+static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
 
 static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
 static void dn_dev_delete(struct net_device *dev);
@@ -446,7 +446,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de
 	}
 
 	rtmsg_ifa(RTM_DELADDR, ifa1);
-	notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
+	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
 	if (destroy) {
 		dn_dev_free_ifa(ifa1);
 
@@ -481,7 +481,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
 	dn_db->ifa_list = ifa;
 
 	rtmsg_ifa(RTM_NEWADDR, ifa);
-	notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
+	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
 
 	return 0;
 }
@@ -1285,12 +1285,12 @@ void dn_dev_devices_on(void)
 
 int register_dnaddr_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&dnaddr_chain, nb);
+	return blocking_notifier_chain_register(&dnaddr_chain, nb);
 }
 
 int unregister_dnaddr_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&dnaddr_chain, nb);
+	return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
 }
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 44fdf1413e2..81c2f788529 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -81,7 +81,7 @@ static struct ipv4_devconf ipv4_devconf_dflt = {
 
 static void rtmsg_ifa(int event, struct in_ifaddr *);
 
-static struct notifier_block *inetaddr_chain;
+static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			 int destroy);
 #ifdef CONFIG_SYSCTL
@@ -267,7 +267,8 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 				*ifap1 = ifa->ifa_next;
 
 				rtmsg_ifa(RTM_DELADDR, ifa);
-				notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
+				blocking_notifier_call_chain(&inetaddr_chain,
+						NETDEV_DOWN, ifa);
 				inet_free_ifa(ifa);
 			} else {
 				promote = ifa;
@@ -291,7 +292,7 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 	   So that, this order is correct.
 	 */
 	rtmsg_ifa(RTM_DELADDR, ifa1);
-	notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
+	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 
 	if (promote) {
 
@@ -303,7 +304,8 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 
 		promote->ifa_flags &= ~IFA_F_SECONDARY;
 		rtmsg_ifa(RTM_NEWADDR, promote);
-		notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote);
+		blocking_notifier_call_chain(&inetaddr_chain,
+				NETDEV_UP, promote);
 		for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) {
 			if (ifa1->ifa_mask != ifa->ifa_mask ||
 			    !inet_ifa_match(ifa1->ifa_address, ifa))
@@ -366,7 +368,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa)
 	   Notifier will trigger FIB update, so that
 	   listeners of netlink will know about new ifaddr */
 	rtmsg_ifa(RTM_NEWADDR, ifa);
-	notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
+	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 
 	return 0;
 }
@@ -938,12 +940,12 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
 
 int register_inetaddr_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&inetaddr_chain, nb);
+	return blocking_notifier_chain_register(&inetaddr_chain, nb);
 }
 
 int unregister_inetaddr_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&inetaddr_chain, nb);
+	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
 }
 
 /* Rename ifa_labels for a device name change. Make some effort to preserve existing
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 9e34034729a..ceaabc18202 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -80,8 +80,8 @@ static int ip_conntrack_vmalloc;
 static unsigned int ip_conntrack_next_id;
 static unsigned int ip_conntrack_expect_next_id;
 #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-struct notifier_block *ip_conntrack_chain;
-struct notifier_block *ip_conntrack_expect_chain;
+ATOMIC_NOTIFIER_HEAD(ip_conntrack_chain);
+ATOMIC_NOTIFIER_HEAD(ip_conntrack_expect_chain);
 
 DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
 
@@ -92,7 +92,7 @@ __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
 {
 	DEBUGP("ecache: delivering events for %p\n", ecache->ct);
 	if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
-		notifier_call_chain(&ip_conntrack_chain, ecache->events,
+		atomic_notifier_call_chain(&ip_conntrack_chain, ecache->events,
 				    ecache->ct);
 	ecache->events = 0;
 	ip_conntrack_put(ecache->ct);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 01c62a0d374..445006ee452 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -143,7 +143,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
 				struct prefix_info *pinfo);
 static int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev);
 
-static struct notifier_block *inet6addr_chain;
+static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
 
 struct ipv6_devconf ipv6_devconf = {
 	.forwarding		= 0,
@@ -593,7 +593,7 @@ out2:
 	read_unlock_bh(&addrconf_lock);
 
 	if (likely(err == 0))
-		notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
+		atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
 	else {
 		kfree(ifa);
 		ifa = ERR_PTR(err);
@@ -688,7 +688,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
 	ipv6_ifa_notify(RTM_DELADDR, ifp);
 
-	notifier_call_chain(&inet6addr_chain,NETDEV_DOWN,ifp);
+	atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
 
 	addrconf_del_timer(ifp);
 
@@ -3767,12 +3767,12 @@ static void addrconf_sysctl_unregister(struct ipv6_devconf *p)
 
 int register_inet6addr_notifier(struct notifier_block *nb)
 {
-        return notifier_chain_register(&inet6addr_chain, nb);
+        return atomic_notifier_chain_register(&inet6addr_chain, nb);
 }
 
 int unregister_inet6addr_notifier(struct notifier_block *nb)
 {
-        return notifier_chain_unregister(&inet6addr_chain,nb);
+        return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
 }
 
 /*
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0ae281d9bfc..56389c83557 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -90,8 +90,8 @@ static int nf_conntrack_vmalloc;
 static unsigned int nf_conntrack_next_id;
 static unsigned int nf_conntrack_expect_next_id;
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
-struct notifier_block *nf_conntrack_chain;
-struct notifier_block *nf_conntrack_expect_chain;
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
 
 DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
 
@@ -103,7 +103,7 @@ __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
 	DEBUGP("ecache: delivering events for %p\n", ecache->ct);
 	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
 	    && ecache->events)
-		notifier_call_chain(&nf_conntrack_chain, ecache->events,
+		atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
 				    ecache->ct);
 
 	ecache->events = 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d00a9034cb5..2a233ffcf61 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -123,7 +123,7 @@ static void netlink_destroy_callback(struct netlink_callback *cb);
 static DEFINE_RWLOCK(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
 
-static struct notifier_block *netlink_chain;
+static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
 static u32 netlink_group_mask(u32 group)
 {
@@ -469,7 +469,8 @@ static int netlink_release(struct socket *sock)
 						.protocol = sk->sk_protocol,
 						.pid = nlk->pid,
 					  };
-		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
+		atomic_notifier_call_chain(&netlink_chain,
+				NETLINK_URELEASE, &n);
 	}	
 
 	if (nlk->module)
@@ -1695,12 +1696,12 @@ static struct file_operations netlink_seq_fops = {
 
 int netlink_register_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&netlink_chain, nb);
+	return atomic_notifier_chain_register(&netlink_chain, nb);
 }
 
 int netlink_unregister_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&netlink_chain, nb);
+	return atomic_notifier_chain_unregister(&netlink_chain, nb);
 }
                 
 static const struct proto_ops netlink_ops = {
-- 
cgit v1.2.3-70-g09d2
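
Every conversion in this patch follows the same mechanical pattern: a bare
"struct notifier_block *" chain head becomes an ATOMIC_NOTIFIER_HEAD, and the
register/unregister/call sites gain the atomic_ prefix.  A minimal sketch of
the converted API, with an illustrative chain and handler (the names below are
assumptions, not taken from the patch):

    #include <linux/notifier.h>

    static ATOMIC_NOTIFIER_HEAD(example_chain);     /* hypothetical chain */

    static int example_handler(struct notifier_block *self,
                               unsigned long event, void *data)
    {
            /* May run in atomic context: no sleeping allowed here. */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_handler,
    };

    static int __init example_init(void)
    {
            atomic_notifier_chain_register(&example_chain, &example_nb);
            /* Delivery is safe even from interrupt context: */
            atomic_notifier_call_chain(&example_chain, 0UL, NULL);
            return 0;
    }

The atomic variant serializes registration with an internal spinlock and walks
the chain under RCU, so the converted call sites need no locking of their own,
but their handlers must not block.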


From 6e57a3a89785692bd8d012d80f5ee210ab8e0b68 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Tue, 28 Mar 2006 01:00:08 -0800
Subject: [SPARC64]: Implement futex_atomic_cmpxchg_inatomic().

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/asm-sparc64/futex.h | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

(limited to 'include/asm-sparc64')
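
The implementation below pairs an lduwa user load with a casa compare-and-swap
and lists both instructions in __ex_table, so a faulting user access branches
to the fixup stub and returns -EFAULT instead of oopsing.  A rough C sketch of
the semantics (illustrative only: cmpxchg() stands in for the casa instruction,
and the fault fixup is elided):

    static inline int
    futex_cmpxchg_sketch(int __user *uaddr, int oldval, int newval)
    {
            int cur;

            cur = *uaddr;                 /* "1:" lduwa -- may fault     */
            cmpxchg(uaddr, cur, newval);  /* "2:" casa  -- may fault     */
            return cur;                   /* fixup path yields -EFAULT   */
    }

Note that the casa compare source is the value just loaded, not the
caller-supplied oldval; the caller sees the observed value in the return and
checks it against its own expectation.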

diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index cd340a23315..dee40206b22 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -84,9 +84,27 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
-	return -ENOSYS;
+	__asm__ __volatile__(
+	"\n1:	lduwa	[%2] %%asi, %0\n"
+	"2:	casa	[%2] %%asi, %0, %1\n"
+	"3:\n"
+	"	.section .fixup,#alloc,#execinstr\n"
+	"	.align	4\n"
+	"4:	ba	3b\n"
+	"	 mov	%3, %0\n"
+	"	.previous\n"
+	"	.section __ex_table,\"a\"\n"
+	"	.align	4\n"
+	"	.word	1b, 4b\n"
+	"	.word	2b, 4b\n"
+	"	.previous\n"
+	: "=&r" (oldval)
+	: "r" (newval), "r" (uaddr), "i" (-EFAULT)
+	: "memory");
+
+	return oldval;
 }
 
 #endif /* !(_SPARC64_FUTEX_H) */
-- 
cgit v1.2.3-70-g09d2
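
For context, callers consume this primitive in a compare-and-retry shape; a
hedged sketch (paraphrased, not quoted from the generic futex code, and
assuming page faults are disabled around the call):

    int curval;

    curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
    if (curval == -EFAULT)
            return -EFAULT;    /* fixup path: user access faulted  */
    if (curval != uval)
            return -EAGAIN;    /* value changed underneath; retry  */
    /* observed value matched uval, so the update went through */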


From 7f927fcc2fd1575d01efb4b76665975007945690 Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan <adobriyan@gmail.com>
Date: Tue, 28 Mar 2006 01:56:53 -0800
Subject: [PATCH] Typo fixes

Fix a lot of typos.  Eyeballed by jmc@ in OpenBSD.

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 Documentation/m68k/README.buddha         | 2 +-
 Documentation/networking/ifenslave.c     | 2 +-
 arch/arm/lib/copy_template.S             | 2 +-
 drivers/char/mxser.h                     | 2 +-
 drivers/char/synclink.c                  | 2 +-
 drivers/edac/edac_mc.c                   | 2 +-
 drivers/net/irda/nsc-ircc.c              | 2 +-
 drivers/net/sis900.c                     | 2 +-
 drivers/net/tulip/de4x5.c                | 2 +-
 drivers/net/tulip/pnic2.c                | 2 +-
 drivers/net/typhoon.c                    | 4 ++--
 drivers/net/wireless/orinoco.c           | 2 +-
 drivers/net/wireless/prism54/isl_ioctl.c | 2 +-
 drivers/scsi/3w-9xxx.c                   | 2 +-
 drivers/serial/8250.c                    | 2 +-
 drivers/serial/serial_txx9.c             | 2 +-
 drivers/serial/sunsu.c                   | 2 +-
 drivers/usb/host/ohci-s3c2410.c          | 2 +-
 drivers/usb/net/zaurus.c                 | 2 +-
 fs/mbcache.c                             | 2 +-
 include/asm-parisc/pdc.h                 | 2 +-
 include/asm-sh/addrspace.h               | 2 +-
 include/asm-sparc64/floppy.h             | 2 +-
 include/linux/fb.h                       | 2 +-
 mm/mempolicy.c                           | 2 +-
 net/irda/af_irda.c                       | 6 +++---
 sound/pci/rme32.c                        | 8 ++++----
 sound/pci/rme96.c                        | 8 ++++----
 sound/pci/rme9652/hdspm.c                | 2 +-
 sound/usb/usx2y/usx2yhwdeppcm.c          | 2 +-
 30 files changed, 39 insertions(+), 39 deletions(-)

(limited to 'include/asm-sparc64')

diff --git a/Documentation/m68k/README.buddha b/Documentation/m68k/README.buddha
index bf802ffc98a..ef484a719bb 100644
--- a/Documentation/m68k/README.buddha
+++ b/Documentation/m68k/README.buddha
@@ -29,7 +29,7 @@ address is written to $4a, then the whole Byte is written to
 $48, while it doesn't matter how often you're writing to $4a
 as  long as $48 is not touched.  After $48 has been written,
 the  whole card disappears from $e8 and is mapped to the new
-address just written.  Make shure $4a is written before $48,
+address just written.  Make sure $4a is written before $48,
 otherwise your chance is only 1:16 to find the board :-).
 
 The local memory-map is even active when mapped to $e8:
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index 545447ac503..a1205988675 100644
--- a/Documentation/networking/ifenslave.c
+++ b/Documentation/networking/ifenslave.c
@@ -87,7 +87,7 @@
  *	   would fail and generate an error message in the system log.
  * 	 - For opt_c: slave should not be set to the master's setting
  *	   while it is running. It was already set during enslave. To
- *	   simplify things, it is now handeled separately.
+ *	   simplify things, it is now handled separately.
  *
  *    - 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
  *	 - Code cleanup and style changes
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 838e435e492..cab355c0c1f 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -236,7 +236,7 @@
 
 
 /*
- * Abort preanble and completion macros.
+ * Abort preamble and completion macros.
  * If a fixup handler is required then those macros must surround it.
  * It is assumed that the fixup code will handle the private part of
  * the exit macro.
diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h
index e7fd0b08e0b..7e188a4d602 100644
--- a/drivers/char/mxser.h
+++ b/drivers/char/mxser.h
@@ -118,7 +118,7 @@
 
 // enable CTS interrupt
 #define MOXA_MUST_IER_ECTSI		0x80
-// eanble RTS interrupt
+// enable RTS interrupt
 #define MOXA_MUST_IER_ERTSI		0x40
 // enable Xon/Xoff interrupt
 #define MOXA_MUST_IER_XINT		0x20
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index abb03e52fe1..fee2aca3f6a 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -5996,7 +5996,7 @@ static void usc_set_async_mode( struct mgsl_struct *info )
 	 * <15..8>	?		RxFIFO IRQ Request Level
 	 *
 	 * Note: For async mode the receive FIFO level must be set
-	 * to 0 to aviod the situation where the FIFO contains fewer bytes
+	 * to 0 to avoid the situation where the FIFO contains fewer bytes
 	 * than the trigger level and no more data is expected.
 	 *
 	 * <7>		0		Exited Hunt IA (Interrupt Arm)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 905f58ba8e1..ea06e3a4dc3 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -2044,7 +2044,7 @@ static int __init edac_mc_init(void)
 	 */
 	clear_pci_parity_errors();
 
-	/* Create the MC sysfs entires */
+	/* Create the MC sysfs entries */
 	if (edac_sysfs_memctrl_setup()) {
 		edac_printk(KERN_ERR, EDAC_MC,
 			"Error initializing sysfs code\n");
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 9aa074b44dd..cc7ff8f00e4 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -812,7 +812,7 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
 	int cfg_base = info->cfg_base;
 	int enabled;
 
-	/* User is shure about his config... accept it. */
+	/* User is sure about his config... accept it. */
 	IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
 		   "io=0x%04x, irq=%d, dma=%d\n", 
 		   __FUNCTION__, info->fir_base, info->irq, info->dma);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 8429ceb0138..b82191d2bee 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -2283,7 +2283,7 @@ static void set_rx_mode(struct net_device *net_dev)
 	int i, table_entries;
 	u32 rx_mode;
 
-	/* 635 Hash Table entires = 256(2^16) */
+	/* 635 Hash Table entries = 256(2^16) */
 	if((sis_priv->chipset_rev >= SIS635A_900_REV) ||
 			(sis_priv->chipset_rev == SIS900B_900_REV))
 		table_entries = 16;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index ee48bfd6734..d1a86a080a6 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -513,7 +513,7 @@ struct mii_phy {
     u_char  *rst;           /* Start of reset sequence in SROM           */
     u_int mc;               /* Media Capabilities                        */
     u_int ana;              /* NWay Advertisement                        */
-    u_int fdx;              /* Full DupleX capabilites for each media    */
+    u_int fdx;              /* Full DupleX capabilities for each media   */
     u_int ttm;              /* Transmit Threshold Mode for each media    */
     u_int mci;              /* 21142 MII Connector Interrupt info        */
 };
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index 55f4a9a631b..ab985023fcc 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -199,7 +199,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
 	               /* negotiation ended successfully */
 
 	               /* get the link partners reply and mask out all but
-                        * bits 24-21 which show the partners capabilites
+                        * bits 24-21 which show the partner's capabilities
                         * and match those to what we advertised
                         *
                         * then begin to interpret the results of the negotiation.
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index cde35dd8790..c1ce87a5f8d 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -208,7 +208,7 @@ static const struct typhoon_card_info typhoon_card_info[] __devinitdata = {
 };
 
 /* Notes on the new subsystem numbering scheme:
- * bits 0-1 indicate crypto capabilites: (0) variable, (1) DES, or (2) 3DES
+ * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
  * bit 4 indicates if this card has secured firmware (we don't support it)
  * bit 8 indicates if this is a (0) copper or (1) fiber card
  * bits 12-16 indicate card type: (0) client and (1) server
@@ -788,7 +788,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
 	/* we have two rings to choose from, but we only use txLo for now
 	 * If we start using the Hi ring as well, we'll need to update
 	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
-	 * and TXHI_ENTIRES to match, as well as update the TSO code below
+	 * and TXHI_ENTRIES to match, as well as update the TSO code below
 	 * to get the right DMA address
 	 */
 	txRing = &tp->txLoRing;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 6fd0bf73683..8dfdfbd5966 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -3858,7 +3858,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
 	unsigned long flags;
 
 	/* Note : you may have realised that, as this is a SET operation,
-	 * this is priviledged and therefore a normal user can't
+	 * this is privileged and therefore a normal user can't
 	 * perform scanning.
 	 * This is not an error, while the device perform scanning,
 	 * traffic doesn't flow, so it's a perfect DoS...
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index e5bb9f5ae42..989599ad33e 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -747,7 +747,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
 
 	if (essid->length) {
 		dwrq->flags = 1;	/* set ESSID to ON for Wireless Extensions */
-		/* if it is to big, trunk it */
+		/* if it is too big, truncate it */
 		dwrq->length = min((u8)IW_ESSID_MAX_SIZE, essid->length);
 	} else {
 		dwrq->flags = 0;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 0ab26d01877..0d2b447c50e 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1026,7 +1026,7 @@ static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
 	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
 } /* End twa_free_request_id() */
 
-/* This function will get parameter table entires from the firmware */
+/* This function will get parameter table entries from the firmware */
 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
 {
 	TW_Command_Full *full_command_packet;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 5996d3cd0ed..674b15c78f6 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1528,7 +1528,7 @@ static int serial8250_startup(struct uart_port *port)
 
 	/*
 	 * Clear the FIFO buffers and disable them.
-	 * (they will be reeanbled in set_termios())
+	 * (they will be reenabled in set_termios())
 	 */
 	serial8250_clear_fifos(up);
 
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index b848b7d9441..3bdee64d1a9 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -483,7 +483,7 @@ static int serial_txx9_startup(struct uart_port *port)
 
 	/*
 	 * Clear the FIFO buffers and disable them.
-	 * (they will be reeanbled in set_termios())
+	 * (they will be reenabled in set_termios())
 	 */
 	sio_set(up, TXX9_SIFCR,
 		TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 9fe2283d91e..1c4396c2962 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -641,7 +641,7 @@ static int sunsu_startup(struct uart_port *port)
 
 	/*
 	 * Clear the FIFO buffers and disable them.
-	 * (they will be reeanbled in set_termios())
+	 * (they will be reenabled in set_termios())
 	 */
 	if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) {
 		serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 372527a8359..682bf221566 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -158,7 +158,7 @@ static int ohci_s3c2410_hub_control (
 		"s3c2410_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
 		hcd, typeReq, wValue, wIndex, buf, wLength);
 
-	/* if we are only an humble host without any special capabilites
+	/* if we are only a humble host without any special capabilities
 	 * process the request straight away and exit */
 
 	if (info == NULL) {
diff --git a/drivers/usb/net/zaurus.c b/drivers/usb/net/zaurus.c
index 9c5ab251370..f7ac9d6b985 100644
--- a/drivers/usb/net/zaurus.c
+++ b/drivers/usb/net/zaurus.c
@@ -217,7 +217,7 @@ static int blan_mdlm_bind(struct usbnet *dev, struct usb_interface *intf)
 			 * with devices that use it and those that don't.
 			 */
 			if ((detail->bDetailData[1] & ~0x02) != 0x01) {
-				/* bmDataCapabilites == 0 would be fine too,
+				/* bmDataCapabilities == 0 would be fine too,
 				 * but framing is minidriver-coupled for now.
 				 */
 bad_detail:
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 73e754fea2d..e4fde1ab22c 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -311,7 +311,7 @@ fail:
 /*
  * mb_cache_shrink()
  *
- * Removes all cache entires of a device from the cache. All cache entries
+ * Removes all cache entries of a device from the cache. All cache entries
  * currently in use cannot be freed, and thus remain in the cache. All others
  * are freed.
  *
diff --git a/include/asm-parisc/pdc.h b/include/asm-parisc/pdc.h
index 8e23e4c674f..0a3face6c48 100644
--- a/include/asm-parisc/pdc.h
+++ b/include/asm-parisc/pdc.h
@@ -333,7 +333,7 @@ struct pdc_model {		/* for PDC_MODEL */
 	unsigned long curr_key;
 };
 
-/* Values for PDC_MODEL_CAPABILITES non-equivalent virtual aliasing support */
+/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */
 
 #define PDC_MODEL_IOPDIR_FDC            (1 << 2)        /* see sba_iommu.c */
 #define PDC_MODEL_NVA_MASK		(3 << 4)
diff --git a/include/asm-sh/addrspace.h b/include/asm-sh/addrspace.h
index dbb05d1a26d..720afc11c2c 100644
--- a/include/asm-sh/addrspace.h
+++ b/include/asm-sh/addrspace.h
@@ -13,7 +13,7 @@
 
 #include <asm/cpu/addrspace.h>
 
-/* Memory segments (32bit Priviledged mode addresses)  */
+/* Memory segments (32bit Privileged mode addresses)  */
 #define P0SEG		0x00000000
 #define P1SEG		0x80000000
 #define P2SEG		0xa0000000
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index 49d49a28594..6a95d5d0c57 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -738,7 +738,7 @@ static unsigned long __init sun_floppy_init(void)
 		if (!sun_floppy_types[0] && sun_floppy_types[1]) {
 			/*
 			 * Set the drive exchange bit in FCR on NS87303,
-			 * make shure other bits are sane before doing so.
+			 * make sure other bits are sane before doing so.
 			 */
 			ns87303_modify(config, FER, FER_EDM, 0);
 			ns87303_modify(config, ASC, ASC_DRV2_SEL, 0);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 2cb19e6503a..d03fadfcafe 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -734,7 +734,7 @@ struct fb_tile_ops {
 
 /* A driver may set this flag to indicate that it does want a set_par to be
  * called every time when fbcon_switch is executed. The advantage is that with
- * this flag set you can really be shure that set_par is always called before
+ * this flag set you can really be sure that set_par is always called before
  * any of the functions dependant on the correct hardware state or altering
  * that state, even if you are using some broken X releases. The disadvantage
  * is that it introduces unwanted delays to every console switch if set_par
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4f71cfd29c6..dec8249e972 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -912,7 +912,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 	/*
 	 * Check if this process has the right to modify the specified
 	 * process. The right exists if the process has administrative
-	 * capabilities, superuser priviledges or the same
+	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
 	if ((current->euid != task->suid) && (current->euid != task->uid) &&
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 75944564866..627b1134223 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1302,7 +1302,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
 	if (sk->sk_state != TCP_ESTABLISHED)
 		return -ENOTCONN;
 
-	/* Check that we don't send out to big frames */
+	/* Check that we don't send out frames that are too big */
 	if (len > self->max_data_size) {
 		IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
 			   __FUNCTION__, len, self->max_data_size);
@@ -1546,7 +1546,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
 	IRDA_ASSERT(self != NULL, return -1;);
 
 	/*
-	 * Check that we don't send out to big frames. This is an unreliable
+	 * Check that we don't send out frames that are too big. This is an unreliable
 	 * service, so we have no fragmentation and no coalescence
 	 */
 	if (len > self->max_data_size) {
@@ -1642,7 +1642,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
 	}
 
 	/*
-	 * Check that we don't send out to big frames. This is an unreliable
+	 * Check that we don't send out frames that are too big. This is an unreliable
 	 * service, so we have no fragmentation and no coalescence
 	 */
 	if (len > self->max_data_size) {
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 0cbef5fe6c6..ab78544bf04 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -313,7 +313,7 @@ static int snd_rme32_capture_copy(struct snd_pcm_substream *substream, int chann
 }
 
 /*
- * SPDIF I/O capabilites (half-duplex mode)
+ * SPDIF I/O capabilities (half-duplex mode)
  */
 static struct snd_pcm_hardware snd_rme32_spdif_info = {
 	.info =		(SNDRV_PCM_INFO_MMAP_IOMEM |
@@ -339,7 +339,7 @@ static struct snd_pcm_hardware snd_rme32_spdif_info = {
 };
 
 /*
- * ADAT I/O capabilites (half-duplex mode)
+ * ADAT I/O capabilities (half-duplex mode)
  */
 static struct snd_pcm_hardware snd_rme32_adat_info =
 {
@@ -364,7 +364,7 @@ static struct snd_pcm_hardware snd_rme32_adat_info =
 };
 
 /*
- * SPDIF I/O capabilites (full-duplex mode)
+ * SPDIF I/O capabilities (full-duplex mode)
  */
 static struct snd_pcm_hardware snd_rme32_spdif_fd_info = {
 	.info =		(SNDRV_PCM_INFO_MMAP |
@@ -390,7 +390,7 @@ static struct snd_pcm_hardware snd_rme32_spdif_fd_info = {
 };
 
 /*
- * ADAT I/O capabilites (full-duplex mode)
+ * ADAT I/O capabilities (full-duplex mode)
  */
 static struct snd_pcm_hardware snd_rme32_adat_fd_info =
 {
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 0e694b011dc..6c2a9f4a765 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -359,7 +359,7 @@ snd_rme96_capture_copy(struct snd_pcm_substream *substream,
 }
 
 /*
- * Digital output capabilites (S/PDIF)
+ * Digital output capabilities (S/PDIF)
  */
 static struct snd_pcm_hardware snd_rme96_playback_spdif_info =
 {
@@ -388,7 +388,7 @@ static struct snd_pcm_hardware snd_rme96_playback_spdif_info =
 };
 
 /*
- * Digital input capabilites (S/PDIF)
+ * Digital input capabilities (S/PDIF)
  */
 static struct snd_pcm_hardware snd_rme96_capture_spdif_info =
 {
@@ -417,7 +417,7 @@ static struct snd_pcm_hardware snd_rme96_capture_spdif_info =
 };
 
 /*
- * Digital output capabilites (ADAT)
+ * Digital output capabilities (ADAT)
  */
 static struct snd_pcm_hardware snd_rme96_playback_adat_info =
 {
@@ -442,7 +442,7 @@ static struct snd_pcm_hardware snd_rme96_playback_adat_info =
 };
 
 /*
- * Digital input capabilites (ADAT)
+ * Digital input capabilities (ADAT)
  */
 static struct snd_pcm_hardware snd_rme96_capture_adat_info =
 {
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 980b9cd689d..b5538efd146 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -2256,7 +2256,7 @@ static int snd_hdspm_create_controls(struct snd_card *card, struct hdspm * hdspm
 	}
 
 	/* Channel playback mixer as default control 
-	   Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders, thats to big for any alsamixer 
+	   Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS faders, that's too big for any alsamixer
 	   they are accesible via special IOCTL on hwdep
 	   and the mixer 2dimensional mixer control */
 
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 315855082fe..fe67a92e2a1 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -404,7 +404,7 @@ static void usX2Y_usbpcm_subs_startup(struct snd_usX2Y_substream *subs)
 	struct usX2Ydev * usX2Y = subs->usX2Y;
 	usX2Y->prepare_subs = subs;
 	subs->urb[0]->start_frame = -1;
-	smp_wmb();	// Make shure above modifications are seen by i_usX2Y_subs_startup()
+	smp_wmb();	// Make sure above modifications are seen by i_usX2Y_subs_startup()
 	usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_subs_startup);
 }
 
-- 
cgit v1.2.3-70-g09d2