author     David S. Miller <davem@davemloft.net>           2006-01-31 18:31:20 -0800
committer  David S. Miller <davem@sunset.davemloft.net>    2006-03-20 01:11:17 -0800
commit     98c5584cfc47932c4f3ccf5eee2e0bae1447b85e (patch)
tree       c067ac8bfc081bbe0b3073374cb15708458e04ab /arch/sparc64/mm
parent     09f94287f7260e03bbeab497e743691fafcc22c3 (diff)
[SPARC64]: Add infrastructure for dynamic TSB sizing.
This also cleans up tsb_context_switch(). The assembler routine is now __tsb_context_switch(), and the former is an inline function that picks the relevant bits out of the mm_struct and passes them into the assembler code as arguments.

setup_tsb_params() computes the locked TLB entry that maps the TSB. Later, when we support the physical-address quad-load instructions of Cheetah+ and later chips, we will simply use the physical address for the TSB register value and set both the map virtual address and the map PTE to zero.

Signed-off-by: David S. Miller <davem@davemloft.net>
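The split described above amounts to a thin inline wrapper over the assembler entry point. A minimal sketch of that shape, assuming the __pa(mm->pgd) argument and the exact parameter list (the mm->context field names are the ones this patch introduces below):

    /* Hedged sketch of the wrapper, not the verbatim header change. */
    extern void __tsb_context_switch(unsigned long pgd_pa,
                                     unsigned long tsb_reg,
                                     unsigned long tsb_vaddr,
                                     unsigned long tsb_pte);

    static inline void tsb_context_switch(struct mm_struct *mm)
    {
            /* Hand the precomputed TSB register value and the locked
             * mapping (virtual address, PTE) straight to assembler.
             */
            __tsb_context_switch(__pa(mm->pgd),
                                 mm->context.tsb_reg_val,
                                 mm->context.tsb_map_vaddr,
                                 mm->context.tsb_map_pte);
    }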
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--  arch/sparc64/mm/tsb.c | 109
1 file changed, 95 insertions(+), 14 deletions(-)
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 2f84cef6c1b..dfe7144fcdf 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -9,13 +9,7 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
-
-#define TSB_ENTRY_ALIGNMENT 16
-
-struct tsb {
- unsigned long tag;
- unsigned long pte;
-} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+#include <asm/pgtable.h>
/* We use an 8K TSB for the whole kernel, this allows to
* handle about 4MB of modules and vmalloc mappings without
@@ -27,10 +21,10 @@ struct tsb {
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
-static inline unsigned long tsb_hash(unsigned long vaddr)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
vaddr >>= PAGE_SHIFT;
- return vaddr & (KERNEL_TSB_NENTRIES - 1);
+ return vaddr & (nentries - 1);
}
static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
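Passing nentries in keeps the hash a single AND mask, which only works because every TSB size this patch installs is a power of two (8192 << n bytes of 16-byte entries). Illustrative numbers for the initial 8K table, assuming nothing beyond what the hunk above shows:

    /* An 8192-byte TSB at 16 bytes per entry gives 512 slots. */
    unsigned long nentries = 8192 / sizeof(struct tsb);            /* 512 */
    unsigned long hash = (vaddr >> PAGE_SHIFT) & (nentries - 1);
    /* mask is 0x1ff, equivalent to (vaddr >> PAGE_SHIFT) % 512 */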
@@ -51,7 +45,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
unsigned long v;
for (v = start; v < end; v += PAGE_SIZE) {
- struct tsb *ent = &swapper_tsb[tsb_hash(v)];
+ unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+ struct tsb *ent = &swapper_tsb[hash];
if (tag_compare(ent, v, 0)) {
ent->tag = 0UL;
@@ -63,8 +58,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
void flush_tsb_user(struct mmu_gather *mp)
{
struct mm_struct *mm = mp->mm;
- struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb;
+ struct tsb *tsb = mm->context.tsb;
unsigned long ctx = ~0UL;
+ unsigned long nentries = mm->context.tsb_nentries;
int i;
if (CTX_VALID(mm->context))
@@ -76,7 +72,7 @@ void flush_tsb_user(struct mmu_gather *mp)
v &= ~0x1UL;
- ent = &tsb[tsb_hash(v)];
+ ent = &tsb[tsb_hash(v, nentries)];
if (tag_compare(ent, v, ctx)) {
ent->tag = 0UL;
membar_storeload_storestore();
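Both flush paths invalidate an entry the same way: zero the tag so tag_compare() can no longer match it, then order that store with a membar before any later TSB update. Sketched as a standalone helper this patch does not actually add:

    static inline void tsb_invalidate(struct tsb *ent)
    {
            ent->tag = 0UL;                 /* entry now misses on lookup */
            membar_storeload_storestore();  /* order the tag clear first  */
    }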
@@ -84,6 +80,83 @@ void flush_tsb_user(struct mmu_gather *mp)
}
}
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+{
+ unsigned long tsb_reg, base, tsb_paddr;
+ unsigned long page_sz, tte;
+
+ mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+
+ base = TSBMAP_BASE;
+ tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
+ _PAGE_CV | _PAGE_P | _PAGE_W);
+ tsb_paddr = __pa(mm->context.tsb);
+
+ /* Use the smallest page size that can map the whole TSB
+ * in one TLB entry.
+ */
+ switch (tsb_bytes) {
+ case 8192 << 0:
+ tsb_reg = 0x0UL;
+#ifdef DCACHE_ALIASING_POSSIBLE
+ base += (tsb_paddr & 8192);
+#endif
+ tte |= _PAGE_SZ8K;
+ page_sz = 8192;
+ break;
+
+ case 8192 << 1:
+ tsb_reg = 0x1UL;
+ tte |= _PAGE_SZ64K;
+ page_sz = 64 * 1024;
+ break;
+
+ case 8192 << 2:
+ tsb_reg = 0x2UL;
+ tte |= _PAGE_SZ64K;
+ page_sz = 64 * 1024;
+ break;
+
+ case 8192 << 3:
+ tsb_reg = 0x3UL;
+ tte |= _PAGE_SZ64K;
+ page_sz = 64 * 1024;
+ break;
+
+ case 8192 << 4:
+ tsb_reg = 0x4UL;
+ tte |= _PAGE_SZ512K;
+ page_sz = 512 * 1024;
+ break;
+
+ case 8192 << 5:
+ tsb_reg = 0x5UL;
+ tte |= _PAGE_SZ512K;
+ page_sz = 512 * 1024;
+ break;
+
+ case 8192 << 6:
+ tsb_reg = 0x6UL;
+ tte |= _PAGE_SZ512K;
+ page_sz = 512 * 1024;
+ break;
+
+ case 8192 << 7:
+ tsb_reg = 0x7UL;
+ tte |= _PAGE_SZ4MB;
+ page_sz = 4 * 1024 * 1024;
+ break;
+ };
+
+ tsb_reg |= base;
+ tsb_reg |= (tsb_paddr & (page_sz - 1UL));
+ tte |= (tsb_paddr & ~(page_sz - 1UL));
+
+ mm->context.tsb_reg_val = tsb_reg;
+ mm->context.tsb_map_vaddr = base;
+ mm->context.tsb_map_pte = tte;
+}
+
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
unsigned long page = get_zeroed_page(GFP_KERNEL);
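Since sizeof(struct tsb) is 16, the eight size classes in setup_tsb_params() run from 512 entries (size code 0x0) to 65536 entries (size code 0x7), always 512 << code. A hedged helper making that correspondence explicit, not part of the patch:

    static unsigned long tsb_code_to_nentries(unsigned long code)
    {
            /* 8192 << code bytes at 16 bytes/entry == 512 << code entries. */
            return (8192UL << code) / sizeof(struct tsb);
    }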
@@ -92,14 +165,22 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
if (unlikely(!page))
return -ENOMEM;
- mm->context.sparc64_tsb = (unsigned long *) page;
+ mm->context.tsb = (struct tsb *) page;
+ setup_tsb_params(mm, PAGE_SIZE);
return 0;
}
void destroy_context(struct mm_struct *mm)
{
- free_page((unsigned long) mm->context.sparc64_tsb);
+ free_page((unsigned long) mm->context.tsb);
+
+ /* We can remove these later, but for now it's useful
+ * to catch any bogus post-destroy_context() references
+ * to the TSB.
+ */
+ mm->context.tsb = NULL;
+ mm->context.tsb_reg_val = 0UL;
spin_lock(&ctx_alloc_lock);
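The last hunk settles the lifecycle: init_new_context() starts every address space in the smallest size class, and destroy_context() poisons the context fields so stale post-destroy references trap rather than silently reuse a freed TSB. Assuming sparc64's default 8K base page (not stated in the diff itself), the initial sizing call resolves as follows:

    /* Worked instance, assuming PAGE_SIZE == 8192: the fresh TSB is
     * one zeroed page, so this selects the "8192 << 0" case above.
     */
    setup_tsb_params(mm, PAGE_SIZE);
    /* -> mm->context.tsb_nentries == 512
     * -> low bits of mm->context.tsb_reg_val == 0x0 (8K size class)
     * -> tsb_map_pte maps the table with one locked 8K TLB entry
     */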