author     David S. Miller <davem@davemloft.net>          2006-01-31 18:29:18 -0800
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 01:11:13 -0800
commit     74bf4312fff083ab25c3f357cc653ada7995e5f6 (patch)
tree       c23dea461e32485f4cd7ca4b8c33c632655eb906 /arch/sparc64/kernel/tsb.S
parent     30d4d1ffed7098afe2641536d67eef150499da02 (diff)
[SPARC64]: Move away from virtual page tables, part 1.
We now use the TSB hardware assist features of the UltraSPARC
MMUs.
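
A TSB (Translation Storage Buffer) is a direct-mapped, in-memory cache of
translations: on a TLB miss the MMU hardware hands the miss handler a
precomputed pointer into the TSB and the tag to match, and software only has
to compare the tag, walk the page tables on a miss, and refill the entry.
A rough C sketch of that lookup, matching the tsb_miss_* handlers in the diff
below (the struct layout and names here are illustrative assumptions, not the
kernel's actual definitions):

    /* Each TSB entry is a (tag, data) pair; %g1 points at the entry,
     * %g6 holds the tag target the handler compares against.
     */
    struct tsb_entry {
            unsigned long tag;      /* virtual-address tag to compare */
            unsigned long data;     /* TTE (PTE) to load into the TLB */
    };

    /* Direct-mapped index: page number of the faulting address modulo
     * the number of entries (512 for an 8KB TSB with 16-byte entries).
     */
    static struct tsb_entry *tsb_slot(struct tsb_entry *tsb,
                                      unsigned long nentries,
                                      unsigned long vaddr)
    {
            return &tsb[(vaddr >> 13) & (nentries - 1)]; /* 8KB base pages */
    }
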
SMP is currently known to be broken; we need to find another place
to store the per-cpu base pointers. We hid them away in the TSB
base register, and that obviously will not work any more :-)
Another known broken case is non-8KB base page size.
Also noticed that flush_tlb_all() is not referenced anywhere; only
the internal __flush_tlb_all() (local cpu only) is used by the
sparc64 port, so we can get rid of flush_tlb_all().
The kernel gets its own 8KB TSB (swapper_tsb) and each address space
gets its own private 8KB TSB. Later we can add code to dynamically
increase the size of the per-process TSB as the RSS grows. An 8KB TSB is
good enough for up to about a 4MB RSS, after which the TSB starts to
incur many capacity and conflict misses.
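
The ~4MB figure follows directly from the TSB geometry, assuming the usual
16-byte (tag + data) TSB entry format:

    8KB TSB / 16 bytes per entry     = 512 entries
    512 entries * 8KB per base page  = 4MB of resident set covered

Beyond that point the direct-mapped TSB must start evicting translations
that are still live, which is where the capacity and conflict misses come
from.
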
We even accumulate OBP translations into the kernel TSB.
Another area for refinement is large page size support. We could use
a secondary address space TSB to handle those.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/tsb.S')
-rw-r--r--   arch/sparc64/kernel/tsb.S   169
1 file changed, 169 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
new file mode 100644
index 00000000000..44b9e6fed09
--- /dev/null
+++ b/arch/sparc64/kernel/tsb.S
@@ -0,0 +1,169 @@
+/* tsb.S: Sparc64 TSB table handling.
+ *
+ * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
+ */
+
+#include <asm/tsb.h>
+
+	.text
+	.align	32
+
+	/* Invoked from TLB miss handler, we are in the
+	 * MMU global registers and they are setup like
+	 * this:
+	 *
+	 * %g1: TSB entry pointer
+	 * %g2: available temporary
+	 * %g3: FAULT_CODE_{D,I}TLB
+	 * %g4: available temporary
+	 * %g5: available temporary
+	 * %g6: TAG TARGET
+	 * %g7: physical address base of the linux page
+	 *      tables for the current address space
+	 */
+	.globl	tsb_miss_dtlb
+tsb_miss_dtlb:
+	mov	TLB_TAG_ACCESS, %g4
+	ldxa	[%g4] ASI_DMMU, %g4
+	ba,pt	%xcc, tsb_miss_page_table_walk
+	 nop
+
+	.globl	tsb_miss_itlb
+tsb_miss_itlb:
+	mov	TLB_TAG_ACCESS, %g4
+	ldxa	[%g4] ASI_IMMU, %g4
+	ba,pt	%xcc, tsb_miss_page_table_walk
+	 nop
+
+tsb_miss_page_table_walk:
+	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+
+tsb_reload:
+	TSB_LOCK_TAG(%g1, %g2, %g4)
+
+	/* Load and check PTE.  */
+	ldxa	[%g5] ASI_PHYS_USE_EC, %g5
+	brgez,a,pn %g5, tsb_do_fault
+	 stx	%g0, [%g1]
+
+	TSB_WRITE(%g1, %g5, %g6)
+
+	/* Finally, load TLB and return from trap.  */
+tsb_tlb_reload:
+	cmp	%g3, FAULT_CODE_DTLB
+	bne,pn	%xcc, tsb_itlb_load
+	 nop
+
+tsb_dtlb_load:
+	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
+	retry
+
+tsb_itlb_load:
+	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
+	retry
+
+	/* No valid entry in the page tables, do full fault
+	 * processing.
+	 */
+
+	.globl	tsb_do_fault
+tsb_do_fault:
+	cmp	%g3, FAULT_CODE_DTLB
+	rdpr	%pstate, %g5
+	bne,pn	%xcc, tsb_do_itlb_fault
+	 wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
+
+tsb_do_dtlb_fault:
+	rdpr	%tl, %g4
+	cmp	%g4, 1
+	mov	TLB_TAG_ACCESS, %g4
+	ldxa	[%g4] ASI_DMMU, %g5
+	be,pt	%xcc, sparc64_realfault_common
+	 mov	FAULT_CODE_DTLB, %g4
+	ba,pt	%xcc, winfix_trampoline
+	 nop
+
+tsb_do_itlb_fault:
+	rdpr	%tpc, %g5
+	ba,pt	%xcc, sparc64_realfault_common
+	 mov	FAULT_CODE_ITLB, %g4
+
+	.globl	sparc64_realfault_common
+sparc64_realfault_common:
+	stb	%g4, [%g6 + TI_FAULT_CODE]	! Save fault code
+	stx	%g5, [%g6 + TI_FAULT_ADDR]	! Save fault address
+	ba,pt	%xcc, etrap			! Save trap state
+1:	 rd	%pc, %g7			! ...
+	call	do_sparc64_fault		! Call fault handler
+	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
+	ba,pt	%xcc, rtrap_clr_l6		! Restore cpu state
+	 nop					! Delay slot (fill me)
+
+	.globl	winfix_trampoline
+winfix_trampoline:
+	rdpr	%tpc, %g3			! Prepare winfixup TNPC
+	or	%g3, 0x7c, %g3			! Compute branch offset
+	wrpr	%g3, %tnpc			! Write it into TNPC
+	done					! Trap return
+
+	/* Reload MMU related context switch state at
+	 * schedule() time.
+	 *
+	 * %o0: page table physical address
+	 * %o1: TSB address
+	 */
+	.globl	tsb_context_switch
+tsb_context_switch:
+	wrpr	%g0, PSTATE_MG | PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV, %pstate
+
+	/* Set page table base alternate global.  */
+	mov	%o0, %g7
+
+	/* XXX can this happen?  */
+	brz,pn	%o1, 9f
+	 nop
+
+	/* Lock TSB into D-TLB.  */
+	sethi	%hi(PAGE_SIZE), %o3
+	and	%o3, %o1, %o3
+	sethi	%hi(TSBMAP_BASE), %o2
+	add	%o2, %o3, %o2
+
+	/* XXX handle PAGE_SIZE != 8K correctly...  */
+	mov	TSB_REG, %g1
+	stxa	%o2, [%g1] ASI_DMMU
+	membar	#Sync
+
+	stxa	%o2, [%g1] ASI_IMMU
+	membar	#Sync
+
+#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
+#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
+	sethi	%uhi(KERN_HIGHBITS), %g2
+	or	%g2, %ulo(KERN_HIGHBITS), %g2
+	sllx	%g2, 32, %g2
+	or	%g2, KERN_LOWBITS, %g2
+#undef KERN_HIGHBITS
+#undef KERN_LOWBITS
+
+	xor	%o1, %g2, %o1
+
+	/* We use entry 61 for this locked entry.  This is the spitfire
+	 * TLB entry number, and luckily cheetah masks the value with
+	 * 15 ending us up with entry 13 which is what we want in that
+	 * case too.
+	 *
+	 * XXX Interactions with prom_world()...
+	 */
+	mov	TLB_TAG_ACCESS, %g1
+	stxa	%o2, [%g1] ASI_DMMU
+	membar	#Sync
+	mov	(61 << 3), %g1
+	stxa	%o1, [%g1] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+9:
+	wrpr	%g0, PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE, %pstate
+
+	retl
+	 mov	%o2, %o0
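
For context, a hedged C sketch of how tsb_context_switch above is meant to
be driven at context switch time, based only on the %o0/%o1 register comments
in the code; the prototype and the wrapper are illustrative assumptions, not
code added by this patch:

    /* %o0 = page table physical address, %o1 = the next address
     * space's TSB.  Names here are hypothetical.
     */
    extern void tsb_context_switch(unsigned long pgtable_paddr,
                                   unsigned long *tsb);

    static inline void example_switch_to_mm(unsigned long next_pgtable_paddr,
                                            unsigned long *next_tsb)
    {
            /* Point the MMU globals at the next task's page tables and
             * lock its private 8KB TSB into the D-TLB.
             */
            tsb_context_switch(next_pgtable_paddr, next_tsb);
    }
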