| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:08:04 +0900 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:08:04 +0900 |
| commit    | de390bba797aa9a554bc1769b6a8771605854d79 (patch) | |
| tree      | ce95610d4a70ec0a7307a30cfd1a66fdf0c901ab /arch/mips/mm | |
| parent    | 50e0d10232db05c6776afcf6098459bff47e8b15 (diff) | |
| parent    | 382fc33b4a04e2dde89b4c69a6880e0c7d9761e2 (diff) | |
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS update from Ralf Baechle:
"This is the MIPS update for 3.7.
A fair chunk of the changes are platform updates for the Cavium Octeon
SOC (which involves machine-generated header files of considerable
size) and for the Atheros ATH79xx, RMI (aka NetLogic, aka Broadcom)
XLP and Broadcom BCM63xx platforms.

Support for the commercial MIPS simulator MIPSsim has been removed, as
MIPS Technologies is shifting away from that product and QEMU offers
various more powerful platforms. The generic MIPS code can now also
probe for no-execute / write-only TLB features implemented without the
full SmartMIPS extension, as permitted by the latest MIPS processor
architecture. Lots of small changes to generic code."
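The no-execute / write-only probing mentioned above is exposed to the rest of the arch code as the cpu_has_rixi predicate, which the diff below substitutes for the old kernel_uses_smartmips_rixi test. As a minimal userspace sketch of what the RI/XI bits buy the page protections; the bit values and the hard-coded cpu_has_rixi flag are invented for the example and are not the kernel's definitions:

#include <stdio.h>

/*
 * Illustration only: the bit values and the fixed cpu_has_rixi flag are
 * made up.  With the RI (read-inhibit) and XI (execute-inhibit) TLB bits,
 * a present page can be mapped non-executable or even completely
 * inaccessible; without them, "valid" implies readable and executable and
 * only writes can be withheld.
 */
#define _PAGE_PRESENT (1u << 0)
#define _PAGE_NO_READ (1u << 1)	/* honoured only when RI is implemented */
#define _PAGE_NO_EXEC (1u << 2)	/* honoured only when XI is implemented */

static int cpu_has_rixi = 1;	/* normally filled in by the CPU probe */

/* Protection bits for a PROT_NONE style mapping. */
static unsigned int prot_none_bits(void)
{
	if (cpu_has_rixi)
		/* RI + XI let PROT_NONE really mean "no access at all". */
		return _PAGE_PRESENT | _PAGE_NO_READ | _PAGE_NO_EXEC;
	/* Without RI/XI the hardware can only withhold write permission. */
	return _PAGE_PRESENT;
}

int main(void)
{
	printf("PROT_NONE pte bits: %#x\n", prot_none_bits());
	return 0;
}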
* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (78 commits)
MIPS: ath79: Fix CPU/DDR frequency calculation for SRIF PLLs
MIPS: ath79: use correct fractional dividers for {CPU,DDR}_PLL on AR934x
MIPS: BCM63XX: Properly handle mac address octet overflow
MIPS: Kconfig: Avoid build errors by hiding USE_OF from the user.
MIPS: Replace `-' in defconfig filename wth `_' for consistency.
MIPS: Wire kcmp syscall.
MIPS: MIPSsim: Remove the MIPSsim platform.
MIPS: NOTIFY_RESUME is not needed in TIF masks
MIPS: Merge the identical "return from syscall" per-ABI code
MIPS: Unobfuscate _TIF..._MASK
MIPS: Prevent hitting do_notify_resume() with !user_mode(regs).
MIPS: Replace 'kernel_uses_smartmips_rixi' with 'cpu_has_rixi'.
MIPS: Add base architecture support for RI and XI.
MIPS: Optimise TLB handlers for MIPS32/64 R2 cores.
MIPS: uasm: Add INS and EXT instructions.
MIPS: Avoid pipeline stalls on some MIPS32R2 cores.
MIPS: Make VPE count to be one-based.
MIPS: Add new end of interrupt functionality for GIC.
MIPS: Add EIC support for GIC.
MIPS: Code clean-ups for the GIC.
...
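One of the one-line fixes above, the BCM63XX MAC address octet overflow change, concerns carry propagation when consecutive MAC addresses are derived from a base address: incrementing only the last octet wraps at 0xff. A generic sketch of that arithmetic (an illustration, not the driver's code; the base address is made up):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only, not the BCM63XX driver's code.  Add 'count' to a MAC
 * address, rippling the carry from the least significant octet upwards
 * instead of letting the last octet silently wrap.
 */
static void mac_add(uint8_t mac[6], unsigned int count)
{
	unsigned int carry = count;
	int i;

	for (i = 5; i >= 0 && carry; i--) {
		carry += mac[i];
		mac[i] = carry & 0xff;
		carry >>= 8;
	}
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xff, 0xff, 0xfe };	/* made-up base */

	mac_add(mac, 3);	/* ...:ff:ff:fe + 3 -> ...:00:00:01, carry into octet 2 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}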
Diffstat (limited to 'arch/mips/mm')
-rw-r--r-- | arch/mips/mm/Makefile  | 17
-rw-r--r-- | arch/mips/mm/c-r4k.c   | 21
-rw-r--r-- | arch/mips/mm/cache.c   |  2
-rw-r--r-- | arch/mips/mm/fault.c   |  2
-rw-r--r-- | arch/mips/mm/tlb-r4k.c |  2
-rw-r--r-- | arch/mips/mm/tlbex.c   | 44
-rw-r--r-- | arch/mips/mm/uasm.c    | 23
7 files changed, 79 insertions, 32 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index fd6203f14f1..90ceb963aaf 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -11,27 +11,12 @@
 obj-$(CONFIG_64BIT) += pgtable-64.o
 obj-$(CONFIG_HIGHMEM) += highmem.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_CPU_LOONGSON2) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS32) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS64) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_NEVADA) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R10000) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
-obj-$(CONFIG_CPU_R4300) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R4X00) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5000) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5432) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5500) += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
-obj-$(CONFIG_CPU_RM7000) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_RM9000) += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
 obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
-obj-$(CONFIG_CPU_XLR) += c-r4k.o tlb-r4k.o cex-gen.o
-obj-$(CONFIG_CPU_XLP) += c-r4k.o tlb-r4k.o cex-gen.o
 
 obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index f092c265dc6..4c32ede464b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -786,6 +786,25 @@ static inline void rm7k_erratum31(void)
 	}
 }
 
+static inline void alias_74k_erratum(struct cpuinfo_mips *c)
+{
+	/*
+	 * Early versions of the 74K do not update the cache tags on a
+	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
+	 * aliases. In this case it is better to treat the cache as always
+	 * having aliases.
+	 */
+	if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
+		c->dcache.flags |= MIPS_CACHE_VTAG;
+	if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
+		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+	if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
+	    ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
+		c->dcache.flags |= MIPS_CACHE_VTAG;
+		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+	}
+}
+
 static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
 	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
@@ -1056,6 +1075,8 @@ static void __cpuinit probe_pcache(void)
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+		if (c->cputype == CPU_74K)
+			alias_74k_erratum(c);
 		if ((read_c0_config7() & (1 << 16))) {
 			/* effectively physically indexed dcache,
 			   thus no virtual aliases. */
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 829320c7b17..07cec4407b0 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -142,7 +142,7 @@ EXPORT_SYMBOL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index c14f6dfed99..7a19957735e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -114,7 +114,7 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if (kernel_uses_smartmips_rixi) {
+		if (cpu_has_rixi) {
 			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
 #if 0
 				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d2572cb232d..87b9cfcc30f 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -401,7 +401,7 @@ void __cpuinit tlb_init(void)
 	    current_cpu_type() == CPU_R14000)
 		write_c0_framemask(0);
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		/*
 		 * Enable the no read, no exec bits, and enable large virtual
 		 * address.
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 03eb0ef9158..e09d4925690 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -449,8 +449,20 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 
 	if (cpu_has_mips_r2) {
-		if (cpu_has_mips_r2_exec_hazard)
+		/*
+		 * The architecture spec says an ehb is required here,
+		 * but a number of cores do not have the hazard and
+		 * using an ehb causes an expensive pipeline stall.
+		 */
+		switch (current_cpu_type()) {
+		case CPU_M14KC:
+		case CPU_74K:
+			break;
+
+		default:
 			uasm_i_ehb(p);
+			break;
+		}
 		tlbw(p);
 		return;
 	}
@@ -586,7 +598,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 
 static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, unsigned int reg)
 {
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 	} else {
@@ -921,6 +933,13 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 #endif
 	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+
+	if (cpu_has_mips_r2) {
+		uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
+		uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
+		return;
+	}
+
 	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
@@ -956,6 +975,15 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 
 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
+	if (cpu_has_mips_r2) {
+		/* PTE ptr offset is obtained from BadVAddr */
+		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+		UASM_i_LW(p, ptr, 0, ptr);
+		uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+		uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+		return;
+	}
+
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
 	 * circumstances the move from cp0_context might produce a
@@ -990,7 +1018,7 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		if (kernel_uses_smartmips_rixi) {
+		if (cpu_has_rixi) {
 			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
 			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
 			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
@@ -1017,7 +1045,7 @@
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
 		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
@@ -1183,7 +1211,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 		UASM_i_LW(p, even, 0, ptr); /* get even pte */
 		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
 	}
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
 		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
 		uasm_i_drotr(p, even, even,
@@ -1545,7 +1573,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 {
 	int t = scratch >= 0 ? scratch : pte;
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		if (use_bbit_insns()) {
 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
 			uasm_i_nop(p);
@@ -1875,7 +1903,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		/*
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it. Skip the expensive test..
@@ -1929,7 +1957,7 @@
 	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
 
-	if (kernel_uses_smartmips_rixi) {
+	if (cpu_has_rixi) {
 		/*
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it. Skip the expensive test..
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 64a28e81906..39b89105622 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,11 +63,12 @@ enum opcode {
 	insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
 	insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
 	insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
-	insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, insn_ll, insn_lld,
-	insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, insn_or, insn_ori,
-	insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
-	insn_sra, insn_srl, insn_subu, insn_sw, insn_syscall, insn_tlbp,
-	insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+	insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx,
+	insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0,
+	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
+	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
+	insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor,
+	insn_xori,
 };
 
 struct insn {
@@ -115,6 +116,9 @@ static struct insn insn_table[] __uasminitdata = {
 	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
 	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
 	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
+	{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
@@ -341,6 +345,13 @@ Ip_u2u1msbu3(op) \
 } \
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_u2u1msbdu3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+	build_insn(buf, insn##op, b, a, d-1, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u1u2(op) \
 Ip_u1u2(op) \
 { \
@@ -394,6 +405,8 @@ I_u2u1u3(_drotr)
 I_u2u1u3(_drotr32)
 I_u3u1u2(_dsubu)
 I_0(_eret)
+I_u2u1msbdu3(_ext)
+I_u2u1msbu3(_ins)
 I_u1(_j)
 I_u1(_jal)
 I_u1(_jr)
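The build_get_pgde32() hunk above replaces a srl/sll/addu sequence with the MIPS32R2 EXT and INS bitfield instructions that the uasm.c hunk teaches the micro-assembler. A rough userspace model of why the two-instruction form yields the same PGD pointer when the table base is suitably aligned; the shift constants and addresses below are example values, not the kernel's configuration:

#include <stdint.h>
#include <stdio.h>

/*
 * Example values only; the real PGDIR_SHIFT/PGD_T_LOG2 depend on the kernel
 * configuration, and the addresses below are made up.
 */
#define PGDIR_SHIFT 22		/* example: 4 MiB covered per PGD entry */
#define PGD_T_LOG2  2		/* example: 4-byte PGD entries */

/* ext rt, rs, pos, size: extract 'size' bits of rs starting at bit 'pos'. */
static uint32_t mips_ext(uint32_t rs, unsigned int pos, unsigned int size)
{
	return (rs >> pos) & ((1u << size) - 1);
}

/* ins rt, rs, pos, size: insert the low 'size' bits of rs into rt at 'pos'. */
static uint32_t mips_ins(uint32_t rt, uint32_t rs, unsigned int pos, unsigned int size)
{
	uint32_t mask = ((1u << size) - 1) << pos;

	return (rt & ~mask) | ((rs << pos) & mask);
}

int main(void)
{
	uint32_t badvaddr = 0x12345678;	/* faulting address */
	uint32_t pgd_base = 0x80400000;	/* PGD base; aligned so the inserted
					   field is already zero */

	/* Pre-R2 sequence: srl, sll, addu. */
	uint32_t old_ptr = pgd_base + ((badvaddr >> PGDIR_SHIFT) << PGD_T_LOG2);

	/* R2 sequence: ext the PGD index, ins it into the aligned base. */
	uint32_t idx = mips_ext(badvaddr, PGDIR_SHIFT, 32 - PGDIR_SHIFT);
	uint32_t new_ptr = mips_ins(pgd_base, idx, PGD_T_LOG2, 32 - PGDIR_SHIFT);

	printf("old=%#x new=%#x\n", old_ptr, new_ptr);
	return 0;
}

The payoff is one instruction less in the TLB refill hot path; together with the build_tlb_write_entry() change above, it also avoids an unnecessary ehb stall on cores that do not have the execution hazard.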