Diffstat (limited to 'arch/mips/kernel')
30 files changed, 629 insertions, 724 deletions
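Ahead of the hunks themselves, a small worked sketch of the FTLB write-probability selection that the cpu-probe.c changes further down introduce: the ratio of total TLB entries to VTLB entries is mapped onto the 2-bit FTLBP field of Config6 before FTLBEN is set. This is a minimal user-space rendering under stated assumptions, not kernel code; the bit positions, the helper name and the example entry counts are illustrative only.

/*
 * Sketch of the Config6 FTLBP selection done by
 * calculate_ftlb_probability()/set_ftlb_enable() in the cpu-probe.c hunk
 * below.  Field positions and TLB sizes are assumed for the demo.
 */
#include <stdio.h>

#define DEMO_CONF6_FTLBP_SHIFT	16		/* assumed field position */
#define DEMO_CONF6_FTLBEN	(1u << 15)	/* assumed enable bit */

static unsigned int ftlb_probability(unsigned int tlbsize, unsigned int vtlbsize)
{
	unsigned int ratio = tlbsize / vtlbsize;

	/* 1 = 15:1, 2 = 7:1, 3 = 3:1 split of TLBWR between FTLB and VTLB */
	if (ratio >= 12)
		return 1;
	else if (ratio >= 6)
		return 2;
	return 3;
}

int main(void)
{
	unsigned int vtlb = 64, ftlb = 512;	/* e.g. 128 sets x 4 ways */
	unsigned int config6 = 0;

	config6 &= ~(3u << DEMO_CONF6_FTLBP_SHIFT);	/* clear old probability */
	config6 |= ftlb_probability(vtlb + ftlb, vtlb) << DEMO_CONF6_FTLBP_SHIFT;
	config6 |= DEMO_CONF6_FTLBEN;

	printf("FTLBP=%u config6=0x%08x\n",
	       (config6 >> DEMO_CONF6_FTLBP_SHIFT) & 3, config6);
	return 0;
}

With the example sizes above the ratio is 9, so the 7:1 split (FTLBP = 2) is chosen, matching the thresholds in the hunk.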
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 008a2fed058..92987d1bbe5 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -4,9 +4,10 @@ extra-y := head.o vmlinux.lds -obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \ - prom.o ptrace.o reset.o setup.o signal.o syscall.o \ - time.o topology.o traps.o unaligned.o watch.o vdso.o +obj-y += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \ + process.o prom.o ptrace.o reset.o setup.o signal.o \ + syscall.o time.o topology.o traps.o unaligned.o watch.o \ + vdso.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg @@ -18,12 +19,10 @@ endif obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o -obj-$(CONFIG_CEVT_GIC) += cevt-gic.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o -obj-$(CONFIG_CSRC_GIC) += csrc-gic.o obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o @@ -68,7 +67,6 @@ obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o obj-$(CONFIG_MIPS_MSC) += irq-msc01.o obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o -obj-$(CONFIG_IRQ_GIC) += irq-gic.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_32BIT) += scall32-o32.o diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S index 290c23b5167..86495072a92 100644 --- a/arch/mips/kernel/bmips_vec.S +++ b/arch/mips/kernel/bmips_vec.S @@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end: END(bmips_reset_nmi_vec) .set pop - .previous /*********************************************************************** * CPU1 warm restart vector (used for second and subsequent boots). @@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01) jr ra END(bmips_enable_xks01) - - .previous diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c deleted file mode 100644 index 6093716980b..00000000000 --- a/arch/mips/kernel/cevt-gic.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2013 Imagination Technologies Ltd. - */ -#include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/percpu.h> -#include <linux/smp.h> -#include <linux/irq.h> - -#include <asm/time.h> -#include <asm/gic.h> -#include <asm/mips-boards/maltaint.h> - -DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); -int gic_timer_irq_installed; - - -static int gic_next_event(unsigned long delta, struct clock_event_device *evt) -{ - u64 cnt; - int res; - - cnt = gic_read_count(); - cnt += (u64)delta; - gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); - res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; - return res; -} - -void gic_set_clock_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - /* Nothing to do ... 
*/ -} - -irqreturn_t gic_compare_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *cd; - int cpu = smp_processor_id(); - - gic_write_compare(gic_read_compare()); - cd = &per_cpu(gic_clockevent_device, cpu); - cd->event_handler(cd); - return IRQ_HANDLED; -} - -struct irqaction gic_compare_irqaction = { - .handler = gic_compare_interrupt, - .flags = IRQF_PERCPU | IRQF_TIMER, - .name = "timer", -}; - - -void gic_event_handler(struct clock_event_device *dev) -{ -} - -int gic_clockevent_init(void) -{ - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - unsigned int irq; - - if (!cpu_has_counter || !gic_frequency) - return -ENXIO; - - irq = MIPS_GIC_IRQ_BASE; - - cd = &per_cpu(gic_clockevent_device, cpu); - - cd->name = "MIPS GIC"; - cd->features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_C3STOP; - - clockevent_set_clock(cd, gic_frequency); - - /* Calculate the min / max delta */ - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = clockevent_delta2ns(0x300, cd); - - cd->rating = 300; - cd->irq = irq; - cd->cpumask = cpumask_of(cpu); - cd->set_next_event = gic_next_event; - cd->set_mode = gic_set_clock_mode; - cd->event_handler = gic_event_handler; - - clockevents_register_device(cd); - - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002); - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK); - - if (gic_timer_irq_installed) - return 0; - - gic_timer_irq_installed = 1; - - setup_irq(irq, &gic_compare_irqaction); - irq_set_handler(irq, handle_percpu_irq); - return 0; -} diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index bc127e22fda..6acaad0480a 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -11,10 +11,10 @@ #include <linux/percpu.h> #include <linux/smp.h> #include <linux/irq.h> +#include <linux/irqchip/mips-gic.h> #include <asm/time.h> #include <asm/cevt-r4k.h> -#include <asm/gic.h> static int mips_next_event(unsigned long delta, struct clock_event_device *evt) @@ -85,8 +85,8 @@ void mips_event_handler(struct clock_event_device *dev) */ static int c0_compare_int_pending(void) { -#ifdef CONFIG_IRQ_GIC - if (cpu_has_veic) +#ifdef CONFIG_MIPS_GIC + if (gic_present) return gic_get_timer_pending(); #endif return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index e6e97d2a5c9..0384b05ab5a 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -229,6 +229,7 @@ LEAF(mips_cps_core_init) nop .set push + .set mips32r2 .set mt /* Only allow 1 TC per VPE to execute... */ @@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes) nop .set push + .set mips32r2 .set mt 1: /* Enter VPE configuration state */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 94c4a0c0a57..5342674842f 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -69,6 +69,63 @@ static int __init htw_disable(char *s) __setup("nohtw", htw_disable); +static int mips_ftlb_disabled; +static int mips_has_ftlb_configured; + +static void set_ftlb_enable(struct cpuinfo_mips *c, int enable); + +static int __init ftlb_disable(char *s) +{ + unsigned int config4, mmuextdef; + + /* + * If the core hasn't done any FTLB configuration, there is nothing + * for us to do here. 
+ */ + if (!mips_has_ftlb_configured) + return 1; + + /* Disable it in the boot cpu */ + set_ftlb_enable(&cpu_data[0], 0); + + back_to_back_c0_hazard(); + + config4 = read_c0_config4(); + + /* Check that FTLB has been disabled */ + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + /* MMUSIZEEXT == VTLB ON, FTLB OFF */ + if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) { + /* This should never happen */ + pr_warn("FTLB could not be disabled!\n"); + return 1; + } + + mips_ftlb_disabled = 1; + mips_has_ftlb_configured = 0; + + /* + * noftlb is mainly used for debug purposes so print + * an informative message instead of using pr_debug() + */ + pr_info("FTLB has been disabled\n"); + + /* + * Some of these bits are duplicated in the decode_config4. + * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case + * once FTLB has been disabled so undo what decode_config4 did. + */ + cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways * + cpu_data[0].tlbsizeftlbsets; + cpu_data[0].tlbsizeftlbsets = 0; + cpu_data[0].tlbsizeftlbways = 0; + + return 1; +} + +__setup("noftlb", ftlb_disable); + + static inline void check_errata(void) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -140,7 +197,7 @@ static inline unsigned long cpu_get_fpu_id(void) */ static inline int __cpu_has_fpu(void) { - return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE); + return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; } static inline unsigned long cpu_get_msa_id(void) @@ -193,6 +250,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) static char unknown_isa[] = KERN_ERR \ "Unsupported ISA type, c0.config0: %d."; +static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c) +{ + + unsigned int probability = c->tlbsize / c->tlbsizevtlb; + + /* + * 0 = All TLBWR instructions go to FTLB + * 1 = 15:1: For every 16 TBLWR instructions, 15 go to the + * FTLB and 1 goes to the VTLB. + * 2 = 7:1: As above with 7:1 ratio. + * 3 = 3:1: As above with 3:1 ratio. + * + * Use the linear midpoint as the probability threshold. + */ + if (probability >= 12) + return 1; + else if (probability >= 6) + return 2; + else + /* + * So FTLB is less than 4 times bigger than VTLB. + * A 3:1 ratio can still be useful though. 
+ */ + return 3; +} + static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) { unsigned int config6; @@ -203,9 +286,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) case CPU_P5600: /* proAptiv & related cores use Config6 to enable the FTLB */ config6 = read_c0_config6(); + /* Clear the old probability value */ + config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT); if (enable) /* Enable FTLB */ - write_c0_config6(config6 | MIPS_CONF6_FTLBEN); + write_c0_config6(config6 | + (calculate_ftlb_probability(c) + << MIPS_CONF6_FTLBP_SHIFT) + | MIPS_CONF6_FTLBEN); else /* Disable FTLB */ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); @@ -368,6 +456,8 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) ftlb_page = MIPS_CONF4_VFTLBPAGESIZE; /* fall through */ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT: + if (mips_ftlb_disabled) + break; newcf4 = (config4 & ~ftlb_page) | (page_size_ftlb(mmuextdef) << MIPS_CONF4_FTLBPAGESIZE_SHIFT); @@ -387,6 +477,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >> MIPS_CONF4_FTLBWAYS_SHIFT) + 2; c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets; + mips_has_ftlb_configured = 1; break; } } @@ -401,7 +492,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) unsigned int config5; config5 = read_c0_config5(); - config5 &= ~MIPS_CONF5_UFR; + config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); write_c0_config5(config5); if (config5 & MIPS_CONF5_EVA) @@ -422,8 +513,8 @@ static void decode_configs(struct cpuinfo_mips *c) c->scache.flags = MIPS_CACHE_NOT_PRESENT; - /* Enable FTLB if present */ - set_ftlb_enable(c, 1); + /* Enable FTLB if present and not disabled */ + set_ftlb_enable(c, !mips_ftlb_disabled); ok = decode_config0(c); /* Read Config registers. */ BUG_ON(!ok); /* Arch spec violation! 
*/ @@ -757,31 +848,34 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2e"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON2F: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON3A: c->cputype = CPU_LOONGSON3; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; case PRID_REV_LOONGSON3B_R1: case PRID_REV_LOONGSON3B_R2: c->cputype = CPU_LOONGSON3; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3b"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; } - set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | MIPS_CPU_32FPR; c->tlbsize = 64; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; case PRID_IMP_LOONGSON_32: /* Loongson-1 */ decode_configs(c); @@ -1024,6 +1118,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) break; } case PRID_IMP_BMIPS5000: + case PRID_IMP_BMIPS5200: c->cputype = CPU_BMIPS5000; __cpu_name[cpu] = "Broadcom BMIPS5000"; set_elf_platform(cpu, "bmips5000"); @@ -1254,6 +1349,8 @@ void cpu_probe(void) MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; + if (c->fpu_id & MIPS_FPIR_FREP) + c->options |= MIPS_CPU_FRE; } } diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index f291cf99b03..6fe7790e586 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c @@ -38,7 +38,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, kunmap_atomic(vaddr); } else { if (!kdump_buf_page) { - pr_warning("Kdump: Kdump buffer page not allocated\n"); + pr_warn("Kdump: Kdump buffer page not allocated\n"); return -EFAULT; } @@ -57,7 +57,7 @@ static int __init kdump_buf_page_init(void) kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!kdump_buf_page) { - pr_warning("Kdump: Failed to allocate kdump buffer page\n"); + pr_warn("Kdump: Failed to allocate kdump buffer page\n"); ret = -ENOMEM; } diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c deleted file mode 100644 index e0262090111..00000000000 --- a/arch/mips/kernel/csrc-gic.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - */ -#include <linux/init.h> -#include <linux/time.h> - -#include <asm/gic.h> - -static cycle_t gic_hpt_read(struct clocksource *cs) -{ - return gic_read_count(); -} - -static struct clocksource gic_clocksource = { - .name = "GIC", - .read = gic_hpt_read, - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -void __init gic_clocksource_init(unsigned int frequency) -{ - unsigned int config, bits; - - /* Calculate the clocksource mask. */ - GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config); - bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >> - (GIC_SH_CONFIG_COUNTBITS_SHF - 2)); - - /* Set clocksource mask. */ - gic_clocksource.mask = CLOCKSOURCE_MASK(bits); - - /* Calculate a somewhat reasonable rating value. 
*/ - gic_clocksource.rating = 200 + frequency / 10000000; - - clocksource_register_hz(&gic_clocksource, frequency); -} diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c new file mode 100644 index 00000000000..c92b15df689 --- /dev/null +++ b/arch/mips/kernel/elf.c @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/elf.h> +#include <linux/sched.h> + +enum { + FP_ERROR = -1, + FP_DOUBLE_64A = -2, +}; + +int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, + bool is_interp, struct arch_elf_state *state) +{ + struct elfhdr *ehdr = _ehdr; + struct elf_phdr *phdr = _phdr; + struct mips_elf_abiflags_v0 abiflags; + int ret; + + if (config_enabled(CONFIG_64BIT) && + (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) + return 0; + if (phdr->p_type != PT_MIPS_ABIFLAGS) + return 0; + if (phdr->p_filesz < sizeof(abiflags)) + return -EINVAL; + + ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags, + sizeof(abiflags)); + if (ret < 0) + return ret; + if (ret != sizeof(abiflags)) + return -EIO; + + /* Record the required FP ABIs for use by mips_check_elf */ + if (is_interp) + state->interp_fp_abi = abiflags.fp_abi; + else + state->fp_abi = abiflags.fp_abi; + + return 0; +} + +static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) +{ + /* If the ABI requirement is provided, simply return that */ + if (in_abi != -1) + return in_abi; + + /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ + if (ehdr->e_flags & EF_MIPS_FP64) + return MIPS_ABI_FP_64; + + /* Default to MIPS_ABI_FP_DOUBLE */ + return MIPS_ABI_FP_DOUBLE; +} + +int arch_check_elf(void *_ehdr, bool has_interpreter, + struct arch_elf_state *state) +{ + struct elfhdr *ehdr = _ehdr; + unsigned fp_abi, interp_fp_abi, abi0, abi1; + + /* Ignore non-O32 binaries */ + if (config_enabled(CONFIG_64BIT) && + (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) + return 0; + + fp_abi = get_fp_abi(ehdr, state->fp_abi); + + if (has_interpreter) { + interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); + + abi0 = min(fp_abi, interp_fp_abi); + abi1 = max(fp_abi, interp_fp_abi); + } else { + abi0 = abi1 = fp_abi; + } + + state->overall_abi = FP_ERROR; + + if (abi0 == abi1) { + state->overall_abi = abi0; + } else if (abi0 == MIPS_ABI_FP_ANY) { + state->overall_abi = abi1; + } else if (abi0 == MIPS_ABI_FP_DOUBLE) { + switch (abi1) { + case MIPS_ABI_FP_XX: + state->overall_abi = MIPS_ABI_FP_DOUBLE; + break; + + case MIPS_ABI_FP_64A: + state->overall_abi = FP_DOUBLE_64A; + break; + } + } else if (abi0 == MIPS_ABI_FP_SINGLE || + abi0 == MIPS_ABI_FP_SOFT) { + /* Cannot link with other ABIs */ + } else if (abi0 == MIPS_ABI_FP_OLD_64) { + switch (abi1) { + case MIPS_ABI_FP_XX: + case MIPS_ABI_FP_64: + case MIPS_ABI_FP_64A: + state->overall_abi = MIPS_ABI_FP_64; + break; + } + } else if (abi0 == MIPS_ABI_FP_XX || + abi0 == MIPS_ABI_FP_64 || + abi0 == MIPS_ABI_FP_64A) { + state->overall_abi = MIPS_ABI_FP_64; + } + + switch (state->overall_abi) { + case MIPS_ABI_FP_64: + case MIPS_ABI_FP_64A: + case FP_DOUBLE_64A: + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) + return -ELIBBAD; + break; + + case FP_ERROR: + return -ELIBBAD; + } + + return 0; +} + +void mips_set_personality_fp(struct 
arch_elf_state *state) +{ + if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { + /* + * Use hybrid FPRs for all code which can correctly execute + * with that mode. + */ + switch (state->overall_abi) { + case MIPS_ABI_FP_DOUBLE: + case MIPS_ABI_FP_SINGLE: + case MIPS_ABI_FP_SOFT: + case MIPS_ABI_FP_XX: + case MIPS_ABI_FP_ANY: + /* FR=1, FRE=1 */ + clear_thread_flag(TIF_32BIT_FPREGS); + set_thread_flag(TIF_HYBRID_FPREGS); + return; + } + } + + switch (state->overall_abi) { + case MIPS_ABI_FP_DOUBLE: + case MIPS_ABI_FP_SINGLE: + case MIPS_ABI_FP_SOFT: + /* FR=0 */ + set_thread_flag(TIF_32BIT_FPREGS); + clear_thread_flag(TIF_HYBRID_FPREGS); + break; + + case FP_DOUBLE_64A: + /* FR=1, FRE=1 */ + clear_thread_flag(TIF_32BIT_FPREGS); + set_thread_flag(TIF_HYBRID_FPREGS); + break; + + case MIPS_ABI_FP_64: + case MIPS_ABI_FP_64A: + /* FR=1, FRE=0 */ + clear_thread_flag(TIF_32BIT_FPREGS); + clear_thread_flag(TIF_HYBRID_FPREGS); + break; + + case MIPS_ABI_FP_XX: + case MIPS_ABI_FP_ANY: + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) + set_thread_flag(TIF_32BIT_FPREGS); + else + clear_thread_flag(TIF_32BIT_FPREGS); + + clear_thread_flag(TIF_HYBRID_FPREGS); + break; + + default: + case FP_ERROR: + BUG(); + } +} diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 50b364897dd..a74ec3ae557 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/syscore_ops.h> @@ -308,6 +309,19 @@ static struct resource pic2_io_resource = { .flags = IORESOURCE_BUSY }; +static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq); + irq_set_probe(virq); + return 0; +} + +static struct irq_domain_ops i8259A_ops = { + .map = i8259A_irq_domain_map, + .xlate = irq_domain_xlate_onecell, +}; + /* * On systems with i8259-style interrupt controllers we assume for * driver compatibility reasons interrupts 0 - 15 to be the i8259 @@ -315,17 +329,17 @@ static struct resource pic2_io_resource = { */ void __init init_i8259_irqs(void) { - int i; + struct irq_domain *domain; insert_resource(&ioport_resource, &pic1_io_resource); insert_resource(&ioport_resource, &pic2_io_resource); init_8259A(0); - for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { - irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq); - irq_set_probe(i); - } + domain = irq_domain_add_legacy(NULL, 16, I8259A_IRQ_BASE, 0, + &i8259A_ops, NULL); + if (!domain) + panic("Failed to add i8259 IRQ domain"); setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); } diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c deleted file mode 100644 index 9e9d8b9a5b9..00000000000 --- a/arch/mips/kernel/irq-gic.c +++ /dev/null @@ -1,402 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
- */ -#include <linux/bitmap.h> -#include <linux/init.h> -#include <linux/smp.h> -#include <linux/irq.h> -#include <linux/clocksource.h> - -#include <asm/io.h> -#include <asm/gic.h> -#include <asm/setup.h> -#include <asm/traps.h> -#include <linux/hardirq.h> -#include <asm-generic/bitops/find.h> - -unsigned int gic_frequency; -unsigned int gic_present; -unsigned long _gic_base; -unsigned int gic_irq_base; -unsigned int gic_irq_flags[GIC_NUM_INTRS]; - -/* The index into this array is the vector # of the interrupt. */ -struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS]; - -struct gic_pcpu_mask { - DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS); -}; - -struct gic_pending_regs { - DECLARE_BITMAP(pending, GIC_NUM_INTRS); -}; - -struct gic_intrmask_regs { - DECLARE_BITMAP(intrmask, GIC_NUM_INTRS); -}; - -static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; -static struct gic_pending_regs pending_regs[NR_CPUS]; -static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; - -#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC) -cycle_t gic_read_count(void) -{ - unsigned int hi, hi2, lo; - - do { - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); - } while (hi2 != hi); - - return (((cycle_t) hi) << 32) + lo; -} - -void gic_write_compare(cycle_t cnt) -{ - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); -} - -void gic_write_cpu_compare(cycle_t cnt, int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu); - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - - local_irq_restore(flags); -} - -cycle_t gic_read_compare(void) -{ - unsigned int hi, lo; - - GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi); - GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo); - - return (((cycle_t) hi) << 32) + lo; -} -#endif - -unsigned int gic_get_timer_pending(void) -{ - unsigned int vpe_pending; - - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0); - GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending); - return (vpe_pending & GIC_VPE_PEND_TIMER_MSK); -} - -void gic_bind_eic_interrupt(int irq, int set) -{ - /* Convert irq vector # to hw int # */ - irq -= GIC_PIN_TO_VEC_OFFSET; - - /* Set irq to use shadow set */ - GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set); -} - -void gic_send_ipi(unsigned int intr) -{ - GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr); -} - -static void gic_eic_irq_dispatch(void) -{ - unsigned int cause = read_c0_cause(); - int irq; - - irq = (cause & ST0_IM) >> STATUSB_IP2; - if (irq == 0) - irq = -1; - - if (irq >= 0) - do_IRQ(gic_irq_base + irq); - else - spurious_interrupt(); -} - -static void __init vpe_local_setup(unsigned int numvpes) -{ - unsigned long timer_intr = GIC_INT_TMR; - unsigned long perf_intr = GIC_INT_PERFCTR; - unsigned int vpe_ctl; - int i; - - if (cpu_has_veic) { - /* - * GIC timer interrupt -> CPU HW Int X (vector X+2) -> - * map to pin X+2-1 (since GIC adds 1) - */ - timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); - /* - * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) -> - * map to pin X+2-1 (since GIC adds 1) - */ - perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); - } - - /* - * Setup the default performance counter timer interrupts - * for 
all VPEs - */ - for (i = 0; i < numvpes; i++) { - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i); - - /* Are Interrupts locally routable? */ - GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl); - if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK) - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), - GIC_MAP_TO_PIN_MSK | timer_intr); - if (cpu_has_veic) { - set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET, - gic_eic_irq_dispatch); - gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK; - } - - if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK) - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), - GIC_MAP_TO_PIN_MSK | perf_intr); - if (cpu_has_veic) { - set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch); - gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK; - } - } -} - -unsigned int gic_compare_int(void) -{ - unsigned int pending; - - GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending); - if (pending & GIC_VPE_PEND_CMP_MSK) - return 1; - else - return 0; -} - -void gic_get_int_mask(unsigned long *dst, const unsigned long *src) -{ - unsigned int i; - unsigned long *pending, *intrmask, *pcpu_mask; - unsigned long *pending_abs, *intrmask_abs; - - /* Get per-cpu bitmaps */ - pending = pending_regs[smp_processor_id()].pending; - intrmask = intrmask_regs[smp_processor_id()].intrmask; - pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask; - - pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED, - GIC_SH_PEND_31_0_OFS); - intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED, - GIC_SH_MASK_31_0_OFS); - - for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) { - GICREAD(*pending_abs, pending[i]); - GICREAD(*intrmask_abs, intrmask[i]); - pending_abs++; - intrmask_abs++; - } - - bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS); - bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS); - bitmap_and(dst, src, pending, GIC_NUM_INTRS); -} - -unsigned int gic_get_int(void) -{ - DECLARE_BITMAP(interrupts, GIC_NUM_INTRS); - - bitmap_fill(interrupts, GIC_NUM_INTRS); - gic_get_int_mask(interrupts, interrupts); - - return find_first_bit(interrupts, GIC_NUM_INTRS); -} - -static void gic_mask_irq(struct irq_data *d) -{ - GIC_CLR_INTR_MASK(d->irq - gic_irq_base); -} - -static void gic_unmask_irq(struct irq_data *d) -{ - GIC_SET_INTR_MASK(d->irq - gic_irq_base); -} - -#ifdef CONFIG_SMP -static DEFINE_SPINLOCK(gic_lock); - -static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, - bool force) -{ - unsigned int irq = (d->irq - gic_irq_base); - cpumask_t tmp = CPU_MASK_NONE; - unsigned long flags; - int i; - - cpumask_and(&tmp, cpumask, cpu_online_mask); - if (cpus_empty(tmp)) - return -1; - - /* Assumption : cpumask refers to a single CPU */ - spin_lock_irqsave(&gic_lock, flags); - - /* Re-route this IRQ */ - GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); - - /* Update the pcpu_masks */ - for (i = 0; i < NR_CPUS; i++) - clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); - - cpumask_copy(d->affinity, cpumask); - spin_unlock_irqrestore(&gic_lock, flags); - - return IRQ_SET_MASK_OK_NOCOPY; -} -#endif - -static struct irq_chip gic_irq_controller = { - .name = "MIPS GIC", - .irq_ack = gic_irq_ack, - .irq_mask = gic_mask_irq, - .irq_mask_ack = gic_mask_irq, - .irq_unmask = gic_unmask_irq, - .irq_eoi = gic_finish_irq, -#ifdef CONFIG_SMP - .irq_set_affinity = gic_set_affinity, -#endif -}; - -static void __init gic_setup_intr(unsigned int intr, 
unsigned int cpu, - unsigned int pin, unsigned int polarity, unsigned int trigtype, - unsigned int flags) -{ - struct gic_shared_intr_map *map_ptr; - - /* Setup Intr to Pin mapping */ - if (pin & GIC_MAP_TO_NMI_MSK) { - int i; - - GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); - /* FIXME: hack to route NMI to all cpu's */ - for (i = 0; i < NR_CPUS; i += 32) { - GICWRITE(GIC_REG_ADDR(SHARED, - GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)), - 0xffffffff); - } - } else { - GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), - GIC_MAP_TO_PIN_MSK | pin); - /* Setup Intr to CPU mapping */ - GIC_SH_MAP_TO_VPE_SMASK(intr, cpu); - if (cpu_has_veic) { - set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET, - gic_eic_irq_dispatch); - map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET]; - if (map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR) - BUG(); - map_ptr->intr_list[map_ptr->num_shared_intr++] = intr; - } - } - - /* Setup Intr Polarity */ - GIC_SET_POLARITY(intr, polarity); - - /* Setup Intr Trigger Type */ - GIC_SET_TRIGGER(intr, trigtype); - - /* Init Intr Masks */ - GIC_CLR_INTR_MASK(intr); - - /* Initialise per-cpu Interrupt software masks */ - set_bit(intr, pcpu_masks[cpu].pcpu_mask); - - if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0)) - GIC_SET_INTR_MASK(intr); - if (trigtype == GIC_TRIG_EDGE) - gic_irq_flags[intr] |= GIC_TRIG_EDGE; -} - -static void __init gic_basic_init(int numintrs, int numvpes, - struct gic_intr_map *intrmap, int mapsize) -{ - unsigned int i, cpu; - unsigned int pin_offset = 0; - - board_bind_eic_interrupt = &gic_bind_eic_interrupt; - - /* Setup defaults */ - for (i = 0; i < numintrs; i++) { - GIC_SET_POLARITY(i, GIC_POL_POS); - GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL); - GIC_CLR_INTR_MASK(i); - if (i < GIC_NUM_INTRS) { - gic_irq_flags[i] = 0; - gic_shared_intr_map[i].num_shared_intr = 0; - gic_shared_intr_map[i].local_intr_mask = 0; - } - } - - /* - * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract - * one because the GIC will add one (since 0=no intr). 
- */ - if (cpu_has_veic) - pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); - - /* Setup specifics */ - for (i = 0; i < mapsize; i++) { - cpu = intrmap[i].cpunum; - if (cpu == GIC_UNUSED) - continue; - gic_setup_intr(i, - intrmap[i].cpunum, - intrmap[i].pin + pin_offset, - intrmap[i].polarity, - intrmap[i].trigtype, - intrmap[i].flags); - } - - vpe_local_setup(numvpes); -} - -void __init gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - struct gic_intr_map *intr_map, unsigned int intr_map_size, - unsigned int irqbase) -{ - unsigned int gicconfig; - int numvpes, numintrs; - - _gic_base = (unsigned long) ioremap_nocache(gic_base_addr, - gic_addrspace_size); - gic_irq_base = irqbase; - - GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); - numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> - GIC_SH_CONFIG_NUMINTRS_SHF; - numintrs = ((numintrs + 1) * 8); - - numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> - GIC_SH_CONFIG_NUMVPES_SHF; - numvpes = numvpes + 1; - - gic_basic_init(numintrs, numvpes, intr_map, intr_map_size); - - gic_platform_init(numintrs, &gic_irq_controller); -} diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index e498f2b3646..590c2c980fd 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -36,6 +36,7 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> +#include <asm/setup.h> static inline void unmask_mips_irq(struct irq_data *d) { @@ -94,28 +95,24 @@ static struct irq_chip mips_mt_cpu_irq_controller = { .irq_eoi = unmask_mips_irq, }; -void __init mips_cpu_irq_init(void) +asmlinkage void __weak plat_irq_dispatch(void) { - int irq_base = MIPS_CPU_IRQ_BASE; - int i; + unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM; + int irq; - /* Mask interrupts. */ - clear_c0_status(ST0_IM); - clear_c0_cause(CAUSEF_IP); - - /* Software interrupts are used for MT/CMT IPI */ - for (i = irq_base; i < irq_base + 2; i++) - irq_set_chip_and_handler(i, cpu_has_mipsmt ? 
- &mips_mt_cpu_irq_controller : - &mips_cpu_irq_controller, - handle_percpu_irq); + if (!pending) { + spurious_interrupt(); + return; + } - for (i = irq_base + 2; i < irq_base + 8; i++) - irq_set_chip_and_handler(i, &mips_cpu_irq_controller, - handle_percpu_irq); + pending >>= CAUSEB_IP; + while (pending) { + irq = fls(pending) - 1; + do_IRQ(MIPS_CPU_IRQ_BASE + irq); + pending &= ~BIT(irq); + } } -#ifdef CONFIG_IRQ_DOMAIN static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { @@ -128,6 +125,9 @@ static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, chip = &mips_cpu_irq_controller; } + if (cpu_has_vint) + set_vi_handler(hw, plat_irq_dispatch); + irq_set_chip_and_handler(irq, chip, handle_percpu_irq); return 0; @@ -138,8 +138,7 @@ static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; -int __init mips_cpu_intc_init(struct device_node *of_node, - struct device_node *parent) +static void __init __mips_cpu_irq_init(struct device_node *of_node) { struct irq_domain *domain; @@ -151,7 +150,16 @@ int __init mips_cpu_intc_init(struct device_node *of_node, &mips_cpu_intc_irq_domain_ops, NULL); if (!domain) panic("Failed to add irqdomain for MIPS CPU"); +} +void __init mips_cpu_irq_init(void) +{ + __mips_cpu_irq_init(NULL); +} + +int __init mips_cpu_irq_of_init(struct device_node *of_node, + struct device_node *parent) +{ + __mips_cpu_irq_init(of_node); return 0; } -#endif /* CONFIG_IRQ_DOMAIN */ diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 6001610cfe5..dda800e9e73 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -18,31 +18,53 @@ #ifdef HAVE_JUMP_LABEL -#define J_RANGE_MASK ((1ul << 28) - 1) +/* + * Define parameters for the standard MIPS and the microMIPS jump + * instruction encoding respectively: + * + * - the ISA bit of the target, either 0 or 1 respectively, + * + * - the amount the jump target address is shifted right to fit in the + * immediate field of the machine instruction, either 2 or 1, + * + * - the mask determining the size of the jump region relative to the + * delay-slot instruction, either 256MB or 128MB, + * + * - the jump target alignment, either 4 or 2 bytes. + */ +#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS) +#define J_RANGE_SHIFT (2 - J_ISA_BIT) +#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1) +#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1) void arch_jump_label_transform(struct jump_entry *e, enum jump_label_type type) { + union mips_instruction *insn_p; union mips_instruction insn; - union mips_instruction *insn_p = - (union mips_instruction *)(unsigned long)e->code; - /* Jump only works within a 256MB aligned region. */ - BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK)); + insn_p = (union mips_instruction *)msk_isa16_mode(e->code); + + /* Jump only works within an aligned region its delay slot is in. */ + BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); - /* Target must have 4 byte alignment. */ - BUG_ON((e->target & 3) != 0); + /* Target must have the right alignment and ISA must be preserved. */ + BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); if (type == JUMP_LABEL_ENABLE) { - insn.j_format.opcode = j_op; - insn.j_format.target = (e->target & J_RANGE_MASK) >> 2; + insn.j_format.opcode = J_ISA_BIT ? 
mm_j32_op : j_op; + insn.j_format.target = e->target >> J_RANGE_SHIFT; } else { insn.word = 0; /* nop */ } get_online_cpus(); mutex_lock(&text_mutex); - *insn_p = insn; + if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) { + insn_p->halfword[0] = insn.word >> 16; + insn_p->halfword[1] = insn.word; + } else + *insn_p = insn; flush_icache_range((unsigned long)insn_p, (unsigned long)insn_p + sizeof(*insn_p)); diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index f76f7a08412..85bbe9b9675 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -16,7 +16,7 @@ void __iomem *mips_cm_base; void __iomem *mips_cm_l2sync_base; -phys_t __mips_cm_phys_base(void) +phys_addr_t __mips_cm_phys_base(void) { u32 config3 = read_c0_config3(); u32 cmgcr; @@ -30,10 +30,10 @@ phys_t __mips_cm_phys_base(void) return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); } -phys_t mips_cm_phys_base(void) +phys_addr_t mips_cm_phys_base(void) __attribute__((weak, alias("__mips_cm_phys_base"))); -phys_t __mips_cm_l2sync_phys_base(void) +phys_addr_t __mips_cm_l2sync_phys_base(void) { u32 base_reg; @@ -49,13 +49,13 @@ phys_t __mips_cm_l2sync_phys_base(void) return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; } -phys_t mips_cm_l2sync_phys_base(void) +phys_addr_t mips_cm_l2sync_phys_base(void) __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); static void mips_cm_probe_l2sync(void) { unsigned major_rev; - phys_t addr; + phys_addr_t addr; /* L2-only sync was introduced with CM major revision 6 */ major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >> @@ -78,7 +78,7 @@ static void mips_cm_probe_l2sync(void) int mips_cm_probe(void) { - phys_t addr; + phys_addr_t addr; u32 base_reg; addr = mips_cm_phys_base(); diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index ba473608a34..11964501c4b 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -21,7 +21,7 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock); static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); -phys_t __weak mips_cpc_phys_base(void) +phys_addr_t __weak mips_cpc_phys_base(void) { u32 cpc_base; @@ -44,7 +44,7 @@ phys_t __weak mips_cpc_phys_base(void) int mips_cpc_probe(void) { - phys_t addr; + phys_addr_t addr; unsigned cpu; for_each_possible_cpu(cpu) diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 2607c3a4ff7..17eaf0cf760 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -24,9 +24,7 @@ extern long __strncpy_from_user_nocheck_asm(char *__to, const char *__from, long __len); extern long __strncpy_from_user_asm(char *__to, const char *__from, long __len); -extern long __strlen_kernel_nocheck_asm(const char *s); extern long __strlen_kernel_asm(const char *s); -extern long __strlen_user_nocheck_asm(const char *s); extern long __strlen_user_asm(const char *s); extern long __strnlen_kernel_nocheck_asm(const char *s); extern long __strnlen_kernel_asm(const char *s); @@ -62,9 +60,7 @@ EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm); EXPORT_SYMBOL(__strncpy_from_kernel_asm); EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); EXPORT_SYMBOL(__strncpy_from_user_asm); -EXPORT_SYMBOL(__strlen_kernel_nocheck_asm); EXPORT_SYMBOL(__strlen_kernel_asm); -EXPORT_SYMBOL(__strlen_user_nocheck_asm); EXPORT_SYMBOL(__strlen_user_asm); EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm); EXPORT_SYMBOL(__strnlen_kernel_asm); diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index a8f9cdc6f8b..9466184d003 100644 
--- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -561,8 +561,8 @@ static int mipspmu_get_irq(void) IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD, "mips_perf_pmu", NULL); if (err) { - pr_warning("Unable to request IRQ%d for MIPS " - "performance counters!\n", mipspmu.irq); + pr_warn("Unable to request IRQ%d for MIPS performance counters!\n", + mipspmu.irq); } } else if (cp0_perfcount_irq < 0) { /* @@ -572,8 +572,7 @@ static int mipspmu_get_irq(void) perf_irq = mipsxx_pmu_handle_shared_irq; err = 0; } else { - pr_warning("The platform hasn't properly defined its " - "interrupt controller.\n"); + pr_warn("The platform hasn't properly defined its interrupt controller\n"); err = -ENOENT; } @@ -1614,22 +1613,13 @@ init_hw_perf_events(void) counters = counters_total_to_per_cpu(counters); #endif -#ifdef MSC01E_INT_BASE - if (cpu_has_veic) { - /* - * Using platform specific interrupt controller defines. - */ - irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; - } else { -#endif - if ((cp0_perfcount_irq >= 0) && - (cp0_compare_irq != cp0_perfcount_irq)) - irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; - else - irq = -1; -#ifdef MSC01E_INT_BASE - } -#endif + if (get_c0_perfcount_int) + irq = get_c0_perfcount_int(); + else if ((cp0_perfcount_irq >= 0) && + (cp0_compare_irq != cp0_perfcount_irq)) + irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; + else + irq = -1; mipspmu.map_raw_event = mipsxx_pmu_map_raw_event; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 636b0745d7c..eb76434828e 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -42,6 +42,7 @@ #include <asm/isadep.h> #include <asm/inst.h> #include <asm/stacktrace.h> +#include <asm/irq_regs.h> #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) @@ -187,21 +188,21 @@ static inline int is_ra_save_ins(union mips_instruction *ip) */ if (mm_insn_16bit(ip->halfword[0])) { mmi.word = (ip->halfword[0] << 16); - return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && - mmi.mm16_r5_format.rt == 31) || - (mmi.mm16_m_format.opcode == mm_pool16c_op && - mmi.mm16_m_format.func == mm_swm16_op)); + return (mmi.mm16_r5_format.opcode == mm_swsp16_op && + mmi.mm16_r5_format.rt == 31) || + (mmi.mm16_m_format.opcode == mm_pool16c_op && + mmi.mm16_m_format.func == mm_swm16_op); } else { mmi.halfword[0] = ip->halfword[1]; mmi.halfword[1] = ip->halfword[0]; - return ((mmi.mm_m_format.opcode == mm_pool32b_op && - mmi.mm_m_format.rd > 9 && - mmi.mm_m_format.base == 29 && - mmi.mm_m_format.func == mm_swm32_func) || - (mmi.i_format.opcode == mm_sw32_op && - mmi.i_format.rs == 29 && - mmi.i_format.rt == 31)); + return (mmi.mm_m_format.opcode == mm_pool32b_op && + mmi.mm_m_format.rd > 9 && + mmi.mm_m_format.base == 29 && + mmi.mm_m_format.func == mm_swm32_func) || + (mmi.i_format.opcode == mm_sw32_op && + mmi.i_format.rs == 29 && + mmi.i_format.rt == 31); } #else /* sw / sd $ra, offset($sp) */ @@ -233,7 +234,7 @@ static inline int is_jump_ins(union mips_instruction *ip) if (ip->r_format.opcode != mm_pool32a_op || ip->r_format.func != mm_pool32axf_op) return 0; - return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); + return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op; #else if (ip->j_format.opcode == j_op) return 1; @@ -260,13 +261,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip) union mips_instruction mmi; mmi.word = (ip->halfword[0] << 16); - return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && - mmi.mm16_r3_format.simmediate && 
mm_addiusp_func) || - (mmi.mm16_r5_format.opcode == mm_pool16d_op && - mmi.mm16_r5_format.rt == 29)); + return (mmi.mm16_r3_format.opcode == mm_pool16d_op && + mmi.mm16_r3_format.simmediate && mm_addiusp_func) || + (mmi.mm16_r5_format.opcode == mm_pool16d_op && + mmi.mm16_r5_format.rt == 29); } - return (ip->mm_i_format.opcode == mm_addiu32_op && - ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); + return ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; #else /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) @@ -532,3 +533,20 @@ unsigned long arch_align_stack(unsigned long sp) return sp & ALMASK; } + +static void arch_dump_stack(void *info) +{ + struct pt_regs *regs; + + regs = get_irq_regs(); + + if (regs) + show_regs(regs); + + dump_stack(); +} + +void arch_trigger_all_cpu_backtrace(bool include_self) +{ + smp_call_function(arch_dump_stack, NULL, 1); +} diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 5d39bb85bf3..452d4350ce4 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -16,6 +16,7 @@ #include <linux/debugfs.h> #include <linux/of.h> #include <linux/of_fdt.h> +#include <linux/of_platform.h> #include <asm/page.h> #include <asm/prom.h> @@ -54,4 +55,21 @@ void __init __dt_setup_arch(void *bph) mips_set_machine_name(of_flat_dt_get_machine_name()); } + +int __init __dt_register_buses(const char *bus0, const char *bus1) +{ + static struct of_device_id of_ids[3]; + + if (!of_have_populated_dt()) + panic("device tree not present"); + + strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible)); + strlcpy(of_ids[1].compatible, bus1, sizeof(of_ids[1].compatible)); + + if (of_platform_populate(NULL, of_ids, NULL, NULL)) + panic("failed to populate DT"); + + return 0; +} + #endif diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 31b1b763cb2..c5c4fd54d79 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep) int ret = 0; if (index >= RTLX_CHANNELS) { - pr_debug(KERN_DEBUG "rtlx_open index out of range\n"); + pr_debug("rtlx_open index out of range\n"); return -ENOSYS; } if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { - pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index); + pr_debug("rtlx_open channel %d already opened\n", index); ret = -EBUSY; goto out_fail; } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index d21ec57b6e9..05892904136 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -82,14 +82,14 @@ static struct resource data_resource = { .name = "Kernel data", }; static void *detect_magic __initdata = detect_memory_region; -void __init add_memory_region(phys_t start, phys_t size, long type) +void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) { int x = boot_mem_map.nr_map; int i; /* Sanity check */ if (start + size < start) { - pr_warning("Trying to add an invalid memory region, skipped\n"); + pr_warn("Trying to add an invalid memory region, skipped\n"); return; } @@ -127,10 +127,10 @@ void __init add_memory_region(phys_t start, phys_t size, long type) boot_mem_map.nr_map++; } -void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max) +void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) { void *dm = &detect_magic; - phys_t size; + phys_addr_t size; for (size = sz_min; size < sz_max; size <<= 1) { if (!memcmp(dm, dm + size, 
sizeof(detect_magic))) @@ -485,7 +485,7 @@ static void __init bootmem_init(void) * NOTE: historically plat_mem_setup did the entire platform initialization. * This was rather impractical because it meant plat_mem_setup had to * get away without any kind of memory allocator. To keep old code from - * breaking plat_setup was just renamed to plat_setup and a second platform + * breaking plat_setup was just renamed to plat_mem_setup and a second platform * initialization hook for anything else was introduced. */ @@ -493,7 +493,7 @@ static int usermem __initdata; static int __init early_parse_mem(char *p) { - unsigned long start, size; + phys_addr_t start, size; /* * If a user specifies memory size, we @@ -545,9 +545,9 @@ static int __init early_parse_elfcorehdr(char *p) early_param("elfcorehdr", early_parse_elfcorehdr); #endif -static void __init arch_mem_addpart(phys_t mem, phys_t end, int type) +static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type) { - phys_t size; + phys_addr_t size; int i; size = end - mem; diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 1d57605e461..545bf11bd2e 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -530,7 +530,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) struct mips_abi *abi = current->thread.abi; #ifdef CONFIG_CPU_MICROMIPS void *vdso; - unsigned int tmp = (unsigned int)current->mm->context.vdso; + unsigned long tmp = (unsigned long)current->mm->context.vdso; set_isa16_mode(tmp); vdso = (void *)tmp; @@ -658,13 +658,13 @@ static int signal_setup(void) save_fp_context = _save_fp_context; restore_fp_context = _restore_fp_context; } else { - save_fp_context = copy_fp_from_sigcontext; - restore_fp_context = copy_fp_to_sigcontext; + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; } #endif /* CONFIG_SMP */ #else - save_fp_context = copy_fp_from_sigcontext;; - restore_fp_context = copy_fp_to_sigcontext; + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; #endif return 0; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 06bb5ed6d80..b8bd9340c9c 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -35,6 +35,7 @@ #include <asm/bmips.h> #include <asm/traps.h> #include <asm/barrier.h> +#include <asm/cpu-features.h> static int __maybe_unused max_cpus = 1; @@ -42,6 +43,12 @@ static int __maybe_unused max_cpus = 1; int bmips_smp_enabled = 1; int bmips_cpu_offset; cpumask_t bmips_booted_mask; +unsigned long bmips_tp1_irqs = IE_IRQ1; + +#define RESET_FROM_KSEG0 0x80080800 +#define RESET_FROM_KSEG1 0xa0080800 + +static void bmips_set_reset_vec(int cpu, u32 val); #ifdef CONFIG_SMP @@ -194,6 +201,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) pr_info("SMP: Booting CPU%d...\n", cpu); if (cpumask_test_cpu(cpu, &bmips_booted_mask)) { + /* kseg1 might not exist if this CPU enabled XKS01 */ + bmips_set_reset_vec(cpu, RESET_FROM_KSEG0); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: @@ -203,8 +213,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) bmips5000_send_ipi_single(cpu, 0); break; } - } - else { + } else { + bmips_set_reset_vec(cpu, RESET_FROM_KSEG1); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: @@ -213,17 +224,7 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) set_c0_brcm_cmt_ctrl(0x01); break; case CPU_BMIPS5000: - if 
(cpu & 0x01) - write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); - else { - /* - * core N thread 0 was already booted; just - * pulse the NMI line - */ - bmips_write_zscm_reg(0x210, 0xc0000000); - udelay(10); - bmips_write_zscm_reg(0x210, 0x00); - } + write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); break; } cpumask_set_cpu(cpu, &bmips_booted_mask); @@ -235,31 +236,12 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { - /* move NMI vector to kseg0, in case XKS01 is enabled */ - - void __iomem *cbr; - unsigned long old_vec; - unsigned long relo_vector; - int boot_cpu; - switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: - cbr = BMIPS_GET_CBR(); - - boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); - relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 : - BMIPS_RELO_VECTOR_CONTROL_1; - - old_vec = __raw_readl(cbr + relo_vector); - __raw_writel(old_vec & ~0x20000000, cbr + relo_vector); - clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); break; case CPU_BMIPS5000: - write_c0_brcm_bootvec(read_c0_brcm_bootvec() & - (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); - write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); break; } @@ -276,7 +258,7 @@ static void bmips_smp_finish(void) write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); irq_enable_hazard(); - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); + set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE); irq_enable_hazard(); } @@ -381,6 +363,7 @@ static int bmips_cpu_disable(void) set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); + clear_c0_status(IE_IRQ5); local_flush_tlb_all(); local_flush_icache_range(0, ~0); @@ -405,7 +388,8 @@ void __ref play_dead(void) * IRQ handlers; this clears ST0_IE and returns immediately. */ clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1); - change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, + change_c0_status( + IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV); irq_disable_hazard(); @@ -473,10 +457,61 @@ static inline void bmips_nmi_handler_setup(void) &bmips_smp_int_vec_end); } +struct reset_vec_info { + int cpu; + u32 val; +}; + +static void bmips_set_reset_vec_remote(void *vinfo) +{ + struct reset_vec_info *info = vinfo; + int shift = info->cpu & 0x01 ? 
16 : 0; + u32 mask = ~(0xffff << shift), val = info->val >> 16; + + preempt_disable(); + if (smp_processor_id() > 0) { + smp_call_function_single(0, &bmips_set_reset_vec_remote, + info, 1); + } else { + if (info->cpu & 0x02) { + /* BMIPS5200 "should" use mask/shift, but it's buggy */ + bmips_write_zscm_reg(0xa0, (val << 16) | val); + bmips_read_zscm_reg(0xa0); + } else { + write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) | + (val << shift)); + } + } + preempt_enable(); +} + +static void bmips_set_reset_vec(int cpu, u32 val) +{ + struct reset_vec_info info; + + if (current_cpu_type() == CPU_BMIPS5000) { + /* this needs to run from CPU0 (which is always online) */ + info.cpu = cpu; + info.val = val; + bmips_set_reset_vec_remote(&info); + } else { + void __iomem *cbr = BMIPS_GET_CBR(); + + if (cpu == 0) + __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0); + else { + if (current_cpu_type() != CPU_BMIPS4380) + return; + __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1); + } + } + __sync(); + back_to_back_c0_hazard(); +} + void bmips_ebase_setup(void) { unsigned long new_ebase = ebase; - void __iomem __maybe_unused *cbr; BUG_ON(ebase != CKSEG0); @@ -496,15 +531,14 @@ void bmips_ebase_setup(void) &bmips_smp_int_vec, 0x80); __sync(); return; + case CPU_BMIPS3300: case CPU_BMIPS4380: /* * 0x8000_0000: reset/NMI (initially in kseg1) * 0x8000_0400: normal vectors */ new_ebase = 0x80000400; - cbr = BMIPS_GET_CBR(); - __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); - __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); + bmips_set_reset_vec(0, RESET_FROM_KSEG0); break; case CPU_BMIPS5000: /* @@ -512,10 +546,8 @@ void bmips_ebase_setup(void) * 0x8000_1000: normal vectors */ new_ebase = 0x80001000; - write_c0_brcm_bootvec(0xa0088008); + bmips_set_reset_vec(0, RESET_FROM_KSEG0); write_c0_ebase(new_ebase); - if (max_cpus > 2) - bmips_write_zscm_reg(0xa0, 0xa008a008); break; default: return; diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index fc8a5155342..1e0a93c5a3e 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -24,6 +24,7 @@ #include <linux/cpumask.h> #include <linux/interrupt.h> #include <linux/compiler.h> +#include <linux/irqchip/mips-gic.h> #include <linux/atomic.h> #include <asm/cacheflush.h> @@ -37,7 +38,6 @@ #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> #include <asm/amon.h> -#include <asm/gic.h> static void cmp_init_secondary(void) { diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index e6e16a1d4ad..bed7590e475 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -9,13 +9,13 @@ */ #include <linux/io.h> +#include <linux/irqchip/mips-gic.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/types.h> #include <asm/bcache.h> -#include <asm/gic.h> #include <asm/mips-cm.h> #include <asm/mips-cpc.h> #include <asm/mips_mt.h> @@ -273,8 +273,8 @@ static void cps_init_secondary(void) if (cpu_has_mipsmt) dmt(); - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | - STATUSF_IP6 | STATUSF_IP7); + change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | + STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); } static void cps_smp_finish(void) diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c index 3b21a96d1cc..5f0ab5bcd01 100644 --- a/arch/mips/kernel/smp-gic.c +++ b/arch/mips/kernel/smp-gic.c @@ -12,9 +12,9 @@ * option) any later version. 
*/ +#include <linux/irqchip/mips-gic.h> #include <linux/printk.h> -#include <asm/gic.h> #include <asm/mips-cpc.h> #include <asm/smp-ops.h> diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 21f23add04f..ad86951b73b 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -21,6 +21,7 @@ #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/interrupt.h> +#include <linux/irqchip/mips-gic.h> #include <linux/compiler.h> #include <linux/smp.h> @@ -34,7 +35,6 @@ #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> -#include <asm/gic.h> static void __init smvp_copy_vpe_config(void) { @@ -119,7 +119,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action) unsigned long flags; int vpflags; -#ifdef CONFIG_IRQ_GIC +#ifdef CONFIG_MIPS_GIC if (gic_present) { gic_send_ipi_single(cpu, action); return; @@ -158,7 +158,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) static void vsmp_init_secondary(void) { -#ifdef CONFIG_IRQ_GIC +#ifdef CONFIG_MIPS_GIC /* This is Malta specific: IPI,performance and timer interrupts */ if (gic_present) change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 4a4f9dda565..604b558809c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -117,6 +117,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) "2: sc %[tmp], (%[addr]) \n" " beqzl %[tmp], 1b \n" "3: \n" + " .insn \n" " .section .fixup,\"ax\" \n" "4: li %[err], %[efault] \n" " j 3b \n" @@ -142,6 +143,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) "2: sc %[tmp], (%[addr]) \n" " bnez %[tmp], 4f \n" "3: \n" + " .insn \n" " .subsection 2 \n" "4: b 1b \n" " .previous \n" diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 22b19c27504..ad3d2031c32 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -724,6 +724,50 @@ int process_fpemu_return(int sig, void __user *fault_addr) } } +static int simulate_fp(struct pt_regs *regs, unsigned int opcode, + unsigned long old_epc, unsigned long old_ra) +{ + union mips_instruction inst = { .word = opcode }; + void __user *fault_addr = NULL; + int sig; + + /* If it's obviously not an FP instruction, skip it */ + switch (inst.i_format.opcode) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case ldc1_op: + case swc1_op: + case sdc1_op: + break; + + default: + return -1; + } + + /* + * do_ri skipped over the instruction via compute_return_epc, undo + * that for the FPU emulator. 
+ */ + regs->cp0_epc = old_epc; + regs->regs[31] = old_ra; + + /* Save the FP context to struct thread_struct */ + lose_fpu(1); + + /* Run the emulator */ + sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, + &fault_addr); + + /* If something went wrong, signal */ + process_fpemu_return(sig, fault_addr); + + /* Restore the hardware register state */ + own_fpu(1); + + return 0; +} + /* * XXX Delayed fp exceptions when doing a lazy ctx switch XXX */ @@ -1016,6 +1060,9 @@ asmlinkage void do_ri(struct pt_regs *regs) if (status < 0) status = simulate_sync(regs, opcode); + + if (status < 0) + status = simulate_fp(regs, opcode, old_epc, old31); } if (status < 0) @@ -1380,12 +1427,19 @@ asmlinkage void do_mcheck(struct pt_regs *regs) show_regs(regs); if (multi_match) { - printk("Index : %0x\n", read_c0_index()); - printk("Pagemask: %0x\n", read_c0_pagemask()); - printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); - printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); - printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); - printk("\n"); + pr_err("Index : %0x\n", read_c0_index()); + pr_err("Pagemask: %0x\n", read_c0_pagemask()); + pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi()); + pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); + pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); + pr_err("Wired : %0x\n", read_c0_wired()); + pr_err("Pagegrain: %0x\n", read_c0_pagegrain()); + if (cpu_has_htw) { + pr_err("PWField : %0*lx\n", field, read_c0_pwfield()); + pr_err("PWSize : %0*lx\n", field, read_c0_pwsize()); + pr_err("PWCtl : %0x\n", read_c0_pwctl()); + } + pr_err("\n"); dump_tlb_all(); } diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 0f1af58b036..ed2a278722a 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -16,9 +16,11 @@ #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/unistd.h> +#include <linux/random.h> #include <asm/vdso.h> #include <asm/uasm.h> +#include <asm/processor.h> /* * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... @@ -67,7 +69,18 @@ subsys_initcall(init_vdso); static unsigned long vdso_addr(unsigned long start) { - return STACK_TOP; + unsigned long offset = 0UL; + + if (current->flags & PF_RANDOMIZE) { + offset = get_random_int(); + offset <<= PAGE_SHIFT; + if (TASK_IS_32BIT_ADDR) + offset &= 0xfffffful; + else + offset &= 0xffffffful; + } + + return STACK_TOP + offset; } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) |