Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/ftrace.c             | 179
-rw-r--r--  arch/mips/kernel/perf_event.c         | 345
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  |   4
-rw-r--r--  arch/mips/kernel/signal.c             |   2
-rw-r--r--  arch/mips/kernel/signal32.c           |   2
-rw-r--r--  arch/mips/kernel/smp.c                |  31
-rw-r--r--  arch/mips/kernel/syscall.c            |   5
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S        |   2
-rw-r--r--  arch/mips/kernel/vpe.c                |   4
9 files changed, 300 insertions(+), 274 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f1123..94ca2b018af 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,13 @@
#include <asm/cacheflush.h>
#include <asm/uasm.h>
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
- return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
-#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
#define INSN_NOP 0x00000000 /* nop */
#define INSN_JAL(addr) \
((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
#endif
}
+/*
+ * Check if the address is in kernel space
+ *
+ * Cloned from core_kernel_text() in kernel/extable.c, but without the
+ * init_kernel_text() check, since ftrace does not trace init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+ if (ip >= (unsigned long)_stext &&
+ ip <= (unsigned long)_etext)
+ return 1;
+ return 0;
+}
+
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
return 0;
}
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ * 2.2 For the other situations
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
unsigned long ip = rec->ip;
/*
- * We have compiled module with -mlong-calls, but compiled the kernel
- * without it, we need to cope with them respectively.
+ * If ip is in kernel space, no long call is needed; otherwise, a long
+ * call is required.
*/
- if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
- /*
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
- * addiu v1, v1, low_16bit_of_mcount
- * move at, ra
- * move $12, ra_address
- * jalr v1
- * sub sp, sp, 8
- * 1: offset = 5 instructions
- */
- new = INSN_B_1F_5;
-#else
- /*
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
- * addiu v1, v1, low_16bit_of_mcount
- * move at, ra
- * jalr v1
- * nop | move $12, ra_address | sub sp, sp, 8
- * 1: offset = 4 instructions
- */
- new = INSN_B_1F_4;
-#endif
- } else {
- /*
- * move at, ra
- * jal _mcount --> nop
- */
- new = INSN_NOP;
- }
+ new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
return ftrace_modify_code(ip, new);
}
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned int new;
unsigned long ip = rec->ip;
- /* ip, module: 0xc0000000, kernel: 0x80000000 */
- new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+ new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+ insn_lui_v1_hi16_mcount;
return ftrace_modify_code(ip, new);
}
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
- unsigned long parent,
- unsigned long parent_addr,
- unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+ old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
- unsigned long sp, ip, ra;
+ unsigned long sp, ip, tmp;
unsigned int code;
int faulted;
/*
- * For module, move the ip from calling site of mcount to the
- * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
- * kernel, move to the instruction "move ra, at"(offset is 12)
+ * For modules, move ip back from the return address to the
+ * instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for the
+ * kernel, move back to the instruction "move at, ra" (offset is 16).
*/
- ip = self_addr - (in_module(self_addr) ? 20 : 12);
+ ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
/*
* search the text until finding the non-store instruction or "s{d,w}
* ra, offset(sp)" instruction
*/
do {
- ip -= 4;
-
/* get the code at "ip": code = *(unsigned int *)ip; */
safe_load_code(code, ip, faulted);
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
* store the ra on the stack
*/
if ((code & S_R_SP) != S_R_SP)
- return parent_addr;
+ return parent_ra_addr;
- } while (((code & S_RA_SP) != S_RA_SP));
+ /* Move to the next instruction */
+ ip -= 4;
+ } while ((code & S_RA_SP) != S_RA_SP);
sp = fp + (code & OFFSET_MASK);
- /* ra = *(unsigned long *)sp; */
- safe_load_stack(ra, sp, faulted);
+ /* tmp = *(unsigned long *)sp; */
+ safe_load_stack(tmp, sp, faulted);
if (unlikely(faulted))
return 0;
- if (ra == parent)
+ if (tmp == old_parent_ra)
return sp;
return 0;
}
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
unsigned long fp)
{
- unsigned long old;
+ unsigned long old_parent_ra;
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
- int faulted;
+ int faulted, insns;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
/*
- * "parent" is the stack address saved the return address of the caller
- * of _mcount.
+ * "parent_ra_addr" is the stack address saved the return address of
+ * the caller of _mcount.
*
* if the gcc < 4.5, a leaf function does not save the return address
* in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
* do it in ftrace_graph_caller of mcount.S.
*/
- /* old = *parent; */
- safe_load_stack(old, parent, faulted);
+ /* old_parent_ra = *parent_ra_addr; */
+ safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
if (unlikely(faulted))
goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
- parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
- (unsigned long)parent, fp);
+ parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+ old_parent_ra, (unsigned long)parent_ra_addr, fp);
/*
* If fails when getting the stack address of the non-leaf function's
* ra, stop function graph tracer and return
*/
- if (parent == 0)
+ if (parent_ra_addr == 0)
goto out;
#endif
- /* *parent = return_hooker; */
- safe_store_stack(return_hooker, parent, faulted);
+ /* *parent_ra_addr = return_hooker; */
+ safe_store_stack(return_hooker, parent_ra_addr, faulted);
if (unlikely(faulted))
goto out;
- if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
- -EBUSY) {
- *parent = old;
+ if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+ == -EBUSY) {
+ *parent_ra_addr = old_parent_ra;
return;
}
- trace.func = self_addr;
+ /*
+ * Get the recorded ip of the current mcount calling site in the
+ * __mcount_loc section, which will be used to filter the function
+ * entries configured through the tracing/set_graph_function interface.
+ */
+
+ insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
- *parent = old;
+ *parent_ra_addr = old_parent_ra;
}
return;
out:
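
For reference, a minimal user-space sketch of the instruction encodings the ftrace.c hunks above rely on (JAL, the "b 1f" branch and NOP); the _mcount address used here is made up purely for illustration:

/*
 * Sketch of the MIPS instruction encodings used by ftrace.c above.
 * The sample address is hypothetical.
 */
#include <stdio.h>

#define JAL        0x0c000000u	/* jump & link: opcode in bits 31..26 */
#define ADDR_MASK  0x03ffffffu	/* 26-bit word-aligned target field */
#define INSN_NOP   0x00000000u
#define INSN_JAL(addr) ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
/* "b 1f" with the branch offset given in instructions, as in INSN_B_1F */
#define INSN_B(offset_insns) (0x10000000u | (offset_insns))

int main(void)
{
	unsigned long mcount = 0x80123450ul;	/* hypothetical _mcount address */

	printf("jal _mcount    -> 0x%08x\n", INSN_JAL(mcount));
	printf("b 1f (4 insns) -> 0x%08x\n", INSN_B(4));	/* 0x10000004 */
	printf("b 1f (5 insns) -> 0x%08x\n", INSN_B(5));	/* 0x10000005 */
	printf("nop            -> 0x%08x\n", INSN_NOP);
	return 0;
}
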
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 2b7f3f703b8..a8244854d3d 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
return ret;
}
-static int mipspmu_enable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- int idx;
- int err = 0;
-
- /* To look for a free counter for this event. */
- idx = mipspmu->alloc_counter(cpuc, hwc);
- if (idx < 0) {
- err = idx;
- goto out;
- }
-
- /*
- * If there is an event in the counter we are going to use then
- * make sure it is disabled.
- */
- event->hw.idx = idx;
- mipspmu->disable_event(idx);
- cpuc->events[idx] = event;
-
- /* Set the period for the event. */
- mipspmu_event_set_period(event, hwc, idx);
-
- /* Enable the event. */
- mipspmu->enable_event(hwc, idx);
-
- /* Propagate our changes to the userspace mapping. */
- perf_event_update_userpage(event);
-
-out:
- return err;
-}
-
static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
unsigned long flags;
int shift = 64 - TOTAL_BITS;
s64 prev_raw_count, new_raw_count;
- s64 delta;
+ u64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
return;
}
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!mipspmu)
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, hwc->idx);
+
+ /* Enable the event. */
+ mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!mipspmu)
+ return;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* We are working on a local event. */
+ mipspmu->disable_event(hwc->idx);
+ barrier();
+ mipspmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
+ int idx;
+ int err = 0;
+ perf_pmu_disable(event->pmu);
- WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+ /* To look for a free counter for this event. */
+ idx = mipspmu->alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
- /* We are working on a local event. */
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
mipspmu->disable_event(idx);
+ cpuc->events[idx] = event;
- barrier();
-
- mipspmu_event_update(event, hwc, idx);
- cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ mipspmu_start(event, PERF_EF_RELOAD);
+ /* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
}
-static void mipspmu_unthrottle(struct perf_event *event)
+static void mipspmu_del(struct perf_event *event, int flags)
{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
- mipspmu->enable_event(hwc, hwc->idx);
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ mipspmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
}
static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
mipspmu_event_update(event, hwc, hwc->idx);
}
-static struct pmu pmu = {
- .enable = mipspmu_enable,
- .disable = mipspmu_disable,
- .unthrottle = mipspmu_unthrottle,
- .read = mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+ if (mipspmu)
+ mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+ if (mipspmu)
+ mipspmu->stop();
+}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
perf_irq = save_perf_irq;
}
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu->num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return -ENODEV;
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+ atomic_dec(&active_events);
+ return -ENOSPC;
+ }
+
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err;
+}
+
+static struct pmu pmu = {
+ .pmu_enable = mipspmu_enable,
+ .pmu_disable = mipspmu_disable,
+ .event_init = mipspmu_event_init,
+ .add = mipspmu_add,
+ .del = mipspmu_del,
+ .start = mipspmu_start,
+ .stop = mipspmu_stop,
+ .read = mipspmu_read,
+};
+
static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
{
struct hw_perf_event fake_hwc = event->hw;
- if (event->pmu && event->pmu != &pmu)
- return 0;
+ /* Allow mixed event group. So return 1 to pass validation. */
+ if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+ return 1;
return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
return 0;
}
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
- if (atomic_dec_and_mutex_lock(&active_events,
- &pmu_reserve_mutex)) {
- /*
- * We must not call the destroy function with interrupts
- * disabled.
- */
- on_each_cpu(reset_counters,
- (void *)(long)mipspmu->num_counters, 1);
- mipspmu_free_irq();
- mutex_unlock(&pmu_reserve_mutex);
- }
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
- int err = 0;
-
- if (!mipspmu || event->cpu >= nr_cpumask_bits ||
- (event->cpu >= 0 && !cpu_online(event->cpu)))
- return ERR_PTR(-ENODEV);
-
- if (!atomic_inc_not_zero(&active_events)) {
- if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
- atomic_dec(&active_events);
- return ERR_PTR(-ENOSPC);
- }
-
- mutex_lock(&pmu_reserve_mutex);
- if (atomic_read(&active_events) == 0)
- err = mipspmu_get_irq();
-
- if (!err)
- atomic_inc(&active_events);
- mutex_unlock(&pmu_reserve_mutex);
- }
-
- if (err)
- return ERR_PTR(err);
-
- err = __hw_perf_event_init(event);
- if (err)
- hw_perf_event_destroy(event);
-
- return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
- if (mipspmu)
- mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
- if (mipspmu)
- mipspmu->stop();
-}
-
/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
#include "perf_event_mipsxx.c"
/* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
- u64 ip)
-{
- if (entry->nr < PERF_MAX_STACK_DEPTH)
- entry->ip[entry->nr++] = ip;
-}
/*
* Leave userspace callchain empty for now. When we find a way to trace
* the user stack callchains, we add here.
*/
-static void
-perf_callchain_user(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
}
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
- callchain_store(entry, addr);
+ perf_callchain_store(entry, addr);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
}
}
}
-static void
-perf_callchain_kernel(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
- callchain_store(entry, PERF_CONTEXT_KERNEL);
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
return;
}
do {
- callchain_store(entry, pc);
+ perf_callchain_store(entry, pc);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
- callchain_store(entry, PERF_CONTEXT_KERNEL);
save_raw_perf_callchain(entry, sp);
#endif
}
-
-static void
-perf_do_callchain(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
-{
- int is_user;
-
- if (!regs)
- return;
-
- is_user = user_mode(regs);
-
- if (!current || !current->pid)
- return;
-
- if (is_user && current->state != TASK_RUNNING)
- return;
-
- if (!is_user) {
- perf_callchain_kernel(regs, entry);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
- if (regs)
- perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
- struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
- entry->nr = 0;
- perf_do_callchain(regs, entry);
- return entry;
-}
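
The perf_event.c conversion above moves the driver to the struct pmu callback model: event_init claims events, add/del install and remove them on a hardware counter, start/stop drive counting while tracking PERF_HES_* flags in hwc->state, and pmu_enable/pmu_disable bracket group updates. A stripped-down sketch of that wiring follows; all "demo_" names are hypothetical and the hardware programming is omitted.

/*
 * Minimal sketch of the struct pmu callback model used above.
 * Only the flag/state bookkeeping is shown.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/perf_event.h>

static void demo_pmu_enable(struct pmu *pmu)  { /* start all counters */ }
static void demo_pmu_disable(struct pmu *pmu) { /* stop all counters */ }

static int demo_event_init(struct perf_event *event)
{
	/* Claim only the types we understand; -ENOENT lets other PMUs try. */
	if (event->attr.type != PERF_TYPE_RAW)
		return -ENOENT;
	return 0;
}

static void demo_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
	event->hw.state = 0;		/* counting again */
	/* program and enable the hardware counter here */
}

static void demo_stop(struct perf_event *event, int flags)
{
	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* disable the counter and fold its count into the event */
		event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int demo_add(struct perf_event *event, int flags)
{
	/* allocate a counter for the event here */
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		demo_start(event, PERF_EF_RELOAD);
	perf_event_update_userpage(event);
	return 0;
}

static void demo_del(struct perf_event *event, int flags)
{
	demo_stop(event, PERF_EF_UPDATE);
	/* release the counter here */
	perf_event_update_userpage(event);
}

static void demo_read(struct perf_event *event)
{
	/* fold the current hardware count into event->count */
}

static struct pmu demo_pmu = {
	.pmu_enable	= demo_pmu_enable,
	.pmu_disable	= demo_pmu_disable,
	.event_init	= demo_event_init,
	.add		= demo_add,
	.del		= demo_del,
	.start		= demo_start,
	.stop		= demo_stop,
	.read		= demo_read,
};

static int __init demo_pmu_init(void)
{
	/* a negative type asks the core to allocate a dynamic one */
	return perf_pmu_register(&demo_pmu, "demo", -1);
}
early_initcall(demo_pmu_init);
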
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 183e0d22666..d9a7db78ed6 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
* interrupt, not NMI.
*/
if (handled == IRQ_HANDLED)
- perf_event_do_pending();
+ irq_work_run();
#ifdef CONFIG_MIPS_MT_SMP
read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
"CPU, irq %d%s\n", mipspmu->name, counters, irq,
irq < 0 ? " (share with timer interrupt)" : "");
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
return 0;
}
early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342bca3..dbbe0ce48d8 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
- int err, tmp;
+ int err, tmp __maybe_unused;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e052b2..aae98661379 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
{
- int err, tmp;
+ int err, tmp __maybe_unused;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb95cb4..32a25610108 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
*/
static struct task_struct *cpu_idle_thread[NR_CPUS];
+struct create_idle {
+ struct work_struct work;
+ struct task_struct *idle;
+ struct completion done;
+ int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
+
+ c_idle->idle = fork_idle(c_idle->cpu);
+ complete(&c_idle->done);
+}
+
int __cpuinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
* Linux can schedule processes on this slave.
*/
if (!cpu_idle_thread[cpu]) {
- idle = fork_idle(cpu);
- cpu_idle_thread[cpu] = idle;
+ /*
+ * Schedule a work item so the idle thread is forked from
+ * keventd rather than from the caller's (possibly user) task.
+ * Ported from arch/x86/kernel/smpboot.c.
+ */
+ struct create_idle c_idle = {
+ .cpu = cpu,
+ .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+ };
+
+ INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+ schedule_work(&c_idle.work);
+ wait_for_completion(&c_idle.done);
+ idle = cpu_idle_thread[cpu] = c_idle.idle;
if (IS_ERR(idle))
panic(KERN_ERR "Fork failed for CPU %d", cpu);
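
The __cpu_up() change above borrows the x86 approach of forking the idle thread from a keventd worker so it does not inherit state from the (possibly user) task that triggered CPU bring-up. The on-stack work-plus-completion pattern it uses looks like this in isolation; the "demo_" names are hypothetical.

/*
 * Run a helper in keventd context and wait for its result; in the
 * hunk above the helper calls fork_idle(cpu).
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct demo_request {
	struct work_struct	work;
	struct completion	done;
	int			result;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_request *req = container_of(work, struct demo_request, work);

	req->result = 42;		/* stand-in for the real work */
	complete(&req->done);
}

static int demo_run_in_keventd(void)
{
	struct demo_request req = {
		.done = COMPLETION_INITIALIZER_ONSTACK(req.done),
	};

	INIT_WORK_ONSTACK(&req.work, demo_worker);
	schedule_work(&req.work);
	wait_for_completion(&req.done);
	return req.result;
}
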
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edff45e..58beabf50b3 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
- long cmd, arg1, arg2, arg3;
+ long cmd, arg1, arg2;
cmd = regs.regs[4];
arg1 = regs.regs[5];
arg2 = regs.regs[6];
- arg3 = regs.regs[7];
switch (cmd) {
case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
if (arg1 & 2)
set_thread_flag(TIF_LOGADE);
else
- clear_thread_flag(TIF_FIXADE);
+ clear_thread_flag(TIF_LOGADE);
return 0;
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 570607b376b..832afbb8758 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -115,7 +115,7 @@ SECTIONS
EXIT_DATA
}
- PERCPU(PAGE_SIZE)
+ PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfef8fd..ab52b7cf3b6 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@ struct {
spinlock_t tc_list_lock;
struct list_head tc_list; /* Thread contexts */
} vpecontrol = {
- .vpe_list_lock = SPIN_LOCK_UNLOCKED,
+ .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
.vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
- .tc_list_lock = SPIN_LOCK_UNLOCKED,
+ .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
.tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
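
The vpe.c hunk replaces the deprecated SPIN_LOCK_UNLOCKED initializer with __SPIN_LOCK_UNLOCKED(), which gives each lock a name that lockdep can report. The two common static-initializer forms, shown in isolation with hypothetical "demo_" names:

/* Statically initialized spinlocks, standalone and inside a struct. */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(demo_lock);		/* a standalone lock */

static struct {
	spinlock_t		lock;
	struct list_head	list;
} demo_ctl = {
	/* __SPIN_LOCK_UNLOCKED() names the lock class for lockdep */
	.lock = __SPIN_LOCK_UNLOCKED(demo_ctl_lock),
	.list = LIST_HEAD_INIT(demo_ctl.list),
};
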