| author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2012-05-18 13:13:33 -0300 |
|---|---|---|
| committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2012-05-18 13:13:33 -0300 |
| commit | 16ee6576e25b83806d26eb771138249fcfb5eddc (patch) | |
| tree | 7c717b80f28b5c59ba673dc00f2ca9bd0fc068d4 /arch/arm/kernel | |
| parent | 16fa7e8200fb9066b77a3f27cbed8e4a9fc71998 (diff) | |
| parent | 9b63776fa3ca96c4ecda76f6fa947b7b0add66ac (diff) | |
Merge remote-tracking branch 'tip/perf/urgent' into perf/core
Merge reason: We are going to queue up a dependent patch:
"perf tools: Move parse event automated tests to separated object"
That depends on:
commit e7c72d8
perf tools: Add 'G' and 'H' modifiers to event parsing
Conflicts:
	tools/perf/builtin-stat.c
Conflicted with the recent 'perf_target' patches when checking the
result of perf_evsel open routines to see if a retry is needed to cope
with older kernels where the exclude guest/host perf_event_attr bits
were not used.
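
For context, the fallback at issue looks roughly like the sketch below. This is a minimal, illustrative version only, not the verbatim builtin-stat.c code; the function name `open_counter`, the flag `exclude_guest_missing`, and the global `evsel_list` are assumptions modelled on the perf tool sources of that period.

```c
/*
 * Illustrative sketch of the open-retry fallback (names are
 * assumptions, not the verbatim builtin-stat.c code). Older
 * kernels reject a perf_event_attr with the exclude_guest/
 * exclude_host bits set, failing the open with EINVAL, so the
 * open is retried once with those bits cleared.
 */
static bool exclude_guest_missing;

static int open_counter(struct perf_evsel *counter)
{
retry:
	if (exclude_guest_missing)
		counter->attr.exclude_guest = counter->attr.exclude_host = 0;

	if (perf_evsel__open(counter, evsel_list->cpus,
			     evsel_list->threads) < 0) {
		if (errno == EINVAL && !exclude_guest_missing) {
			/* Kernel predates the G/H bits: retry without them. */
			exclude_guest_missing = true;
			goto retry;
		}
		return -1;
	}
	return 0;
}
```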
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'arch/arm/kernel')

| -rw-r--r-- | arch/arm/kernel/irq.c | 6 |
|---|---|---|
| -rw-r--r-- | arch/arm/kernel/signal.c | 55 |
| -rw-r--r-- | arch/arm/kernel/smp.c | 28 |
| -rw-r--r-- | arch/arm/kernel/smp_twd.c | 6 |

4 files changed, 25 insertions(+), 70 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 71ccdbfed66..8349d4e97e2 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (c->irq_set_affinity)
-		c->irq_set_affinity(d, affinity, true);
-	else
+	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
 
 	return ret;
 }
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7cb532fc8aa..d68d1b69468 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	const unsigned long magic = VFP_MAGIC;
 	const unsigned long size = VFP_STORAGE_SIZE;
 	int err = 0;
 
-	vfp_sync_hwstate(thread);
 	__put_user_error(magic, &frame->magic, err);
 	__put_user_error(size, &frame->size, err);
 
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
-			      sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Copy the exception registers.
-	 */
-	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
-	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+	if (err)
+		return -EFAULT;
 
-	return err ? -EFAULT : 0;
+	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 static int restore_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	unsigned long magic;
 	unsigned long size;
-	unsigned long fpexc;
 	int err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	vfp_flush_hwstate(thread);
-
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
-				sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Sanitise and restore the exception registers.
-	 */
-	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
-	/* Ensure the VFP is enabled. */
-	fpexc |= FPEXC_EN;
-	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
-	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
-	h->fpexc = fpexc;
-
-	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-
-	return err ? -EFAULT : 0;
+	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8028c..f6a4d32b042 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +599,8 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 5b150afb995..fef42b21cec 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -118,14 +118,10 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
 	 * The twd clock events must be reprogrammed to account for the new
 	 * frequency. The timer is local to a cpu, so cross-call to the
 	 * changing cpu.
-	 *
-	 * Only wait for it to finish, if the cpu is active to avoid
-	 * deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during
-	 * booting of that cpu.
 	 */
	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
 		smp_call_function_single(freqs->cpu, twd_update_frequency,
-			NULL, cpu_active(freqs->cpu));
+			NULL, 1);
 
 	return NOTIFY_OK;
 }
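
The two helpers referenced in the signal.c hunks, vfp_preserve_user_clear_hwstate() and vfp_restore_user_hwstate(), absorb the logic removed from signal.c into the VFP code in arch/arm/vfp/vfpmodule.c. As a rough sketch of what the restore side does, reconstructed from the lines removed above rather than from the helper's actual source, which may differ in detail:

```c
/*
 * Sketch of the restore-side helper, reconstructed from the code
 * removed from restore_vfp_context() above; illustrative only, the
 * real function in arch/arm/vfp/vfpmodule.c may differ in detail.
 */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
			     struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;
	int err = 0;

	/* Discard any stale hardware VFP state for this thread. */
	vfp_flush_hwstate(thread);

	/* Copy the floating point registers; some may be unused. */
	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
				sizeof(hwstate->fpregs));

	/* Copy the status and control register. */
	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/* Sanitise and restore the exception registers. */
	__get_user_error(fpexc, &ufp_exc->fpexc, err);
	fpexc |= FPEXC_EN;			/* ensure VFP is enabled */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);	/* clear exception state */
	hwstate->fpexc = fpexc;

	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	return err ? -EFAULT : 0;
}
```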