author    Ingo Molnar <mingo@elte.hu>  2012-03-01 10:26:41 +0100
committer Ingo Molnar <mingo@elte.hu>  2012-03-01 10:26:43 +0100
commit    7e4d960993331e92567f0180e45322a93e6780ba (patch)
tree      4d7444035303fc0b545e88afbd894176344fb2a3 /kernel
parent    de5bdff7a72acc281219be2b8edeeca1fd81c542 (diff)
parent    164974a8f2a482f1abcb027c6d1a89dd79b14297 (diff)

Merge branch 'linus' into sched/core

Merge reason: we'll queue up dependent patches.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/callchain.c  |   2
-rw-r--r--  kernel/events/core.c       | 125
-rw-r--r--  kernel/fork.c              |  27
-rw-r--r--  kernel/irq/autoprobe.c     |   4
-rw-r--r--  kernel/irq/chip.c          |  42
-rw-r--r--  kernel/irq/internals.h     |   2
-rw-r--r--  kernel/irq/manage.c        |   2
-rw-r--r--  kernel/kprobes.c           |   6
-rw-r--r--  kernel/params.c            |   3
-rw-r--r--  kernel/pid.c               |   4
-rw-r--r--  kernel/power/power.h       |  24
-rw-r--r--  kernel/power/process.c     |  26
-rw-r--r--  kernel/power/user.c        |  15
-rw-r--r--  kernel/rcutorture.c        |   8
-rw-r--r--  kernel/relay.c             |  10
-rw-r--r--  kernel/sched/core.c        |   1
-rw-r--r--  kernel/sched/fair.c        |   2
-rw-r--r--  kernel/watchdog.c          |   2

18 files changed, 226 insertions(+), 79 deletions(-)
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 057e24b665c..6581a040f39 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -115,8 +115,6 @@ int get_callchain_buffers(void)
}
err = alloc_callchain_buffers();
- if (err)
- release_callchain_buffers();
exit:
mutex_unlock(&callchain_mutex);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a8f4ac001a0..1b5c081d8b9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -815,7 +815,7 @@ static void update_event_times(struct perf_event *event)
* here.
*/
if (is_cgroup_event(event))
- run_end = perf_event_time(event);
+ run_end = perf_cgroup_event_time(event);
else if (ctx->is_active)
run_end = ctx->time;
else
@@ -2300,7 +2300,10 @@ do { \
return div64_u64(dividend, divisor);
}
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static DEFINE_PER_CPU(int, perf_throttled_count);
+static DEFINE_PER_CPU(u64, perf_throttled_seq);
+
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
struct hw_perf_event *hwc = &event->hw;
s64 period, sample_period;
@@ -2319,22 +2322,40 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
- event->pmu->stop(event, PERF_EF_UPDATE);
+ if (disable)
+ event->pmu->stop(event, PERF_EF_UPDATE);
+
local64_set(&hwc->period_left, 0);
- event->pmu->start(event, PERF_EF_RELOAD);
+
+ if (disable)
+ event->pmu->start(event, PERF_EF_RELOAD);
}
}
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
+/*
+ * combine freq adjustment with unthrottling to avoid two passes over the
+ * events. At the same time, make sure, having freq events does not change
+ * the rate of unthrottling as that would introduce bias.
+ */
+static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
+ int needs_unthr)
{
struct perf_event *event;
struct hw_perf_event *hwc;
- u64 interrupts, now;
+ u64 now, period = TICK_NSEC;
s64 delta;
- if (!ctx->nr_freq)
+ /*
+ * only need to iterate over all events iff:
+ * - context have events in frequency mode (needs freq adjust)
+ * - there are events to unthrottle on this cpu
+ */
+ if (!(ctx->nr_freq || needs_unthr))
return;
+ raw_spin_lock(&ctx->lock);
+ perf_pmu_disable(ctx->pmu);
+
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -2344,13 +2365,8 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
hwc = &event->hw;
- interrupts = hwc->interrupts;
- hwc->interrupts = 0;
-
- /*
- * unthrottle events on the tick
- */
- if (interrupts == MAX_INTERRUPTS) {
+ if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
+ hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
}
@@ -2358,14 +2374,30 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
if (!event->attr.freq || !event->attr.sample_freq)
continue;
- event->pmu->read(event);
+ /*
+ * stop the event and update event->count
+ */
+ event->pmu->stop(event, PERF_EF_UPDATE);
+
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
hwc->freq_count_stamp = now;
+ /*
+ * restart the event
+ * reload only if value has changed
+ * we have stopped the event so tell that
+ * to perf_adjust_period() to avoid stopping it
+ * twice.
+ */
if (delta > 0)
- perf_adjust_period(event, period, delta);
+ perf_adjust_period(event, period, delta, false);
+
+ event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
}
+
+ perf_pmu_enable(ctx->pmu);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -2388,16 +2420,13 @@ static void rotate_ctx(struct perf_event_context *ctx)
*/
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
- u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
struct perf_event_context *ctx = NULL;
- int rotate = 0, remove = 1, freq = 0;
+ int rotate = 0, remove = 1;
if (cpuctx->ctx.nr_events) {
remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
- if (cpuctx->ctx.nr_freq)
- freq = 1;
}
ctx = cpuctx->task_ctx;
@@ -2405,37 +2434,26 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
- if (ctx->nr_freq)
- freq = 1;
}
- if (!rotate && !freq)
+ if (!rotate)
goto done;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
- if (freq) {
- perf_ctx_adjust_freq(&cpuctx->ctx, interval);
- if (ctx)
- perf_ctx_adjust_freq(ctx, interval);
- }
-
- if (rotate) {
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ if (ctx)
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
- rotate_ctx(&cpuctx->ctx);
- if (ctx)
- rotate_ctx(ctx);
+ rotate_ctx(&cpuctx->ctx);
+ if (ctx)
+ rotate_ctx(ctx);
- perf_event_sched_in(cpuctx, ctx, current);
- }
+ perf_event_sched_in(cpuctx, ctx, current);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
-
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
@@ -2445,10 +2463,22 @@ void perf_event_task_tick(void)
{
struct list_head *head = &__get_cpu_var(rotation_list);
struct perf_cpu_context *cpuctx, *tmp;
+ struct perf_event_context *ctx;
+ int throttled;
WARN_ON(!irqs_disabled());
+ __this_cpu_inc(perf_throttled_seq);
+ throttled = __this_cpu_xchg(perf_throttled_count, 0);
+
list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+ ctx = &cpuctx->ctx;
+ perf_adjust_freq_unthr_context(ctx, throttled);
+
+ ctx = cpuctx->task_ctx;
+ if (ctx)
+ perf_adjust_freq_unthr_context(ctx, throttled);
+
if (cpuctx->jiffies_interval == 1 ||
!(jiffies % cpuctx->jiffies_interval))
perf_rotate_context(cpuctx);
@@ -4509,6 +4539,7 @@ static int __perf_event_overflow(struct perf_event *event,
{
int events = atomic_read(&event->event_limit);
struct hw_perf_event *hwc = &event->hw;
+ u64 seq;
int ret = 0;
/*
@@ -4518,14 +4549,20 @@ static int __perf_event_overflow(struct perf_event *event,
if (unlikely(!is_sampling_event(event)))
return 0;
- if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
- if (throttle) {
+ seq = __this_cpu_read(perf_throttled_seq);
+ if (seq != hwc->interrupts_seq) {
+ hwc->interrupts_seq = seq;
+ hwc->interrupts = 1;
+ } else {
+ hwc->interrupts++;
+ if (unlikely(throttle
+ && hwc->interrupts >= max_samples_per_tick)) {
+ __this_cpu_inc(perf_throttled_count);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
ret = 1;
}
- } else
- hwc->interrupts++;
+ }
if (event->attr.freq) {
u64 now = perf_clock();
@@ -4534,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
hwc->freq_time_stamp = now;
if (delta > 0 && delta < 2*TICK_NSEC)
- perf_adjust_period(event, delta, hwc->last_period);
+ perf_adjust_period(event, delta, hwc->last_period, true);
}
/*
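
The core.c changes above replace per-event unthrottle bookkeeping with a per-CPU generation counter: perf_event_task_tick() bumps perf_throttled_seq and xchg()s perf_throttled_count back to zero, while __perf_event_overflow() restarts its interrupt count whenever it observes a new sequence number. A minimal user-space C model of that accounting; tick(), overflow() and the sample limit are illustrative, not kernel API:

	#include <stdio.h>

	#define MAX_SAMPLES_PER_TICK 3           /* stand-in for the sysctl limit */

	static unsigned long long throttled_seq; /* models perf_throttled_seq */
	static unsigned int throttled_count;     /* models perf_throttled_count */

	struct event {
		unsigned long long interrupts_seq;
		unsigned int interrupts;
	};

	static void tick(void)
	{
		throttled_seq++;     /* new tick: every event's count restarts */
		throttled_count = 0; /* the kernel xchg()s this and unthrottles */
	}

	/* returns 1 when the event must be throttled until the next tick */
	static int overflow(struct event *e)
	{
		if (e->interrupts_seq != throttled_seq) {
			e->interrupts_seq = throttled_seq; /* first overflow this tick */
			e->interrupts = 1;
			return 0;
		}
		if (++e->interrupts >= MAX_SAMPLES_PER_TICK) {
			throttled_count++; /* tells the tick handler to unthrottle */
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct event e = { 0, 0 };

		tick();
		for (int i = 0; i < 5; i++)
			printf("overflow %d -> throttle=%d\n", i, overflow(&e));
		return 0;
	}
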
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c..e2cd3e2a5ae 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -66,6 +66,7 @@
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
+#include <linux/signalfd.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -647,6 +648,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(get_task_mm);
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+ struct mm_struct *mm;
+ int err;
+
+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ mm = get_task_mm(task);
+ if (mm && mm != current->mm &&
+ !ptrace_may_access(task, mode)) {
+ mmput(mm);
+ mm = ERR_PTR(-EACCES);
+ }
+ mutex_unlock(&task->signal->cred_guard_mutex);
+
+ return mm;
+}
+
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error success whatever.
@@ -890,7 +911,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
return -ENOMEM;
new_ioc->ioprio = ioc->ioprio;
- put_io_context(new_ioc, NULL);
+ put_io_context(new_ioc);
}
#endif
return 0;
@@ -915,8 +936,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
void __cleanup_sighand(struct sighand_struct *sighand)
{
- if (atomic_dec_and_test(&sighand->count))
+ if (atomic_dec_and_test(&sighand->count)) {
+ signalfd_cleanup(sighand);
kmem_cache_free(sighand_cachep, sighand);
+ }
}
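
mm_access() centralizes a pattern its callers previously open-coded: take cred_guard_mutex so credentials cannot change mid-check, grab a reference on the mm, and drop that reference again if the ptrace permission check fails. A runnable user-space model of that lock/ref/check/unref shape; every name here is an illustrative stand-in, not kernel API:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	struct mm { int refs; };

	static pthread_mutex_t cred_guard = PTHREAD_MUTEX_INITIALIZER;

	static struct mm *get_mm(struct mm *mm) { mm->refs++; return mm; }
	static void put_mm(struct mm *mm)       { mm->refs--; }
	static int may_access(int same_task)    { return same_task; }

	static struct mm *mm_access_model(struct mm *mm, int same_task, int *err)
	{
		struct mm *ret;

		pthread_mutex_lock(&cred_guard);
		ret = get_mm(mm);
		if (!may_access(same_task)) {
			put_mm(ret);    /* undo the reference on failure */
			ret = NULL;
			*err = -EACCES; /* the kernel returns ERR_PTR(-EACCES) */
		}
		pthread_mutex_unlock(&cred_guard);
		return ret;
	}

	int main(void)
	{
		struct mm mm = { 0 };
		int err = 0;

		if (!mm_access_model(&mm, 0, &err))
			printf("denied: %d, refs back to %d\n", err, mm.refs);
		return 0;
	}
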
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 342d8f44e40..0119b9d467a 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
if (desc->irq_data.chip->irq_set_type)
desc->irq_data.chip->irq_set_type(&desc->irq_data,
IRQ_TYPE_PROBE);
- irq_startup(desc);
+ irq_startup(desc, false);
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
raw_spin_lock_irq(&desc->lock);
if (!desc->action && irq_settings_can_probe(desc)) {
desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
- if (irq_startup(desc))
+ if (irq_startup(desc, false))
desc->istate |= IRQS_PENDING;
}
raw_spin_unlock_irq(&desc->lock);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f7c543a801d..fb7db75ee0c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -157,19 +157,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
-int irq_startup(struct irq_desc *desc)
+int irq_startup(struct irq_desc *desc, bool resend)
{
+ int ret = 0;
+
irq_state_clr_disabled(desc);
desc->depth = 0;
if (desc->irq_data.chip->irq_startup) {
- int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+ ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
irq_state_clr_masked(desc);
- return ret;
+ } else {
+ irq_enable(desc);
}
-
- irq_enable(desc);
- return 0;
+ if (resend)
+ check_irq_resend(desc, desc->irq_data.irq);
+ return ret;
}
void irq_shutdown(struct irq_desc *desc)
@@ -330,6 +333,24 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
+/*
+ * Called unconditionally from handle_level_irq() and only for oneshot
+ * interrupts from handle_fasteoi_irq()
+ */
+static void cond_unmask_irq(struct irq_desc *desc)
+{
+ /*
+ * We need to unmask in the following cases:
+ * - Standard level irq (IRQF_ONESHOT is not set)
+ * - Oneshot irq which did not wake the thread (caused by a
+ * spurious interrupt or a primary handler handling it
+ * completely).
+ */
+ if (!irqd_irq_disabled(&desc->irq_data) &&
+ irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
+ unmask_irq(desc);
+}
+
/**
* handle_level_irq - Level type irq handler
* @irq: the interrupt number
@@ -362,8 +383,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
handle_irq_event(desc);
- if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
- unmask_irq(desc);
+ cond_unmask_irq(desc);
+
out_unlock:
raw_spin_unlock(&desc->lock);
}
@@ -417,6 +438,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
preflow_handler(desc);
handle_irq_event(desc);
+ if (desc->istate & IRQS_ONESHOT)
+ cond_unmask_irq(desc);
+
out_eoi:
desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
@@ -625,7 +649,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
- irq_startup(desc);
+ irq_startup(desc, true);
}
out:
irq_put_desc_busunlock(desc, flags);
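
cond_unmask_irq() above condenses the unmask decision into one predicate: unmask only if the line is not disabled, is still masked, and no oneshot thread handler is in flight. A runnable model of just that predicate, with an illustrative struct standing in for struct irq_data:

	#include <stdbool.h>
	#include <stdio.h>

	struct irq_model {
		bool disabled;                 /* irqd_irq_disabled() */
		bool masked;                   /* irqd_irq_masked() */
		unsigned long threads_oneshot; /* bits of threads still running */
	};

	static bool should_unmask(const struct irq_model *d)
	{
		return !d->disabled && d->masked && !d->threads_oneshot;
	}

	int main(void)
	{
		struct irq_model level   = { false, true, 0 }; /* plain level irq */
		struct irq_model oneshot = { false, true, 1 }; /* thread in flight */

		printf("level: %d, oneshot pending: %d\n",
		       should_unmask(&level), should_unmask(&oneshot));
		return 0;
	}
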
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b7952316016..40378ff877e 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
-extern int irq_startup(struct irq_desc *desc);
+extern int irq_startup(struct irq_desc *desc, bool resend);
extern void irq_shutdown(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a9a9dbe49fe..32313c08444 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1027,7 +1027,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
desc->istate |= IRQS_ONESHOT;
if (irq_settings_can_autoenable(desc))
- irq_startup(desc);
+ irq_startup(desc, true);
else
/* Undo nested disables: */
desc->depth = 1;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 29f5b65bee2..9788c0ec6f4 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1673,8 +1673,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
ri->rp = rp;
ri->task = current;
- if (rp->entry_handler && rp->entry_handler(ri, regs))
+ if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+ raw_spin_lock_irqsave(&rp->lock, flags);
+ hlist_add_head(&ri->hlist, &rp->free_instances);
+ raw_spin_unlock_irqrestore(&rp->lock, flags);
return 0;
+ }
arch_prepare_kretprobe(ri, regs);
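
The kprobes hunk plugs an instance leak: when the entry_handler declines the probe, the preallocated return instance must be pushed back onto rp->free_instances under the lock rather than dropped. A small runnable model of recycling an object onto a locked free list; plain pointers and a pthread mutex stand in for hlist and raw spinlocks:

	#include <pthread.h>
	#include <stdio.h>

	struct instance { struct instance *next; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct instance *free_list;

	/* the fix: on an early-out, push the instance back instead of leaking it */
	static void recycle(struct instance *ri)
	{
		pthread_mutex_lock(&lock);
		ri->next = free_list;
		free_list = ri;
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		struct instance ri = { 0 };

		recycle(&ri); /* entry_handler said "skip this one" */
		printf("free list head: %p\n", (void *)free_list);
		return 0;
	}
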
diff --git a/kernel/params.c b/kernel/params.c
index 32ee0430828..4bc965d8a1f 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -97,7 +97,8 @@ static int parse_one(char *param,
for (i = 0; i < num_params; i++) {
if (parameq(param, params[i].name)) {
/* No one handled NULL, so do it here. */
- if (!val && params[i].ops->set != param_set_bool)
+ if (!val && params[i].ops->set != param_set_bool
+ && params[i].ops->set != param_set_bint)
return -EINVAL;
pr_debug("They are equal! Calling %p\n",
params[i].ops->set);
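
parse_one() rejects a parameter passed without "=value" unless the type can supply a default; the hunk extends that exception from param_set_bool to the new param_set_bint. A sketch of the rule in plain C, where accepts_null stands in for comparing the ops->set pointer:

	#include <stdbool.h>
	#include <stdio.h>

	/* sketch: a bare "param" (no =value) is only valid for bool-like types */
	struct param { const char *name; bool accepts_null; };

	static int parse_one(const struct param *p, const char *val)
	{
		if (!val && !p->accepts_null)
			return -1; /* -EINVAL in the kernel */
		return 0;
	}

	int main(void)
	{
		struct param boolp = { "debug", true };  /* bool or bint */
		struct param intp  = { "count", false };

		printf("debug alone: %d, count alone: %d\n",
		       parse_one(&boolp, NULL), parse_one(&intp, NULL));
		return 0;
	}
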
diff --git a/kernel/pid.c b/kernel/pid.c
index ce8e00deacc..9f08dfabaf1 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -543,12 +543,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
*/
void __init pidhash_init(void)
{
- int i, pidhash_size;
+ unsigned int i, pidhash_size;
pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
HASH_EARLY | HASH_SMALL,
&pidhash_shift, NULL, 4096);
- pidhash_size = 1 << pidhash_shift;
+ pidhash_size = 1U << pidhash_shift;
for (i = 0; i < pidhash_size; i++)
INIT_HLIST_HEAD(&pid_hash[i]);
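
The pid.c hunk is a signedness cleanup: computing the table size as 1 << pidhash_shift in a plain int risks shifting into the sign bit if the shift were ever large, so the literal and the loop index both become unsigned. A tiny demonstration of the well-defined variant:

	#include <stdio.h>

	int main(void)
	{
		int shift = 31;

		/* 1 << 31 overflows signed int (undefined behaviour);
		 * 1U << 31 is a well-defined unsigned value */
		printf("%u\n", 1U << shift);
		return 0;
	}
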
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 0c4defe6d3b..21724eee520 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -231,8 +231,28 @@ extern int pm_test_level;
#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
- int error = freeze_processes();
- return error ? : freeze_kernel_threads();
+ int error;
+
+ error = freeze_processes();
+
+ /*
+ * freeze_processes() automatically thaws every task if freezing
+ * fails. So we need not do anything extra upon error.
+ */
+ if (error)
+ goto Finish;
+
+ error = freeze_kernel_threads();
+
+ /*
+ * freeze_kernel_threads() thaws only kernel threads upon freezing
+ * failure. So we have to thaw the userspace tasks ourselves.
+ */
+ if (error)
+ thaw_processes();
+
+ Finish:
+ return error;
}
static inline void suspend_thaw_processes(void)
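
The rewritten suspend_freeze_processes() encodes an asymmetric unwind, spelled out in its comments: freeze_processes() cleans up after itself completely, while freeze_kernel_threads() thaws only kernel threads, leaving userspace frozen for the caller to handle. A runnable model of that control flow with stubbed freezers (all names illustrative):

	#include <stdio.h>

	static int freeze_processes_ok = 1;
	static int freeze_kernel_threads_ok = 0;

	static int freeze_processes(void)      { return freeze_processes_ok ? 0 : -1; }
	static int freeze_kernel_threads(void) { return freeze_kernel_threads_ok ? 0 : -1; }
	static void thaw_processes(void)       { puts("thawing userspace"); }

	static int suspend_freeze_processes(void)
	{
		int error = freeze_processes();

		/* freeze_processes() thaws everything itself on failure */
		if (error)
			return error;

		error = freeze_kernel_threads();

		/* freeze_kernel_threads() thaws only kernel threads; userspace
		 * is still frozen and is our responsibility */
		if (error)
			thaw_processes();

		return error;
	}

	int main(void)
	{
		printf("result: %d\n", suspend_freeze_processes());
		return 0;
	}
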
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 77274c9ba2f..7e426459e60 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -143,7 +143,10 @@ int freeze_processes(void)
/**
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
*
- * On success, returns 0. On failure, -errno and system is fully thawed.
+ * On success, returns 0. On failure, -errno and only the kernel threads are
+ * thawed, so as to give a chance to the caller to do additional cleanups
+ * (if any) before thawing the userspace tasks. So, it is the responsibility
+ * of the caller to thaw the userspace tasks, when the time is right.
*/
int freeze_kernel_threads(void)
{
@@ -159,7 +162,7 @@ int freeze_kernel_threads(void)
BUG_ON(in_atomic());
if (error)
- thaw_processes();
+ thaw_kernel_threads();
return error;
}
@@ -188,3 +191,22 @@ void thaw_processes(void)
printk("done.\n");
}
+void thaw_kernel_threads(void)
+{
+ struct task_struct *g, *p;
+
+ pm_nosig_freezing = false;
+ printk("Restarting kernel threads ... ");
+
+ thaw_workqueues();
+
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+ __thaw_task(p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+
+ schedule();
+ printk("done.\n");
+}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6b1ab7a8852..3e100075b13 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -249,13 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
}
pm_restore_gfp_mask();
error = hibernation_snapshot(data->platform_support);
- if (!error) {
+ if (error) {
+ thaw_kernel_threads();
+ } else {
error = put_user(in_suspend, (int __user *)arg);
if (!error && !freezer_test_done)
data->ready = 1;
if (freezer_test_done) {
freezer_test_done = false;
- thaw_processes();
+ thaw_kernel_threads();
}
}
break;
@@ -274,6 +276,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
swsusp_free();
memset(&data->handle, 0, sizeof(struct snapshot_handle));
data->ready = 0;
+ /*
+ * It is necessary to thaw kernel threads here, because
+ * SNAPSHOT_CREATE_IMAGE may be invoked directly after
+ * SNAPSHOT_FREE. In that case, if kernel threads were not
+ * thawed, the preallocation of memory carried out by
+ * hibernation_snapshot() might run into problems (i.e. it
+ * might fail or even deadlock).
+ */
+ thaw_kernel_threads();
break;
case SNAPSHOT_PREF_IMAGE_SIZE:
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 88f17b8a3b1..a58ac285fc6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -56,8 +56,8 @@ static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4; /* # fake writer threads */
static int stat_interval; /* Interval between stats, in seconds. */
/* Defaults to "only at end of test". */
-static int verbose; /* Print more debug info. */
-static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
+static bool verbose; /* Print more debug info. */
+static bool test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5; /* Start/stop testing interval (in sec) */
static int irqreader = 1; /* RCU readers from irq (timers). */
@@ -1399,7 +1399,7 @@ rcu_torture_shutdown(void *arg)
* Execute random CPU-hotplug operations at the interval specified
* by the onoff_interval.
*/
-static int
+static int __cpuinit
rcu_torture_onoff(void *arg)
{
int cpu;
@@ -1447,7 +1447,7 @@ rcu_torture_onoff(void *arg)
return 0;
}
-static int
+static int __cpuinit
rcu_torture_onoff_init(void)
{
if (onoff_interval <= 0)
diff --git a/kernel/relay.c b/kernel/relay.c
index 4335e1d7ee2..ab56a1764d4 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -164,10 +164,14 @@ depopulate:
*/
static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
- if (!buf)
+ struct rchan_buf *buf;
+
+ if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
return NULL;
+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
if (!buf->padding)
goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
if (!(subbuf_size && n_subbufs))
return NULL;
+ if (subbuf_size > UINT_MAX / n_subbufs)
+ return NULL;
chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
if (!chan)
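
Both relay.c hunks are integer-overflow guards: n_subbufs * sizeof(size_t *) and subbuf_size * n_subbufs feed allocation sizes, so one factor is checked against UINT_MAX divided by the other before multiplying. The same guard in portable user-space C, with SIZE_MAX in place of the kernel's UINT_MAX and illustrative names:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* refuse n * size when it would wrap, so the allocator never sees a
	 * silently truncated length */
	static void *alloc_array(size_t n, size_t size)
	{
		if (size && n > SIZE_MAX / size)
			return NULL;
		return malloc(n * size);
	}

	int main(void)
	{
		void *p = alloc_array(16, 8);

		printf("sane: %p, overflow: %p\n", p, alloc_array(SIZE_MAX, 2));
		free(p);
		return 0;
	}
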
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d7c43227311..91fe6a0e909 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1932,7 +1932,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev);
- trace_sched_stat_sleeptime(current, rq->clock);
fire_sched_in_preempt_notifiers(current);
if (mm)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6ce9992926d..79e9e13c31a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1003,6 +1003,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (unlikely(delta > se->statistics.sleep_max))
se->statistics.sleep_max = delta;
+ se->statistics.sleep_start = 0;
se->statistics.sum_sleep_runtime += delta;
if (tsk) {
@@ -1019,6 +1020,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (unlikely(delta > se->statistics.block_max))
se->statistics.block_max = delta;
+ se->statistics.block_start = 0;
se->statistics.sum_sleep_runtime += delta;
if (tsk) {
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1d7bca7f4f5..d117262deba 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -296,7 +296,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
if (__this_cpu_read(soft_watchdog_warn) == true)
return HRTIMER_RESTART;
- printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+ printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
print_modules();