From e69f61862ab833e9b8d3c15b6ce07fd69f3bfecc Mon Sep 17 00:00:00 2001 From: Yacine Belkadi Date: Fri, 12 Jul 2013 20:45:47 +0200 Subject: sched: Fix some kernel-doc warnings When building the htmldocs (in verbose mode), scripts/kernel-doc reports the following type of warnings: Warning(kernel/sched/core.c:936): No description found for return value of 'task_curr' ... Fix those by: - adding the missing descriptions - using "Return" sections for the descriptions Signed-off-by: Yacine Belkadi Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1373654747-2389-1-git-send-email-yacine.belkadi.1@gmail.com [ While at it, fix the cpupri_set() explanation. ] Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 82 ++++++++++++++++++++++++++++++++++++++------------- kernel/sched/cpupri.c | 4 +-- kernel/sched/fair.c | 9 ++++-- 3 files changed, 70 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0d8eb4525e7..4c3967f91e2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p) /** * task_curr - is this task currently executing on a CPU? * @p: the task in question. + * + * Return: 1 if the task is currently executing. 0 otherwise. */ inline int task_curr(const struct task_struct *p) { @@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu) * the simpler "current->state = TASK_RUNNING" to mark yourself * runnable without the overhead of this. * - * Returns %true if @p was woken up, %false if it was already running + * Return: %true if @p was woken up, %false if it was already running. * or @state didn't match @p's state. */ static int @@ -1577,8 +1579,9 @@ out: * @p: The process to be woken up. * * Attempt to wake up the nominated process and move it to the set of runnable - * processes. Returns 1 if the process was woken up, 0 if it was already - * running. + * processes. + * + * Return: 1 if the process was woken up, 0 if it was already running. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. @@ -2191,6 +2194,8 @@ void scheduler_tick(void) * This makes sure that uptime, CFS vruntime, load * balancing, etc... continue to move forward, even * with a very low granularity. + * + * Return: Maximum deferment in nanoseconds. */ u64 scheduler_tick_max_deferment(void) { @@ -2796,8 +2801,8 @@ EXPORT_SYMBOL(wait_for_completion); * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. * - * The return value is 0 if timed out, and positive (at least 1, or number of - * jiffies left till timeout) if completed. + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left + * till timeout) if completed. */ unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) @@ -2829,8 +2834,8 @@ EXPORT_SYMBOL(wait_for_completion_io); * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. The caller is accounted as waiting for IO. * - * The return value is 0 if timed out, and positive (at least 1, or number of - * jiffies left till timeout) if completed. + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left + * till timeout) if completed. 
*/ unsigned long __sched wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) @@ -2846,7 +2851,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout); * This waits for completion of a specific task to be signaled. It is * interruptible. * - * The return value is -ERESTARTSYS if interrupted, 0 if completed. + * Return: -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_interruptible(struct completion *x) { @@ -2865,8 +2870,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible); * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. It is interruptible. The timeout is in jiffies. * - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, - * positive (at least 1, or number of jiffies left till timeout) if completed. + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_interruptible_timeout(struct completion *x, @@ -2883,7 +2888,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); * This waits to be signaled for completion of a specific task. It can be * interrupted by a kill signal. * - * The return value is -ERESTARTSYS if interrupted, 0 if completed. + * Return: -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_killable(struct completion *x) { @@ -2903,8 +2908,8 @@ EXPORT_SYMBOL(wait_for_completion_killable); * signaled or for a specified timeout to expire. It can be * interrupted by a kill signal. The timeout is in jiffies. * - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, - * positive (at least 1, or number of jiffies left till timeout) if completed. + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_killable_timeout(struct completion *x, @@ -2918,7 +2923,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout); * try_wait_for_completion - try to decrement a completion without blocking * @x: completion structure * - * Returns: 0 if a decrement cannot be done without blocking + * Return: 0 if a decrement cannot be done without blocking * 1 if a decrement succeeded. * * If a completion is being used as a counting completion, @@ -2945,7 +2950,7 @@ EXPORT_SYMBOL(try_wait_for_completion); * completion_done - Test to see if a completion has any waiters * @x: completion structure * - * Returns: 0 if there are waiters (wait_for_completion() in progress) + * Return: 0 if there are waiters (wait_for_completion() in progress) * 1 if there are no waiters. * */ @@ -3182,7 +3187,7 @@ SYSCALL_DEFINE1(nice, int, increment) * task_prio - return the priority value of a given task. * @p: the task in question. * - * This is the priority value as seen by users in /proc. + * Return: The priority value as seen by users in /proc. * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ @@ -3194,6 +3199,8 @@ int task_prio(const struct task_struct *p) /** * task_nice - return the nice value of a given task. * @p: the task in question. + * + * Return: The nice value [ -20 ... 0 ... 19 ]. */ int task_nice(const struct task_struct *p) { @@ -3204,6 +3211,8 @@ EXPORT_SYMBOL(task_nice); /** * idle_cpu - is a given cpu idle currently? * @cpu: the processor in question. + * + * Return: 1 if the CPU is currently idle. 0 otherwise. 
*/ int idle_cpu(int cpu) { @@ -3226,6 +3235,8 @@ int idle_cpu(int cpu) /** * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. + * + * Return: The idle task for the cpu @cpu. */ struct task_struct *idle_task(int cpu) { @@ -3235,6 +3246,8 @@ struct task_struct *idle_task(int cpu) /** * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. + * + * The task of @pid, if found. %NULL otherwise. */ static struct task_struct *find_process_by_pid(pid_t pid) { @@ -3432,6 +3445,8 @@ recheck: * @policy: new policy. * @param: structure containing the new RT priority. * + * Return: 0 on success. An error code otherwise. + * * NOTE that the task may be already dead. */ int sched_setscheduler(struct task_struct *p, int policy, @@ -3451,6 +3466,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); * current context has permission. For example, this is needed in * stop_machine(): we create temporary high priority worker threads, * but our caller might not have that capability. + * + * Return: 0 on success. An error code otherwise. */ int sched_setscheduler_nocheck(struct task_struct *p, int policy, const struct sched_param *param) @@ -3485,6 +3502,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) * @pid: the pid in question. * @policy: new policy. * @param: structure containing the new RT priority. + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) @@ -3500,6 +3519,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, * sys_sched_setparam - set/change the RT priority of a thread * @pid: the pid in question. * @param: structure containing the new RT priority. + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) { @@ -3509,6 +3530,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) /** * sys_sched_getscheduler - get the policy (scheduling class) of a thread * @pid: the pid in question. + * + * Return: On success, the policy of the thread. Otherwise, a negative error + * code. */ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) { @@ -3535,6 +3559,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) * sys_sched_getparam - get the RT priority of a thread * @pid: the pid in question. * @param: structure containing the RT priority. + * + * Return: On success, 0 and the RT priority is in @param. Otherwise, an error + * code. */ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) { @@ -3659,6 +3686,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to the new cpu mask + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) @@ -3710,6 +3739,8 @@ out_unlock: * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to hold the current cpu mask + * + * Return: 0 on success. An error code otherwise. 
*/ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) @@ -3744,6 +3775,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, * * This function yields the current CPU to other tasks. If there are no * other threads running on this CPU then this function will return. + * + * Return: 0. */ SYSCALL_DEFINE0(sched_yield) { @@ -3869,7 +3902,7 @@ EXPORT_SYMBOL(yield); * It's the caller's job to ensure that the target task struct * can't go away on us before we can do any checks. * - * Returns: + * Return: * true (>0) if we indeed boosted the target task. * false (0) if we failed to boost the target. * -ESRCH if there's no task to yield to. @@ -3972,8 +4005,9 @@ long __sched io_schedule_timeout(long timeout) * sys_sched_get_priority_max - return maximum RT priority. * @policy: scheduling class. * - * this syscall returns the maximum rt_priority that can be used - * by a given scheduling class. + * Return: On success, this syscall returns the maximum + * rt_priority that can be used by a given scheduling class. + * On failure, a negative error code is returned. */ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) { @@ -3997,8 +4031,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) * sys_sched_get_priority_min - return minimum RT priority. * @policy: scheduling class. * - * this syscall returns the minimum rt_priority that can be used - * by a given scheduling class. + * Return: On success, this syscall returns the minimum + * rt_priority that can be used by a given scheduling class. + * On failure, a negative error code is returned. */ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) { @@ -4024,6 +4059,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) * * this syscall writes the default timeslice value of a given process * into the user-space timespec buffer. A value of '0' means infinity. + * + * Return: On success, 0 and the timeslice is in @interval. Otherwise, + * an error code. */ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, struct timespec __user *, interval) @@ -6632,6 +6670,8 @@ void normalize_rt_tasks(void) * @cpu: the processor in question. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! + * + * Return: The current task for @cpu. */ struct task_struct *curr_task(int cpu) { diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 1095e878a46..8b836b376d9 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -62,7 +62,7 @@ static int convert_prio(int prio) * any discrepancies created by racing against the uncertainty of the current * priority configuration. * - * Returns: (int)bool - CPUs were found + * Return: (int)bool - CPUs were found */ int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask) @@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * cpupri_init - initialize the cpupri structure * @cp: The cpupri context * - * Returns: -ENOMEM if memory fails. + * Return: -ENOMEM on memory allocation failure. */ int cpupri_init(struct cpupri *cp) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f77f9c52744..98d135584b4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4280,6 +4280,8 @@ struct sg_lb_stats { * get_sd_load_idx - Obtain the load index for a given sched domain. * @sd: The sched_domain whose load_idx is to be obtained. * @idle: The Idle status of the CPU for whose sd load_icx is obtained. + * + * Return: The load index. 
*/ static inline int get_sd_load_idx(struct sched_domain *sd, enum cpu_idle_type idle) @@ -4574,6 +4576,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, * * Determine if @sg is a busier group than the previously selected * busiest group. + * + * Return: %true if @sg is a busier group than the previously selected + * busiest group. %false otherwise. */ static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, @@ -4691,7 +4696,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, * assuming lower CPU number will be equivalent to lower a SMT thread * number. * - * Returns 1 when packing is required and a task should be moved to + * Return: 1 when packing is required and a task should be moved to * this CPU. The amount of the imbalance is returned in *imbalance. * * @env: The load balancing environment. @@ -4869,7 +4874,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * @balance: Pointer to a variable indicating if this_cpu * is the appropriate cpu to perform load balancing at this_level. * - * Returns: - the busiest group if imbalance exists. + * Return: - The busiest group if imbalance exists. * - If no imbalance and user has opted for power-savings balance, * return the least loaded group whose CPUs can be * put to idle by rebalancing its tasks onto our group. -- cgit v1.2.3-70-g09d2 From 53c035204253efe373d9ff166fae6147e8c693b6 Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Wed, 17 Jul 2013 12:46:53 +0300 Subject: sched_clock: Fix integer overflow The expression '(1 << 32)' happens to evaluate as 0 on ARM, but it evaluates as 1 on xtensa and x86_64. This zeros sched_clock_mask, and breaks sched_clock(). Set the type of 1 to 'unsigned long long' to get the value we need. Reported-by: Max Filippov Tested-by: Max Filippov Acked-by: Russell King Signed-off-by: Baruch Siach Signed-off-by: John Stultz --- kernel/time/sched_clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index a326f27d7f0..0b479a6a22b 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) BUG_ON(bits > 32); WARN_ON(!irqs_disabled()); read_sched_clock = read; - sched_clock_mask = (1 << bits) - 1; + sched_clock_mask = (1ULL << bits) - 1; cd.rate = rate; /* calculate the mult/shift to convert counter ticks to ns. */ -- cgit v1.2.3-70-g09d2 From 543487c7a2670bb0d96c00673a44b74360e3b6c1 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 16 Jul 2013 10:22:12 -0400 Subject: nohz: Do not warn about unstable tsc unless user uses nohz_full If the user enables CONFIG_NO_HZ_FULL and runs the kernel on a machine with an unstable TSC, it will produce a WARN_ON dump as well as taint the kernel. This is a bit extreme for a kernel that just enables a feature but doesn't use it. The warning should only happen if the user tries to use the feature by either adding nohz_full to the kernel command line, or by enabling CONFIG_NO_HZ_FULL_ALL that makes nohz used on all CPUs at boot up. Note, this second feature should not (yet) be used by distros or anyone that doesn't care if NO_HZ is used or not. Signed-off-by: Steven Rostedt Cc: Paul E. 
McKenney Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Borislav Petkov Cc: Li Zhong Cc: Mike Galbraith Cc: Kevin Hilman Signed-off-by: Frederic Weisbecker --- kernel/time/tick-sched.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e80183f4a6c..1102534a1a5 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -182,7 +182,8 @@ static bool can_stop_full_tick(void) * Don't allow the user to think they can get * full NO_HZ with this machine. */ - WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); + WARN_ONCE(have_nohz_full_mask, + "NO_HZ FULL will not work with unstable sched clock"); return false; } #endif -- cgit v1.2.3-70-g09d2 From ca06416b2b4fa562cd3c3f9eb4198c3b2a983342 Mon Sep 17 00:00:00 2001 From: Li Zhong Date: Tue, 16 Jul 2013 12:18:47 +0800 Subject: nohz: fix compile warning in tick_nohz_init() cpu is not used after commit 5b8621a68fdcd2baf1d3b413726f913a5254d46a Signed-off-by: Li Zhong Cc: Steven Rostedt Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Borislav Petkov Cc: Li Zhong Cc: Mike Galbraith Cc: Kevin Hilman Signed-off-by: Frederic Weisbecker --- kernel/time/tick-sched.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1102534a1a5..9563c744dad 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -344,8 +344,6 @@ static int tick_nohz_init_all(void) void __init tick_nohz_init(void) { - int cpu; - if (!have_nohz_full_mask) { if (tick_nohz_init_all() < 0) return; -- cgit v1.2.3-70-g09d2 From bf0bd948d1682e3996adc093b43021ed391983e6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 26 Jul 2013 23:48:42 +0200 Subject: sched: Ensure update_cfs_shares() is called for parents of continuously-running tasks We typically update a task_group's shares within the dequeue/enqueue path. However, continuously running tasks sharing a CPU are not subject to these updates as they are only put/picked. Unfortunately, when we reverted f269ae046 (in 17bc14b7), we lost the augmenting periodic update that was supposed to account for this; resulting in a potential loss of fairness. To fix this, re-introduce the explicit update in update_cfs_rq_blocked_load() [called via entity_tick()]. Reported-by: Max Hailperin Signed-off-by: Peter Zijlstra Reviewed-by: Paul Turner Link: http://lkml.kernel.org/n/tip-9545m3apw5d93ubyrotrj31y@git.kernel.org Cc: Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 98d135584b4..06db94bf47a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) */ update_entity_load_avg(curr, 1); update_cfs_rq_blocked_load(cfs_rq, 1); + update_cfs_shares(cfs_rq); #ifdef CONFIG_SCHED_HRTICK /* -- cgit v1.2.3-70-g09d2 From 85f4896123d0299128f2c95cc40f3b8b01d4b0f6 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 30 Jul 2013 10:13:41 +0200 Subject: mutex: Fix w/w mutex deadlock injection The check needs to be for > 1, because ctx->acquired is already incremented. This will prevent ww_mutex_lock_slow from returning -EDEADLK and not locking the mutex. 
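To see why the threshold must be 1 rather than 0, consider the ordering inside __ww_mutex_lock() in simplified form (an illustrative sketch, not the exact kernel code):

	ret = __mutex_lock_common(&lock->base, ...);
		/* on success, ctx->acquired has already been
		 * incremented for the lock we just took */
	if (!ret && ctx->acquired > 1)
		/* "> 0" would fire even when that lock is the only
		 * one held and there is nothing to deadlock against;
		 * ww_mutex_lock_slow() runs with all other locks
		 * released, so it hits exactly that case */
		return ww_mutex_deadlock_injection(lock, ctx);
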
It caused a lot of false gpu lockups on radeon with CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y because a function that shouldn't be able to return -EDEADLK did. Signed-off-by: Maarten Lankhorst Signed-off-by: Peter Zijlstra Cc: Alex Deucher Cc: Linus Torvalds Cc: Andrew Morton Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/51F775B5.201@canonical.com Signed-off-by: Ingo Molnar --- kernel/mutex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/mutex.c b/kernel/mutex.c index ff05f4bd86e..a52ee7bb830 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) might_sleep(); ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, &ctx->dep_map, _RET_IP_, ctx); - if (!ret && ctx->acquired > 0) + if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); return ret; @@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, &ctx->dep_map, _RET_IP_, ctx); - if (!ret && ctx->acquired > 0) + if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); return ret; -- cgit v1.2.3-70-g09d2 From a903f0865a190f8778c73df1a810ea6e25e5d7cf Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 13 Aug 2013 10:05:59 +0800 Subject: cpuset: fix the return value of cpuset_write_u64() Writing to this file always returns -ENODEV: # echo 1 > cpuset.memory_pressure_enabled -bash: echo: write error: No such device Signed-off-by: Li Zefan Cc: # 3.9+ Signed-off-by: Tejun Heo --- kernel/cpuset.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e5657788fed..010a0083c0a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1608,11 +1608,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) { struct cpuset *cs = cgroup_cs(cgrp); cpuset_filetype_t type = cft->private; - int retval = -ENODEV; + int retval = 0; mutex_lock(&cpuset_mutex); - if (!is_cpuset_online(cs)) + if (!is_cpuset_online(cs)) { + retval = -ENODEV; goto out_unlock; + } switch (type) { case FILE_CPU_EXCLUSIVE: -- cgit v1.2.3-70-g09d2 From e0acd0a68ec7dbf6b7a81a87a867ebd7ac9b76c4 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 12 Aug 2013 18:14:00 +0200 Subject: sched: fix the theoretical signal_wake_up() vs schedule() race This is only theoretical, but after try_to_wake_up(p) was changed to check p->state under p->pi_lock, code like __set_current_state(TASK_INTERRUPTIBLE); schedule(); can miss a signal. This is the special case of wait-for-condition: it relies on the try_to_wake_up/schedule interaction and thus does not need mb() between __set_current_state() and if(signal_pending). However, this __set_current_state() can move into the critical section protected by rq->lock; now that try_to_wake_up() takes another lock, we need to ensure that it can't be reordered with the "if (signal_pending(current))" check inside that section. The patch is actually a one-liner: it simply adds smp_wmb() before spin_lock_irq(rq->lock). This is what try_to_wake_up() already does, for the same reason. We turn this wmb() into the new helper, smp_mb__before_spinlock(), for better documentation and to allow the architectures to change the default implementation. While at it, kill smp_mb__after_lock(); it has no callers. Perhaps we can also add smp_mb__before/after_spinunlock() for prepare_to_wait(). 
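For illustration, the ordering hazard being closed is the classic store-buffering pattern. A minimal user-space analogue in C11 atomics (a sketch only; the kernel uses its own primitives, and the real code paths go through rq->lock) looks like:

	#include <stdatomic.h>

	atomic_int state;	/* stands in for p->state  */
	atomic_int condition;	/* stands in for CONDITION */

	void sleeper(void)
	{
		atomic_store_explicit(&state, 1, memory_order_relaxed);
		/* Without a full barrier here the store above can be
		 * reordered after the load below; the waker then sees
		 * state == 0 while we see condition == 0, and the
		 * wakeup is lost.  This is the gap that
		 * smp_mb__before_spinlock() closes in schedule(). */
		atomic_thread_fence(memory_order_seq_cst);
		if (!atomic_load_explicit(&condition, memory_order_relaxed))
			; /* go to sleep */
	}

	void waker(void)
	{
		atomic_store_explicit(&condition, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load_explicit(&state, memory_order_relaxed))
			; /* wake the sleeper */
	}
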
Signed-off-by: Oleg Nesterov Acked-by: Peter Zijlstra Signed-off-by: Linus Torvalds --- arch/x86/include/asm/spinlock.h | 4 ---- include/linux/spinlock.h | 14 +++++++++++--- kernel/sched/core.c | 14 +++++++++++++- 3 files changed, 24 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 33692eaabab..e3ddd7db723 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() -/* The {read|write|spin}_lock() on x86 are full memory barriers. */ -static inline void smp_mb__after_lock(void) { } -#define ARCH_HAS_SMP_MB_AFTER_LOCK - #endif /* _ASM_X86_SPINLOCK_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 7d537ced949..75f34949d9a 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -117,9 +117,17 @@ do { \ #endif /*arch_spin_is_contended*/ #endif -/* The lock does not imply full memory barrier. */ -#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK -static inline void smp_mb__after_lock(void) { smp_mb(); } +/* + * Despite its name it doesn't necessarily has to be a full barrier. + * It should only guarantee that a STORE before the critical section + * can not be reordered with a LOAD inside this section. + * spin_lock() is the one-way barrier, this LOAD can not escape out + * of the region. So the default implementation simply ensures that + * a STORE can not move into the critical section, smp_wmb() should + * serialize it with another STORE done by spin_lock(). + */ +#ifndef smp_mb__before_spinlock +#define smp_mb__before_spinlock() smp_wmb() #endif /** diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7c32cb7bfe..ef51b0ef4bd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1491,7 +1491,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) unsigned long flags; int cpu, success = 0; - smp_wmb(); + /* + * If we are going to wake up a thread waiting for CONDITION we + * need to ensure that CONDITION=1 done by the caller can not be + * reordered with p->state check below. This pairs with mb() in + * set_current_state() the waiting thread does. + */ + smp_mb__before_spinlock(); raw_spin_lock_irqsave(&p->pi_lock, flags); if (!(p->state & state)) goto out; @@ -2394,6 +2400,12 @@ need_resched: if (sched_feat(HRTICK)) hrtick_clear(rq); + /* + * Make sure that signal_pending_state()->signal_pending() below + * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) + * done by the caller to avoid the race with signal_wake_up(). + */ + smp_mb__before_spinlock(); raw_spin_lock_irq(&rq->lock); switch_count = &prev->nivcsw; -- cgit v1.2.3-70-g09d2 From 40fea92ffb5fa0ef26d10ae0fe5688bc8e61c791 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 13 Aug 2013 14:12:40 -0700 Subject: PM / QoS: Fix workqueue deadlock when using pm_qos_update_request_timeout() pm_qos_update_request_timeout() updates a qos and then schedules a delayed work item to bring the qos back down to the default after the timeout. When the work item runs, pm_qos_work_fn() will call pm_qos_update_request() and deadlock because it tries to cancel itself via cancel_delayed_work_sync(). Future callers of that qos will also hang waiting to cancel the work that is canceling itself. 
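In outline, the cycle is (a simplified sketch of the call chain, not the exact code):

	pm_qos_work_fn()                /* runs on the workqueue */
	  -> pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE)
	       -> cancel_delayed_work_sync(&req->work)
	            /* waits for the handler of req->work --
	             * pm_qos_work_fn() itself -- to finish */
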
Let's extract the little bit of code that does the real work of pm_qos_update_request() and call it from the work function so that we don't deadlock. Before ed1ac6e (PM: don't use [delayed_]work_pending()) this didn't happen because the work function wouldn't try to cancel itself. Signed-off-by: Stephen Boyd Reviewed-by: Tejun Heo Cc: 3.9+ # 3.9+ Signed-off-by: Rafael J. Wysocki --- kernel/power/qos.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 06fe28589e9..a394297f8b2 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req) } EXPORT_SYMBOL_GPL(pm_qos_request_active); +static void __pm_qos_update_request(struct pm_qos_request *req, + s32 new_value) +{ + trace_pm_qos_update_request(req->pm_qos_class, new_value); + + if (new_value != req->node.prio) + pm_qos_update_target( + pm_qos_array[req->pm_qos_class]->constraints, + &req->node, PM_QOS_UPDATE_REQ, new_value); +} + /** * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout * @work: work struct for the delayed work (timeout) @@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work) struct pm_qos_request, work); - pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); + __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); } /** @@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req, } cancel_delayed_work_sync(&req->work); - - trace_pm_qos_update_request(req->pm_qos_class, new_value); - if (new_value != req->node.prio) - pm_qos_update_target( - pm_qos_array[req->pm_qos_class]->constraints, - &req->node, PM_QOS_UPDATE_REQ, new_value); + __pm_qos_update_request(req, new_value); } EXPORT_SYMBOL_GPL(pm_qos_update_request); -- cgit v1.2.3-70-g09d2 From dfa9771a7c4784bafd0673bc7abcee3813088b77 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Tue, 13 Aug 2013 16:00:53 -0700 Subject: microblaze: fix clone syscall Fix inadvertent breakage in the clone syscall ABI for Microblaze that was introduced in commit f3268edbe6fe ("microblaze: switch to generic fork/vfork/clone"). The Microblaze syscall ABI for clone takes the parent tid address in the 4th argument; the third argument slot is used for the stack size. The incorrectly-used CLONE_BACKWARDS type assigned parent tid to the 3rd slot. This commit restores the original ABI so that existing userspace libc code will work correctly. All kernel versions from v3.8-rc1 were affected. Signed-off-by: Michal Simek Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/Kconfig | 6 ++++++ arch/microblaze/Kconfig | 2 +- include/linux/syscalls.h | 5 +++++ kernel/fork.c | 6 ++++++ 4 files changed, 18 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/Kconfig b/arch/Kconfig index 8d2ae24b9f4..1feb169274f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -407,6 +407,12 @@ config CLONE_BACKWARDS2 help Architecture has the first two arguments of clone(2) swapped. +config CLONE_BACKWARDS3 + bool + help + Architecture has tls passed as the 3rd argument of clone(2), + not the 5th one. 
+ config ODD_RT_SIGACTION bool help diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d22a4ecffff..4fab52294d9 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -28,7 +28,7 @@ config MICROBLAZE select GENERIC_CLOCKEVENTS select GENERIC_IDLE_POLL_SETUP select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS + select CLONE_BACKWARDS3 config SWAP def_bool n diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 4147d700a29..84662ecc7b5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void); asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, int __user *); #else +#ifdef CONFIG_CLONE_BACKWARDS3 +asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, + int __user *, int); +#else asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int __user *, int); #endif +#endif asmlinkage long sys_execve(const char __user *filename, const char __user *const __user *argv, diff --git a/kernel/fork.c b/kernel/fork.c index 403d2bb8a96..e23bb19e2a3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val) +#elif defined(CONFIG_CLONE_BACKWARDS3) +SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, + int, stack_size, + int __user *, parent_tidptr, + int __user *, child_tidptr, + int, tls_val) #else SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, -- cgit v1.2.3-70-g09d2 From 2203547f82b7727e2cd3fee3e56fceae2b2b691c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 18 Aug 2013 20:08:07 -0700 Subject: kernel: fix new kernel-doc warning in wait.c Fix new kernel-doc warnings in kernel/wait.c: Warning(kernel/wait.c:374): No description found for parameter 'p' Warning(kernel/wait.c:374): Excess function parameter 'word' description in 'wake_up_atomic_t' Warning(kernel/wait.c:374): Excess function parameter 'bit' description in 'wake_up_atomic_t' Signed-off-by: Randy Dunlap Cc: David Howells Signed-off-by: Linus Torvalds --- kernel/wait.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/wait.c b/kernel/wait.c index dec68bd4e9d..d550920e040 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); /** * wake_up_atomic_t - Wake up a waiter on a atomic_t - * @word: The word being waited on, a kernel virtual address - * @bit: The bit of the word being waited on + * @p: The atomic_t being waited on, a kernel virtual address * * Wake up anyone waiting for the atomic_t to go to zero. * -- cgit v1.2.3-70-g09d2 From 1c09b195d37fa459844036f429a0f378e70c3db6 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 21 Aug 2013 10:22:28 +0800 Subject: cpuset: fix a regression in validating config change It's not allowed to clear masks of a cpuset if there are tasks in it, but it's broken: # mkdir /cgroup/sub # echo 0 > /cgroup/sub/cpuset.cpus # echo 0 > /cgroup/sub/cpuset.mems # echo $$ > /cgroup/sub/tasks # echo > /cgroup/sub/cpuset.cpus (should fail) This bug was introduced by commit 88fa523bff295f1d60244a54833480b02f775152 ("cpuset: allow to move tasks to empty cpusets"). tj: Dropped temp bool variables and nested the conditionals directly. 
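With the fix applied, the final write above is refused, since validate_change() now returns -ENOSPC for it (hypothetical session):

	# echo > /cgroup/sub/cpuset.cpus
	-bash: echo: write error: No space left on device
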
Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cpuset.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 010a0083c0a..ea1966db34f 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) /* * Cpusets with tasks - existing or newly being attached - can't - * have empty cpus_allowed or mems_allowed. + * be changed to have empty cpus_allowed or mems_allowed. */ ret = -ENOSPC; - if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) && - (cpumask_empty(trial->cpus_allowed) && - nodes_empty(trial->mems_allowed))) - goto out; + if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) { + if (!cpumask_empty(cur->cpus_allowed) && + cpumask_empty(trial->cpus_allowed)) + goto out; + if (!nodes_empty(cur->mems_allowed) && + nodes_empty(trial->mems_allowed)) + goto out; + } ret = 0; out: -- cgit v1.2.3-70-g09d2 From 84a78a6504f5c5394a8e558702e5b54131f01d14 Mon Sep 17 00:00:00 2001 From: Nathan Zimmer Date: Wed, 28 Aug 2013 16:35:14 -0700 Subject: timer_list: correct the iterator for timer_list Correct an issue with /proc/timer_list reported by Holger. When reading from the proc file with a sufficiently small buffer (2k, so not really that small), a reader could hang trying to read the file a chunk at a time. The timer_list_start function failed to account for the possibility that the offset was adjusted outside timer_list_next(). Signed-off-by: Nathan Zimmer Reported-by: Holger Hans Peter Freyther Cc: John Stultz Cc: Thomas Gleixner Cc: Berke Durak Cc: Jeff Layton Tested-by: Al Viro Cc: # 3.10.x Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/timer_list.c | 41 ++++++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 3bdf2832301..61ed862cdd3 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now) static int timer_list_show(struct seq_file *m, void *v) { struct timer_list_iter *iter = v; - u64 now = ktime_to_ns(ktime_get()); if (iter->cpu == -1 && !iter->second_pass) - timer_list_header(m, now); + timer_list_header(m, iter->now); else if (!iter->second_pass) print_cpu(m, iter->cpu, iter->now); #ifdef CONFIG_GENERIC_CLOCKEVENTS @@ -298,33 +297,41 @@ void sysrq_timer_list_show(void) return; } -static void *timer_list_start(struct seq_file *file, loff_t *offset) +static void *move_iter(struct timer_list_iter *iter, loff_t offset) { - struct timer_list_iter *iter = file->private; - - if (!*offset) { - iter->cpu = -1; - iter->now = ktime_to_ns(ktime_get()); - } else if (iter->cpu >= nr_cpu_ids) { + for (; offset; offset--) { + iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); + if (iter->cpu >= nr_cpu_ids) { #ifdef CONFIG_GENERIC_CLOCKEVENTS - if (!iter->second_pass) { - iter->cpu = -1; - iter->second_pass = true; - } else - return NULL; + if (!iter->second_pass) { + iter->cpu = -1; + iter->second_pass = true; + } else + return NULL; #else - return NULL; + return NULL; #endif + } } return iter; } +static void *timer_list_start(struct seq_file *file, loff_t *offset) +{ + struct timer_list_iter *iter = file->private; + + if (!*offset) + iter->now = ktime_to_ns(ktime_get()); + iter->cpu = -1; + iter->second_pass = false; + 
return move_iter(iter, *offset); +} + static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset) { struct timer_list_iter *iter = file->private; - iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); ++*offset; - return timer_list_start(file, offset); + return move_iter(iter, 1); } static void timer_list_stop(struct seq_file *seq, void *v) -- cgit v1.2.3-70-g09d2
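For context, the seq_file contract that move_iter() now honors: when a reader drains the file in small chunks, ->start() can be invoked with a position that was not produced by ->next() within the current read, so it must be able to seek forward by itself. A simplified sketch of the calling pattern (illustrative; see Documentation/filesystems/seq_file.txt):

	read(fd, buf, 2048);	/* start(pos=0), show()/next() ..., stop()  */
	read(fd, buf, 2048);	/* start(pos=N), where N is wherever the
				 * first read left off; the old
				 * timer_list_start() assumed a nonzero pos
				 * had just come from timer_list_next()    */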