From ff1df896aef8e0ec1556a5c44f424bd45bfa2cbe Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Mon, 27 Jan 2014 17:03:41 -0500 Subject: sched/numa: Rename p->numa_faults to numa_faults_memory In order to get a more consistent naming scheme, making it clear which fault statistics track memory locality, and which track CPU locality, rename the memory fault statistics. Suggested-by: Mel Gorman Signed-off-by: Rik van Riel Acked-by: Mel Gorman Signed-off-by: Peter Zijlstra Cc: Chegu Vinod Link: http://lkml.kernel.org/r/1390860228-21539-3-git-send-email-riel@redhat.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 81343d6bd9c..bc708c53bf0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1744,8 +1744,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; p->numa_scan_period = sysctl_numa_balancing_scan_delay; p->numa_work.next = &p->numa_work; - p->numa_faults = NULL; - p->numa_faults_buffer = NULL; + p->numa_faults_memory = NULL; + p->numa_faults_buffer_memory = NULL; INIT_LIST_HEAD(&p->numa_entry); p->numa_group = NULL; -- cgit v1.2.3-70-g09d2 From 7e2703e6099609adc93679c4d45cd6247f565971 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Mon, 27 Jan 2014 17:03:45 -0500 Subject: sched/numa: Normalize faults_cpu stats and weigh by CPU use Tracing the code that decides the active nodes has made it abundantly clear that the naive implementation of the faults_from code has issues. Specifically, the garbage collector in some workloads will access orders of magnitude more memory than the threads that do all the active work. This resulted in the node with the garbage collector being marked the only active node in the group. This issue is avoided if we weigh the statistics by the CPU use of each task in the numa group, instead of by how many faults each thread has incurred. To achieve this, we normalize the number of faults to the fraction of faults that occurred on each node, and then multiply that fraction by the fraction of CPU time the task has used since the last time task_numa_placement was invoked. This way the nodes in the active node mask will be the ones where the tasks from the numa group are most actively running, and the influence of e.g. the garbage collector and other do-little threads is properly minimized. Using CPU use statistics calculated over a longer interval results in about 1% fewer page migrations with two 32-warehouse specjbb runs on a 4 node system, and about 5% fewer page migrations, as well as 1% better throughput, with two 8-warehouse specjbb runs, as compared with the shorter term statistics kept by the scheduler.
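To make the weighting arithmetic concrete, here is a minimal stand-alone sketch in user-space C (the helper name and the input numbers are hypothetical; the kernel's actual computation uses div64_u64 on the per-node fault arrays, as in the fair.c hunk below):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the 16.16 fixed-point weighting: a task's per-node
 * CPU-fault count is scaled by the fraction of CPU time the task
 * used over the placement period, so do-little threads such as a
 * garbage collector no longer dominate the active-node mask.
 */
static uint64_t weigh_faults(uint64_t runtime, uint64_t period,
			     uint64_t node_faults, uint64_t total_faults)
{
	/* fraction of CPU time used, as a 16.16 fixed-point value */
	uint64_t f_weight = (runtime << 16) / (period + 1);

	/* scale by the fraction of faults that occurred on this node */
	return f_weight * node_faults / (total_faults + 1);
}

int main(void)
{
	/* busy worker: 90% CPU, 100 of 1000 group faults on one node */
	printf("worker: %llu\n",
	       (unsigned long long)weigh_faults(90, 100, 100, 1000));
	/* garbage collector: 5% CPU but 900 of 1000 faults */
	printf("gc:     %llu\n",
	       (unsigned long long)weigh_faults(5, 100, 900, 1000));
	return 0;
}

With these made-up inputs the busy worker's weighted contribution is roughly twice the garbage collector's, even though the worker incurred nine times fewer faults - exactly the inversion this patch is after.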
Signed-off-by: Rik van Riel Acked-by: Mel Gorman Signed-off-by: Peter Zijlstra Cc: Chegu Vinod Link: http://lkml.kernel.org/r/1390860228-21539-7-git-send-email-riel@redhat.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 ++ kernel/sched/core.c | 2 ++ kernel/sched/fair.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 55 insertions(+), 2 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/include/linux/sched.h b/include/linux/sched.h index 5ab3b89fc33..ef92953764f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1459,6 +1459,8 @@ struct task_struct { int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; /* migration stamp */ + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bc708c53bf0..a561c9e8e38 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1746,6 +1746,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) p->numa_work.next = &p->numa_work; p->numa_faults_memory = NULL; p->numa_faults_buffer_memory = NULL; + p->last_task_numa_placement = 0; + p->last_sum_exec_runtime = 0; INIT_LIST_HEAD(&p->numa_entry); p->numa_group = NULL; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index eeabb33f349..8fc3a823481 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -887,6 +887,11 @@ struct numa_group { struct rcu_head rcu; nodemask_t active_nodes; unsigned long total_faults; + /* + * Faults_cpu is used to decide whether memory should move + * towards the CPU. As a consequence, these stats are weighted + * more by CPU use than by memory faults. + */ unsigned long *faults_cpu; unsigned long faults[0]; }; @@ -1446,11 +1451,41 @@ static void update_task_scan_period(struct task_struct *p, memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); } +/* + * Get the fraction of time the task has been running since the last + * NUMA placement cycle. The scheduler keeps similar statistics, but + * decays those on a 32ms period, which is orders of magnitude off + * from the dozens-of-seconds NUMA balancing period. Use the scheduler + * stats only if the task is so new there are no NUMA statistics yet. + */ +static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) +{ + u64 runtime, delta, now; + /* Use the start of this time slice to avoid calculations. 
*/ + now = p->se.exec_start; + runtime = p->se.sum_exec_runtime; + + if (p->last_task_numa_placement) { + delta = runtime - p->last_sum_exec_runtime; + *period = now - p->last_task_numa_placement; + } else { + delta = p->se.avg.runnable_avg_sum; + *period = p->se.avg.runnable_avg_period; + } + + p->last_sum_exec_runtime = runtime; + p->last_task_numa_placement = now; + + return delta; +} + static void task_numa_placement(struct task_struct *p) { int seq, nid, max_nid = -1, max_group_nid = -1; unsigned long max_faults = 0, max_group_faults = 0; unsigned long fault_types[2] = { 0, 0 }; + unsigned long total_faults; + u64 runtime, period; spinlock_t *group_lock = NULL; seq = ACCESS_ONCE(p->mm->numa_scan_seq); @@ -1459,6 +1494,10 @@ static void task_numa_placement(struct task_struct *p) p->numa_scan_seq = seq; p->numa_scan_period_max = task_scan_max(p); + total_faults = p->numa_faults_locality[0] + + p->numa_faults_locality[1]; + runtime = numa_get_avg_runtime(p, &period); + /* If the task is part of a group prevent parallel updates to group stats */ if (p->numa_group) { group_lock = &p->numa_group->lock; @@ -1471,7 +1510,7 @@ static void task_numa_placement(struct task_struct *p) int priv, i; for (priv = 0; priv < 2; priv++) { - long diff, f_diff; + long diff, f_diff, f_weight; i = task_faults_idx(nid, priv); diff = -p->numa_faults_memory[i]; @@ -1483,8 +1522,18 @@ static void task_numa_placement(struct task_struct *p) fault_types[priv] += p->numa_faults_buffer_memory[i]; p->numa_faults_buffer_memory[i] = 0; + /* + * Normalize the faults_from, so all tasks in a group + * count according to CPU use, instead of by the raw + * number of faults. Tasks with little runtime have + * little over-all impact on throughput, and thus their + * faults are less important. + */ + f_weight = div64_u64(runtime << 16, period + 1); + f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) / + (total_faults + 1); p->numa_faults_cpu[i] >>= 1; - p->numa_faults_cpu[i] += p->numa_faults_buffer_cpu[i]; + p->numa_faults_cpu[i] += f_weight; p->numa_faults_buffer_cpu[i] = 0; faults += p->numa_faults_memory[i]; -- cgit v1.2.3-70-g09d2 From 073219e995b4a3f8cf1ce8228b7ef440b6994ac0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: clean up cgroup_subsys names and initialization cgroup_subsys is a bit messier than it needs to be. * The name of a subsys can be different from its internal identifier defined in cgroup_subsys.h. Most subsystems use the matching name but three - cpu, memory and perf_event - use different ones. * cgroup_subsys_id enums are postfixed with _subsys_id and each cgroup_subsys is postfixed with _subsys. cgroup.h is widely included throughout various subsystems; it doesn't and shouldn't have a claim on such generic names, which carry no qualifier indicating that they belong to cgroup. * cgroup_subsys->subsys_id should always equal the matching cgroup_subsys_id enum; however, we require each controller to initialize it and then BUG if they don't match, which is a bit silly. This patch cleans up cgroup_subsys names and initialization by doing the following. * cgroup_subsys_id enums are now postfixed with _cgrp_id, and each cgroup_subsys with _cgrp_subsys. * With the above, renaming subsys identifiers to match the userland visible names doesn't cause any naming conflicts. All non-matching identifiers are renamed to match the official names.
cpu_cgroup -> cpu
mem_cgroup -> memory
perf -> perf_event
* controllers no longer need to initialize ->subsys_id and ->name. They're generated in cgroup core and set automatically during boot. * Redundant cgroup_subsys declarations removed. * While updating BUG_ON()s in cgroup_init_early(), convert them to WARN()s. BUGging that early during boot is stupid - the kernel can't print anything, even through the serial console, and the trap handler doesn't even link the stack frame properly for back-tracing. This patch doesn't introduce any behavior changes. v2: Rebased on top of fe1217c4f3f7 ("net: net_cls: move cgroupfs classid handling into core"). Signed-off-by: Tejun Heo Acked-by: Neil Horman Acked-by: "David S. Miller" Acked-by: "Rafael J. Wysocki" Acked-by: Michal Hocko Acked-by: Peter Zijlstra Acked-by: Aristeu Rozanski Acked-by: Ingo Molnar Acked-by: Li Zefan Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Serge E. Hallyn Cc: Vivek Goyal Cc: Thomas Graf --- block/blk-cgroup.c | 8 +++----- block/blk-cgroup.h | 2 +- fs/bio.c | 2 +- include/linux/cgroup.h | 7 ++++--- include/linux/cgroup_subsys.h | 6 +++--- include/linux/hugetlb_cgroup.h | 2 +- include/linux/memcontrol.h | 2 +- include/net/cls_cgroup.h | 2 +- include/net/netprio_cgroup.h | 2 +- kernel/cgroup.c | 34 ++++++++++++++++++++-------------- kernel/cgroup_freezer.c | 8 ++------ kernel/cpuset.c | 10 ++++------ kernel/events/core.c | 8 +++----- kernel/sched/core.c | 6 ++---- kernel/sched/cpuacct.c | 6 ++---- mm/hugetlb_cgroup.c | 9 +++------ mm/memcontrol.c | 22 ++++++++++------------ net/core/netclassid_cgroup.c | 6 ++---- net/core/netprio_cgroup.c | 4 +--- net/ipv4/tcp_memcontrol.c | 2 +- security/device_cgroup.c | 8 ++------ 21 files changed, 68 insertions(+), 88 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 660d419918a..1cef07cf9c2 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -906,16 +906,14 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css, return ret; } -struct cgroup_subsys blkio_subsys = { - .name = "blkio", +struct cgroup_subsys blkio_cgrp_subsys = { .css_alloc = blkcg_css_alloc, .css_offline = blkcg_css_offline, .css_free = blkcg_css_free, .can_attach = blkcg_can_attach, - .subsys_id = blkio_subsys_id, .base_cftypes = blkcg_files, }; -EXPORT_SYMBOL_GPL(blkio_subsys); +EXPORT_SYMBOL_GPL(blkio_cgrp_subsys); /** * blkcg_activate_policy - activate a blkcg policy on a request_queue @@ -1105,7 +1103,7 @@ int blkcg_policy_register(struct blkcg_policy *pol) /* everything is in place, add intf files for the new policy */ if (pol->cftypes) - WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes)); + WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes)); ret = 0; out_unlock: mutex_unlock(&blkcg_pol_mutex); diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 86154eab952..453b528c8e1 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -186,7 +186,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) static inline struct blkcg *task_blkcg(struct task_struct *tsk) { - return css_to_blkcg(task_css(tsk, blkio_subsys_id)); + return css_to_blkcg(task_css(tsk, blkio_cgrp_id)); } static inline struct blkcg *bio_blkcg(struct bio *bio) diff --git a/fs/bio.c b/fs/bio.c index 75c49a38223..4872102b839 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1965,7 +1965,7 @@ int bio_associate_current(struct bio *bio) /* associate blkcg if exists */ rcu_read_lock(); - css = task_css(current, blkio_subsys_id); + css =
task_css(current, blkio_cgrp_id); if (css && css_tryget(css)) bio->bi_css = css; rcu_read_unlock(); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index d842a737d44..cd6611e622f 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -41,7 +41,7 @@ extern int cgroupstats_build(struct cgroupstats *stats, extern int proc_cgroup_show(struct seq_file *, void *); /* define the enumeration of all cgroup subsystems */ -#define SUBSYS(_x) _x ## _subsys_id, +#define SUBSYS(_x) _x ## _cgrp_id, enum cgroup_subsys_id { #include CGROUP_SUBSYS_COUNT, @@ -573,7 +573,6 @@ struct cgroup_subsys { struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); - int subsys_id; int disabled; int early_init; @@ -592,6 +591,8 @@ struct cgroup_subsys { bool broken_hierarchy; bool warned_broken_hierarchy; + /* the following two fields are initialized automatically during boot */ + int subsys_id; #define MAX_CGROUP_TYPE_NAMELEN 32 const char *name; @@ -606,7 +607,7 @@ struct cgroup_subsys { struct cftype_set base_cftset; }; -#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; +#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; #include #undef SUBSYS diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 11c42f6a25a..768fe44e19f 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -12,7 +12,7 @@ SUBSYS(debug) #endif #if IS_ENABLED(CONFIG_CGROUP_SCHED) -SUBSYS(cpu_cgroup) +SUBSYS(cpu) #endif #if IS_ENABLED(CONFIG_CGROUP_CPUACCT) @@ -20,7 +20,7 @@ SUBSYS(cpuacct) #endif #if IS_ENABLED(CONFIG_MEMCG) -SUBSYS(mem_cgroup) +SUBSYS(memory) #endif #if IS_ENABLED(CONFIG_CGROUP_DEVICE) @@ -40,7 +40,7 @@ SUBSYS(blkio) #endif #if IS_ENABLED(CONFIG_CGROUP_PERF) -SUBSYS(perf) +SUBSYS(perf_event) #endif #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 787bba3bf55..0129f89cf98 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -49,7 +49,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) static inline bool hugetlb_cgroup_disabled(void) { - if (hugetlb_subsys.disabled) + if (hugetlb_cgrp_subsys.disabled) return true; return false; } diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index abd0113b662..eccfb4a4b37 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -162,7 +162,7 @@ extern int do_swap_account; static inline bool mem_cgroup_disabled(void) { - if (mem_cgroup_subsys.disabled) + if (memory_cgrp_subsys.disabled) return true; return false; } diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 9cf2d5ef38d..c15d39456e1 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -34,7 +34,7 @@ static inline u32 task_cls_classid(struct task_struct *p) return 0; rcu_read_lock(); - classid = container_of(task_css(p, net_cls_subsys_id), + classid = container_of(task_css(p, net_cls_cgrp_id), struct cgroup_cls_state, css)->classid; rcu_read_unlock(); diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h index b7ff5bd3c3c..f2a9597ff53 100644 --- a/include/net/netprio_cgroup.h +++ b/include/net/netprio_cgroup.h @@ -33,7 +33,7 @@ static inline u32 task_netprioidx(struct task_struct *p) u32 idx; rcu_read_lock(); - css = task_css(p, net_prio_subsys_id); + css = task_css(p, net_prio_cgrp_id); idx = css->cgroup->id; rcu_read_unlock(); return idx; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index
ccb16b47e29..fe3f7253aa9 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -120,10 +120,18 @@ static struct workqueue_struct *cgroup_destroy_wq; static struct workqueue_struct *cgroup_pidlist_destroy_wq; /* generate an array of cgroup subsystem pointers */ -#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, +#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, static struct cgroup_subsys *cgroup_subsys[] = { #include }; +#undef SUBSYS + +/* array of cgroup subsystem names */ +#define SUBSYS(_x) [_x ## _cgrp_id] = #_x, +static const char *cgroup_subsys_name[] = { +#include +}; +#undef SUBSYS /* * The dummy hierarchy, reserved for the subsystems that are otherwise @@ -1076,7 +1084,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) BUG_ON(!mutex_is_locked(&cgroup_mutex)); #ifdef CONFIG_CPUSETS - mask = ~(1UL << cpuset_subsys_id); + mask = ~(1UL << cpuset_cgrp_id); #endif memset(opts, 0, sizeof(*opts)); @@ -4528,15 +4536,15 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); for_each_subsys(ss, i) { - BUG_ON(!ss->name); - BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); - BUG_ON(!ss->css_alloc); - BUG_ON(!ss->css_free); - if (ss->subsys_id != i) { - printk(KERN_ERR "cgroup: Subsys %s id == %d\n", - ss->name, ss->subsys_id); - BUG(); - } + WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->subsys_id, + "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", + i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, + ss->subsys_id, ss->name); + WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, + "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); + + ss->subsys_id = i; + ss->name = cgroup_subsys_name[i]; if (ss->early_init) cgroup_init_subsys(ss); @@ -5167,11 +5175,9 @@ static struct cftype debug_files[] = { { } /* terminate */ }; -struct cgroup_subsys debug_subsys = { - .name = "debug", +struct cgroup_subsys debug_cgrp_subsys = { .css_alloc = debug_css_alloc, .css_free = debug_css_free, - .subsys_id = debug_subsys_id, .base_cftypes = debug_files, }; #endif /* CONFIG_CGROUP_DEBUG */ diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 6c3154e477f..98ea26a9907 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -52,7 +52,7 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) static inline struct freezer *task_freezer(struct task_struct *task) { - return css_freezer(task_css(task, freezer_subsys_id)); + return css_freezer(task_css(task, freezer_cgrp_id)); } static struct freezer *parent_freezer(struct freezer *freezer) @@ -84,8 +84,6 @@ static const char *freezer_state_strs(unsigned int state) return "THAWED"; }; -struct cgroup_subsys freezer_subsys; - static struct cgroup_subsys_state * freezer_css_alloc(struct cgroup_subsys_state *parent_css) { @@ -473,13 +471,11 @@ static struct cftype files[] = { { } /* terminate */ }; -struct cgroup_subsys freezer_subsys = { - .name = "freezer", +struct cgroup_subsys freezer_cgrp_subsys = { .css_alloc = freezer_css_alloc, .css_online = freezer_css_online, .css_offline = freezer_css_offline, .css_free = freezer_css_free, - .subsys_id = freezer_subsys_id, .attach = freezer_attach, .fork = freezer_fork, .base_cftypes = files, diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4410ac6a55f..2d018c795fe 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -119,7 +119,7 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) /* Retrieve the cpuset for 
a task */ static inline struct cpuset *task_cs(struct task_struct *task) { - return css_cs(task_css(task, cpuset_subsys_id)); + return css_cs(task_css(task, cpuset_cgrp_id)); } static inline struct cpuset *parent_cs(struct cpuset *cs) @@ -1521,7 +1521,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, struct task_struct *task; struct task_struct *leader = cgroup_taskset_first(tset); struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset, - cpuset_subsys_id); + cpuset_cgrp_id); struct cpuset *cs = css_cs(css); struct cpuset *oldcs = css_cs(oldcss); struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); @@ -2024,8 +2024,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) kfree(cs); } -struct cgroup_subsys cpuset_subsys = { - .name = "cpuset", +struct cgroup_subsys cpuset_cgrp_subsys = { .css_alloc = cpuset_css_alloc, .css_online = cpuset_css_online, .css_offline = cpuset_css_offline, @@ -2033,7 +2032,6 @@ struct cgroup_subsys cpuset_subsys = { .can_attach = cpuset_can_attach, .cancel_attach = cpuset_cancel_attach, .attach = cpuset_attach, - .subsys_id = cpuset_subsys_id, .base_cftypes = files, .early_init = 1, }; @@ -2699,7 +2697,7 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) goto out_free; rcu_read_lock(); - css = task_css(tsk, cpuset_subsys_id); + css = task_css(tsk, cpuset_cgrp_id); retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); rcu_read_unlock(); if (retval < 0) diff --git a/kernel/events/core.c b/kernel/events/core.c index 56003c6edfd..64903731d83 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -342,7 +342,7 @@ struct perf_cgroup { static inline struct perf_cgroup * perf_cgroup_from_task(struct task_struct *task) { - return container_of(task_css(task, perf_subsys_id), + return container_of(task_css(task, perf_event_cgrp_id), struct perf_cgroup, css); } @@ -595,7 +595,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, rcu_read_lock(); - css = css_from_dir(f.file->f_dentry, &perf_subsys); + css = css_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys); if (IS_ERR(css)) { ret = PTR_ERR(css); goto out; @@ -8055,9 +8055,7 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css, task_function_call(task, __perf_cgroup_move, task); } -struct cgroup_subsys perf_subsys = { - .name = "perf_event", - .subsys_id = perf_subsys_id, +struct cgroup_subsys perf_event_cgrp_subsys = { .css_alloc = perf_cgroup_css_alloc, .css_free = perf_cgroup_css_free, .exit = perf_cgroup_exit, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aa..d4cfc556183 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7176,7 +7176,7 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); - tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id, + tg = container_of(task_css_check(tsk, cpu_cgrp_id, lockdep_is_held(&tsk->sighand->siglock)), struct task_group, css); tg = autogroup_task_group(tsk, tg); @@ -7957,8 +7957,7 @@ static struct cftype cpu_files[] = { { } /* terminate */ }; -struct cgroup_subsys cpu_cgroup_subsys = { - .name = "cpu", +struct cgroup_subsys cpu_cgrp_subsys = { .css_alloc = cpu_cgroup_css_alloc, .css_free = cpu_cgroup_css_free, .css_online = cpu_cgroup_css_online, @@ -7966,7 +7965,6 @@ struct cgroup_subsys cpu_cgroup_subsys = { .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, .exit = cpu_cgroup_exit, - .subsys_id = cpu_cgroup_subsys_id, .base_cftypes = cpu_files, .early_init = 1, }; diff --git 
a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 622e0818f90..c143ee380e3 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -41,7 +41,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) /* return cpu accounting group to which this task belongs */ static inline struct cpuacct *task_ca(struct task_struct *tsk) { - return css_ca(task_css(tsk, cpuacct_subsys_id)); + return css_ca(task_css(tsk, cpuacct_cgrp_id)); } static inline struct cpuacct *parent_ca(struct cpuacct *ca) @@ -275,11 +275,9 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val) rcu_read_unlock(); } -struct cgroup_subsys cpuacct_subsys = { - .name = "cpuacct", +struct cgroup_subsys cpuacct_cgrp_subsys = { .css_alloc = cpuacct_css_alloc, .css_free = cpuacct_css_free, - .subsys_id = cpuacct_subsys_id, .base_cftypes = files, .early_init = 1, }; diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index cb00829bb46..b135853e68f 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -30,7 +30,6 @@ struct hugetlb_cgroup { #define MEMFILE_IDX(val) (((val) >> 16) & 0xffff) #define MEMFILE_ATTR(val) ((val) & 0xffff) -struct cgroup_subsys hugetlb_subsys __read_mostly; static struct hugetlb_cgroup *root_h_cgroup __read_mostly; static inline @@ -42,7 +41,7 @@ struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s) static inline struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task) { - return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id)); + return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id)); } static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg) @@ -358,7 +357,7 @@ static void __init __hugetlb_cgroup_file_init(int idx) cft = &h->cgroup_files[4]; memset(cft, 0, sizeof(*cft)); - WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files)); + WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files)); return; } @@ -402,10 +401,8 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) return; } -struct cgroup_subsys hugetlb_subsys = { - .name = "hugetlb", +struct cgroup_subsys hugetlb_cgrp_subsys = { .css_alloc = hugetlb_cgroup_css_alloc, .css_offline = hugetlb_cgroup_css_offline, .css_free = hugetlb_cgroup_css_free, - .subsys_id = hugetlb_subsys_id, }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 53385cd4e6f..04a97bce227 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -66,8 +66,8 @@ #include -struct cgroup_subsys mem_cgroup_subsys __read_mostly; -EXPORT_SYMBOL(mem_cgroup_subsys); +struct cgroup_subsys memory_cgrp_subsys __read_mostly; +EXPORT_SYMBOL(memory_cgrp_subsys); #define MEM_CGROUP_RECLAIM_RETRIES 5 static struct mem_cgroup *root_mem_cgroup __read_mostly; @@ -538,7 +538,7 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) { struct cgroup_subsys_state *css; - css = css_from_id(id - 1, &mem_cgroup_subsys); + css = css_from_id(id - 1, &memory_cgrp_subsys); return mem_cgroup_from_css(css); } @@ -1072,7 +1072,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) if (unlikely(!p)) return NULL; - return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id)); + return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); } struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) @@ -1702,7 +1702,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) rcu_read_lock(); mem_cgrp = memcg->css.cgroup; - task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); + task_cgrp = 
task_cgroup(p, memory_cgrp_id); ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); if (ret < 0) { @@ -6187,7 +6187,7 @@ static int memcg_write_event_control(struct cgroup_subsys_state *css, ret = -EINVAL; cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, - &mem_cgroup_subsys); + &memory_cgrp_subsys); if (cfile_css == css && css_tryget(css)) ret = 0; @@ -6566,11 +6566,11 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) * unfortunate state in our controller. */ if (parent != root_mem_cgroup) - mem_cgroup_subsys.broken_hierarchy = true; + memory_cgrp_subsys.broken_hierarchy = true; } mutex_unlock(&memcg_create_mutex); - return memcg_init_kmem(memcg, &mem_cgroup_subsys); + return memcg_init_kmem(memcg, &memory_cgrp_subsys); } /* @@ -7264,9 +7264,7 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) mem_cgroup_from_css(root_css)->use_hierarchy = true; } -struct cgroup_subsys mem_cgroup_subsys = { - .name = "memory", - .subsys_id = mem_cgroup_subsys_id, +struct cgroup_subsys memory_cgrp_subsys = { .css_alloc = mem_cgroup_css_alloc, .css_online = mem_cgroup_css_online, .css_offline = mem_cgroup_css_offline, @@ -7292,7 +7290,7 @@ __setup("swapaccount=", enable_swap_account); static void __init memsw_file_init(void) { - WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files)); + WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files)); } static void __init enable_swap_cgroup(void) diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 9e5ad5d74e6..b865662fba7 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -23,7 +23,7 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state struct cgroup_cls_state *task_cls_state(struct task_struct *p) { - return css_cls_state(task_css(p, net_cls_subsys_id)); + return css_cls_state(task_css(p, net_cls_cgrp_id)); } EXPORT_SYMBOL_GPL(task_cls_state); @@ -102,12 +102,10 @@ static struct cftype ss_files[] = { { } /* terminate */ }; -struct cgroup_subsys net_cls_subsys = { - .name = "net_cls", +struct cgroup_subsys net_cls_cgrp_subsys = { .css_alloc = cgrp_css_alloc, .css_online = cgrp_css_online, .css_free = cgrp_css_free, .attach = cgrp_attach, - .subsys_id = net_cls_subsys_id, .base_cftypes = ss_files, }; diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 857e1603f9b..d7d23e28faf 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -244,13 +244,11 @@ static struct cftype ss_files[] = { { } /* terminate */ }; -struct cgroup_subsys net_prio_subsys = { - .name = "net_prio", +struct cgroup_subsys net_prio_cgrp_subsys = { .css_alloc = cgrp_css_alloc, .css_online = cgrp_css_online, .css_free = cgrp_css_free, .attach = net_prio_attach, - .subsys_id = net_prio_subsys_id, .base_cftypes = ss_files, }; diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index f7e522c558b..20a0aca9131 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c @@ -219,7 +219,7 @@ static struct cftype tcp_files[] = { static int __init tcp_memcontrol_init(void) { - WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files)); + WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, tcp_files)); return 0; } __initcall(tcp_memcontrol_init); diff --git a/security/device_cgroup.c b/security/device_cgroup.c index d3b6d2cd3a0..7f88bcde7c6 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -58,11 +58,9 @@ static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s) 
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task) { - return css_to_devcgroup(task_css(task, devices_subsys_id)); + return css_to_devcgroup(task_css(task, devices_cgrp_id)); } -struct cgroup_subsys devices_subsys; - /* * called under devcgroup_mutex */ @@ -684,13 +682,11 @@ static struct cftype dev_cgroup_files[] = { { } /* terminate */ }; -struct cgroup_subsys devices_subsys = { - .name = "devices", +struct cgroup_subsys devices_cgrp_subsys = { .css_alloc = devcgroup_css_alloc, .css_free = devcgroup_css_free, .css_online = devcgroup_online, .css_offline = devcgroup_offline, - .subsys_id = devices_subsys_id, .base_cftypes = dev_cgroup_files, }; -- cgit v1.2.3-70-g09d2 From d0ea026808ad81de2af14938448419a95211b938 Mon Sep 17 00:00:00 2001 From: Dongsheng Yang Date: Mon, 27 Jan 2014 22:00:45 -0500 Subject: sched: Implement task_nice() as static inline function As patch "sched: Move the priority specific bits into a new header file" exposes the priority related macros in linux/sched/prio.h, we don't have to implement task_nice() in kernel/sched/core.c any more. This patch implements it in include/linux/sched.h as a static inline function, saving kernel stack and enhancing performance a bit. Signed-off-by: Dongsheng Yang Cc: clark.williams@gmail.com Cc: rostedt@goodmis.org Cc: raistlin@linux.it Cc: juri.lelli@gmail.com Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1390878045-7096-1-git-send-email-yangds.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 11 ++++++++++- include/linux/sched/prio.h | 1 - kernel/sched/core.c | 26 +++++++------------------- kernel/sched/cputime.c | 4 ++-- 4 files changed, 19 insertions(+), 23 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/include/linux/sched.h b/include/linux/sched.h index d97d0a8e87d..e3d556427b2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2094,7 +2094,16 @@ static inline void sched_autogroup_exit(struct signal_struct *sig) { } extern bool yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); -extern int task_nice(const struct task_struct *p); +/** + * task_nice - return the nice value of a given task. + * @p: the task in question. + * + * Return: The nice value [ -20 ... 0 ... 19 ].
+ */ +static inline int task_nice(const struct task_struct *p) +{ + return PRIO_TO_NICE((p)->static_prio); +} extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index 13216f16762..410ccb74c9e 100644 --- a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -27,7 +27,6 @@ */ #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) -#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) /* * 'User priority' is the nice value converted to something we diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 210a12acf2c..104c8164e04 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3000,7 +3000,7 @@ void set_user_nice(struct task_struct *p, long nice) unsigned long flags; struct rq *rq; - if (TASK_NICE(p) == nice || nice < -20 || nice > 19) + if (task_nice(p) == nice || nice < -20 || nice > 19) return; /* * We have to be careful, if called from sys_setpriority(), @@ -3078,7 +3078,7 @@ SYSCALL_DEFINE1(nice, int, increment) if (increment > 40) increment = 40; - nice = TASK_NICE(current) + increment; + nice = task_nice(current) + increment; if (nice < -20) nice = -20; if (nice > 19) @@ -3110,18 +3110,6 @@ int task_prio(const struct task_struct *p) return p->prio - MAX_RT_PRIO; } -/** - * task_nice - return the nice value of a given task. - * @p: the task in question. - * - * Return: The nice value [ -20 ... 0 ... 19 ]. - */ -int task_nice(const struct task_struct *p) -{ - return TASK_NICE(p); -} -EXPORT_SYMBOL(task_nice); - /** * idle_cpu - is a given cpu idle currently? * @cpu: the processor in question. @@ -3321,7 +3309,7 @@ recheck: */ if (user && !capable(CAP_SYS_NICE)) { if (fair_policy(policy)) { - if (attr->sched_nice < TASK_NICE(p) && + if (attr->sched_nice < task_nice(p) && !can_nice(p, attr->sched_nice)) return -EPERM; } @@ -3345,7 +3333,7 @@ recheck: * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. */ if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { - if (!can_nice(p, TASK_NICE(p))) + if (!can_nice(p, task_nice(p))) return -EPERM; } @@ -3385,7 +3373,7 @@ recheck: * If not changing anything there's no need to proceed further: */ if (unlikely(policy == p->policy)) { - if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p)) + if (fair_policy(policy) && attr->sched_nice != task_nice(p)) goto change; if (rt_policy(policy) && attr->sched_priority != p->rt_priority) goto change; @@ -3837,7 +3825,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, else if (task_has_rt_policy(p)) attr.sched_priority = p->rt_priority; else - attr.sched_nice = TASK_NICE(p); + attr.sched_nice = task_nice(p); rcu_read_unlock(); @@ -7010,7 +6998,7 @@ void normalize_rt_tasks(void) * Renice negative nice level userspace * tasks back to 0: */ - if (TASK_NICE(p) < 0 && p->mm) + if (task_nice(p) < 0 && p->mm) set_user_nice(p, 0); continue; } diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 99947919e30..58624a65f12 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -142,7 +142,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime, p->utimescaled += cputime_scaled; account_group_user_time(p, cputime); - index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; + index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; /* Add user time to cpustat. 
*/ task_group_account_field(p, index, (__force u64) cputime); @@ -169,7 +169,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, p->gtime += cputime; /* Add guest time to cpustat. */ - if (TASK_NICE(p) > 0) { + if (task_nice(p) > 0) { cpustat[CPUTIME_NICE] += (__force u64) cputime; cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; } else { -- cgit v1.2.3-70-g09d2 From b4f2ab43615e5b36c48fffa99f26aca381839ac6 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Fri, 17 Jan 2014 10:04:01 +0100 Subject: sched: Remove 'cpu' parameter from idle_balance() The cpu parameter passed to idle_balance() is not needed as it can be retrieved from 'struct rq'. Signed-off-by: Daniel Lezcano Cc: alex.shi@linaro.org Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1389949444-14821-1-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- kernel/sched/fair.c | 3 ++- kernel/sched/sched.h | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 104c8164e04..74dd565c2e1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2705,7 +2705,7 @@ need_resched: pre_schedule(rq, prev); if (unlikely(!rq->nr_running)) - idle_balance(cpu, rq); + idle_balance(rq); put_prev_task(rq, prev); next = pick_next_task(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4caa8030824..428bc9d2c38 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6531,12 +6531,13 @@ out: * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. */ -void idle_balance(int this_cpu, struct rq *this_rq) +void idle_balance(struct rq *this_rq) { struct sched_domain *sd; int pulled_task = 0; unsigned long next_balance = jiffies + HZ; u64 curr_cost = 0; + int this_cpu = this_rq->cpu; this_rq->idle_stamp = rq_clock(this_rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b44720d38ae..82c0e02f2a5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1158,7 +1158,7 @@ extern const struct sched_class idle_sched_class; extern void update_group_power(struct sched_domain *sd, int cpu); extern void trigger_load_balance(struct rq *rq); -extern void idle_balance(int this_cpu, struct rq *this_rq); +extern void idle_balance(struct rq *this_rq); extern void idle_enter_fair(struct rq *this_rq); extern void idle_exit_fair(struct rq *this_rq); -- cgit v1.2.3-70-g09d2 From 3c4017c13f91069194fce3160944efec50f15a6e Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Fri, 17 Jan 2014 10:04:03 +0100 Subject: sched: Move rq->idle_stamp up to the core idle_balance() modifies the rq->idle_stamp field, making this information shared across core.c and fair.c. As the previous patch lets us know whether the cpu is going to idle or not, let's encapsulate the rq->idle_stamp information in core.c by moving it up to the caller. The idle_balance() function returns true when balancing occurred and the cpu won't be idle, false if no balance happened and the cpu is going idle.
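The caller-side pattern this establishes can be modeled in a few lines of stand-alone C (toy stubs only; the real code is the core.c hunk below):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the new contract: stamp idle entry before balancing,
 * so the time spent balancing is accounted as idle time, and clear
 * the stamp only if work was pulled and the cpu won't actually idle. */
static unsigned long rq_clock_stub(void) { return 12345; }
static bool idle_balance_stub(void) { return false; /* nothing pulled */ }

int main(void)
{
	unsigned long idle_stamp = rq_clock_stub(); /* set _before_ balancing */

	if (idle_balance_stub())
		idle_stamp = 0;	/* pulled work: cpu won't be idle */

	printf("idle_stamp=%lu (non-zero: cpu is going idle)\n", idle_stamp);
	return 0;
}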
Signed-off-by: Daniel Lezcano Cc: alex.shi@linaro.org Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1389949444-14821-3-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 11 +++++++++-- kernel/sched/fair.c | 14 ++++++-------- kernel/sched/sched.h | 2 +- 3 files changed, 16 insertions(+), 11 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 74dd565c2e1..417cf657a60 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2704,8 +2704,15 @@ need_resched: pre_schedule(rq, prev); - if (unlikely(!rq->nr_running)) - idle_balance(rq); + if (unlikely(!rq->nr_running)) { + /* + * We must set idle_stamp _before_ calling idle_balance(), such + * that we measure the duration of idle_balance() as idle time. + */ + rq->idle_stamp = rq_clock(rq); + if (idle_balance(rq)) + rq->idle_stamp = 0; + } put_prev_task(rq, prev); next = pick_next_task(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5ebc6817c03..04fea7744a9 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6531,7 +6531,7 @@ out: * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. */ -void idle_balance(struct rq *this_rq) +int idle_balance(struct rq *this_rq) { struct sched_domain *sd; int pulled_task = 0; @@ -6539,10 +6539,8 @@ void idle_balance(struct rq *this_rq) u64 curr_cost = 0; int this_cpu = this_rq->cpu; - this_rq->idle_stamp = rq_clock(this_rq); - if (this_rq->avg_idle < sysctl_sched_migration_cost) - return; + return 0; /* * Drop the rq->lock, but keep IRQ/preempt disabled. @@ -6580,10 +6578,8 @@ void idle_balance(struct rq *this_rq) interval = msecs_to_jiffies(sd->balance_interval); if (time_after(next_balance, sd->last_balance + interval)) next_balance = sd->last_balance + interval; - if (pulled_task) { - this_rq->idle_stamp = 0; + if (pulled_task) break; - } } rcu_read_unlock(); @@ -6594,7 +6590,7 @@ void idle_balance(struct rq *this_rq) * A task could have be enqueued in the meantime */ if (this_rq->nr_running && !pulled_task) - return; + return 1; if (pulled_task || time_after(jiffies, this_rq->next_balance)) { /* @@ -6606,6 +6602,8 @@ void idle_balance(struct rq *this_rq) if (curr_cost > this_rq->max_idle_balance_cost) this_rq->max_idle_balance_cost = curr_cost; + + return pulled_task; } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 82c0e02f2a5..bb89991ee40 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1158,7 +1158,7 @@ extern const struct sched_class idle_sched_class; extern void update_group_power(struct sched_domain *sd, int cpu); extern void trigger_load_balance(struct rq *rq); -extern void idle_balance(struct rq *this_rq); +extern int idle_balance(struct rq *this_rq); extern void idle_enter_fair(struct rq *this_rq); extern void idle_exit_fair(struct rq *this_rq); -- cgit v1.2.3-70-g09d2 From 606dba2e289446600a0b68422ed2019af5355c12 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 11 Feb 2012 06:05:00 +0100 Subject: sched: Push put_prev_task() into pick_next_task() In order to avoid having to do put/set on a whole cgroup hierarchy when we context switch, push the put into pick_next_task() so that both operations are in the same function. Further changes then allow us to possibly optimize away redundant work. 
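A stand-alone toy model of the reworked contract (all names here are hypothetical stand-ins for the kernel types; the real changes follow in the diffs):

#include <stdio.h>

/* Toy model: pick_next_task() now receives @prev and is responsible
 * for putting it, so put and pick live in one function and redundant
 * put/set pairs across a cgroup hierarchy can later be elided. */
struct task { const char *name; };

static void put_prev_task(struct task *prev)
{
	printf("put %s\n", prev->name);
}

static struct task *pick_next_task(struct task *prev, struct task *candidate)
{
	if (!candidate)		/* nothing runnable in this class */
		return NULL;
	if (prev)		/* put prev only once we will pick */
		put_prev_task(prev);
	return candidate;
}

int main(void)
{
	struct task a = { "prev" }, b = { "next" };
	struct task *next = pick_next_task(&a, &b);

	printf("picked %s\n", next ? next->name : "(idle)");
	return 0;
}

Note how the sketch only puts prev once it is sure it will return a task; the per-class implementations in the diffs below follow the same check-then-put order.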
Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1328936700.2476.17.camel@laptop Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 21 ++++++++------------- kernel/sched/deadline.c | 5 ++++- kernel/sched/fair.c | 6 +++++- kernel/sched/idle_task.c | 6 +++++- kernel/sched/rt.c | 27 ++++++++++++++++----------- kernel/sched/sched.h | 8 +++++++- kernel/sched/stop_task.c | 16 ++++++++++------ 7 files changed, 55 insertions(+), 34 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 417cf657a60..dedb5f07666 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2579,18 +2579,11 @@ static inline void schedule_debug(struct task_struct *prev) schedstat_inc(this_rq(), sched_count); } -static void put_prev_task(struct rq *rq, struct task_struct *prev) -{ - if (prev->on_rq || rq->skip_clock_update < 0) - update_rq_clock(rq); - prev->sched_class->put_prev_task(rq, prev); -} - /* * Pick up the highest-prio task: */ static inline struct task_struct * -pick_next_task(struct rq *rq) +pick_next_task(struct rq *rq, struct task_struct *prev) { const struct sched_class *class; struct task_struct *p; @@ -2600,13 +2593,13 @@ pick_next_task(struct rq *rq) * the fair class we can call that function directly: */ if (likely(rq->nr_running == rq->cfs.h_nr_running)) { - p = fair_sched_class.pick_next_task(rq); + p = fair_sched_class.pick_next_task(rq, prev); if (likely(p)) return p; } for_each_class(class) { - p = class->pick_next_task(rq); + p = class->pick_next_task(rq, prev); if (p) return p; } @@ -2714,8 +2707,10 @@ need_resched: rq->idle_stamp = 0; } - put_prev_task(rq, prev); - next = pick_next_task(rq); + if (prev->on_rq || rq->skip_clock_update < 0) + update_rq_clock(rq); + + next = pick_next_task(rq, prev); clear_tsk_need_resched(prev); clear_preempt_need_resched(); rq->skip_clock_update = 0; @@ -4748,7 +4743,7 @@ static void migrate_tasks(unsigned int dead_cpu) if (rq->nr_running == 1) break; - next = pick_next_task(rq); + next = pick_next_task(rq, NULL); BUG_ON(!next); next->sched_class->put_prev_task(rq, next); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index b5700bceee5..50797d57608 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -990,7 +990,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, return rb_entry(left, struct sched_dl_entity, rb_node); } -struct task_struct *pick_next_task_dl(struct rq *rq) +struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev) { struct sched_dl_entity *dl_se; struct task_struct *p; @@ -1001,6 +1001,9 @@ struct task_struct *pick_next_task_dl(struct rq *rq) if (unlikely(!dl_rq->dl_nr_running)) return NULL; + if (prev) + prev->sched_class->put_prev_task(rq, prev); + dl_se = pick_next_dl_entity(rq, dl_rq); BUG_ON(!dl_se); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 748a7ac3388..c4bb0ac26a7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4655,7 +4655,8 @@ preempt: set_last_buddy(se); } -static struct task_struct *pick_next_task_fair(struct rq *rq) +static struct task_struct * +pick_next_task_fair(struct rq *rq, struct task_struct *prev) { struct task_struct *p; struct cfs_rq *cfs_rq = &rq->cfs; @@ -4664,6 +4665,9 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) if (!cfs_rq->nr_running) return NULL; + if (prev) + prev->sched_class->put_prev_task(rq, prev); + do { se = pick_next_entity(cfs_rq); set_next_entity(cfs_rq, se); diff --git a/kernel/sched/idle_task.c 
b/kernel/sched/idle_task.c index 516c3d9ceea..e5c922ac40c 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -33,8 +33,12 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl resched_task(rq->idle); } -static struct task_struct *pick_next_task_idle(struct rq *rq) +static struct task_struct * +pick_next_task_idle(struct rq *rq, struct task_struct *prev) { + if (prev) + prev->sched_class->put_prev_task(rq, prev); + schedstat_inc(rq, sched_goidle); #ifdef CONFIG_SMP /* Trigger the post schedule to do an idle_enter for CFS */ diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a2740b775b4..a15ca1c0c7b 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1310,15 +1310,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) { struct sched_rt_entity *rt_se; struct task_struct *p; - struct rt_rq *rt_rq; - - rt_rq = &rq->rt; - - if (!rt_rq->rt_nr_running) - return NULL; - - if (rt_rq_throttled(rt_rq)) - return NULL; + struct rt_rq *rt_rq = &rq->rt; do { rt_se = pick_next_rt_entity(rq, rt_rq); @@ -1332,9 +1324,22 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) return p; } -static struct task_struct *pick_next_task_rt(struct rq *rq) +static struct task_struct * +pick_next_task_rt(struct rq *rq, struct task_struct *prev) { - struct task_struct *p = _pick_next_task_rt(rq); + struct task_struct *p; + struct rt_rq *rt_rq = &rq->rt; + + if (!rt_rq->rt_nr_running) + return NULL; + + if (rt_rq_throttled(rt_rq)) + return NULL; + + if (prev) + prev->sched_class->put_prev_task(rq, prev); + + p = _pick_next_task_rt(rq); /* The running task is never eligible for pushing */ if (p) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index bb89991ee40..c534cf4181a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1105,7 +1105,13 @@ struct sched_class { void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); - struct task_struct * (*pick_next_task) (struct rq *rq); + /* + * It is the responsibility of the pick_next_task() method that will + * return the next task to call put_prev_task() on the @prev task or + * something equivalent. + */ + struct task_struct * (*pick_next_task) (struct rq *rq, + struct task_struct *prev); void (*put_prev_task) (struct rq *rq, struct task_struct *p); #ifdef CONFIG_SMP diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index fdb6bb0b335..a4147c9d201 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -23,16 +23,20 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) /* we're never preempted */ } -static struct task_struct *pick_next_task_stop(struct rq *rq) +static struct task_struct * +pick_next_task_stop(struct rq *rq, struct task_struct *prev) { struct task_struct *stop = rq->stop; - if (stop && stop->on_rq) { - stop->se.exec_start = rq_clock_task(rq); - return stop; - } + if (!stop || !stop->on_rq) + return NULL; - return NULL; + if (prev) + prev->sched_class->put_prev_task(rq, prev); + + stop->se.exec_start = rq_clock_task(rq); + + return stop; } static void -- cgit v1.2.3-70-g09d2 From 38033c37faab850ed5d33bb675c4de6c66be84d8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jan 2014 20:32:21 +0100 Subject: sched: Push down pre_schedule() and idle_balance() This patch merges idle_balance() and pre_schedule() and pushes both of them into pick_next_task(). Conceptually pre_schedule() and idle_balance() are rather similar; both are used to pull more work onto the current CPU.
We cannot however first move idle_balance() into pre_schedule_fair() since there is no guarantee the last runnable task is a fair task, and thus we would miss newidle balances. Similarly, the dl and rt pre_schedule calls must be run before idle_balance() since their respective tasks have higher priority and it would not do to delay their execution searching for less important tasks first. However, by noticing that pick_next_task() already traverses the sched_class hierarchy in the right order, we can get the right behaviour and do away with both calls. We must however change the special case optimization to also require that prev is of sched_class_fair, otherwise we can miss doing a dl or rt pull where we needed one. Signed-off-by: Peter Zijlstra Cc: Linus Torvalds Cc: Andrew Morton Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/n/tip-a8k6vvaebtn64nie345kx1je@git.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 26 ++------------------------ kernel/sched/deadline.c | 15 +++++++-------- kernel/sched/fair.c | 26 ++++++++++++++++++++++---- kernel/sched/idle_task.c | 12 +++++------- kernel/sched/rt.c | 16 ++++++++-------- kernel/sched/sched.h | 1 - 6 files changed, 44 insertions(+), 52 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index dedb5f07666..3068f37f7c5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2169,13 +2169,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) #ifdef CONFIG_SMP -/* assumes rq->lock is held */ -static inline void pre_schedule(struct rq *rq, struct task_struct *prev) -{ - if (prev->sched_class->pre_schedule) - prev->sched_class->pre_schedule(rq, prev); -} - /* rq->lock is NOT held, but preemption is disabled */ static inline void post_schedule(struct rq *rq) { @@ -2193,10 +2186,6 @@ static inline void post_schedule(struct rq *rq) #else -static inline void pre_schedule(struct rq *rq, struct task_struct *p) -{ -} - static inline void post_schedule(struct rq *rq) { } @@ -2592,7 +2581,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev) * Optimization: we know that if all tasks are in * the fair class we can call that function directly: */ - if (likely(rq->nr_running == rq->cfs.h_nr_running)) { + if (likely(prev->sched_class == &fair_sched_class && + rq->nr_running == rq->cfs.h_nr_running)) { p = fair_sched_class.pick_next_task(rq, prev); if (likely(p)) return p; } @@ -2695,18 +2685,6 @@ need_resched: switch_count = &prev->nvcsw; } - pre_schedule(rq, prev); - - if (unlikely(!rq->nr_running)) { - /* - * We must set idle_stamp _before_ calling idle_balance(), such - * that we measure the duration of idle_balance() as idle time.
- */ - rq->idle_stamp = rq_clock(rq); - if (idle_balance(rq)) - rq->idle_stamp = 0; - } - if (prev->on_rq || rq->skip_clock_update < 0) update_rq_clock(rq); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 50797d57608..ed31ef66ab9 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -944,6 +944,8 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) resched_task(rq->curr); } +static int pull_dl_task(struct rq *this_rq); + #endif /* CONFIG_SMP */ /* @@ -998,6 +1000,11 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev) dl_rq = &rq->dl; +#ifdef CONFIG_SMP + if (dl_task(prev)) + pull_dl_task(rq); +#endif + if (unlikely(!dl_rq->dl_nr_running)) return NULL; @@ -1429,13 +1436,6 @@ skip: return ret; } -static void pre_schedule_dl(struct rq *rq, struct task_struct *prev) -{ - /* Try to pull other tasks here */ - if (dl_task(prev)) - pull_dl_task(rq); -} - static void post_schedule_dl(struct rq *rq) { push_dl_tasks(rq); @@ -1628,7 +1628,6 @@ const struct sched_class dl_sched_class = { .set_cpus_allowed = set_cpus_allowed_dl, .rq_online = rq_online_dl, .rq_offline = rq_offline_dl, - .pre_schedule = pre_schedule_dl, .post_schedule = post_schedule_dl, .task_woken = task_woken_dl, #endif diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a81b241ff70..43b49fe077a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2577,7 +2577,8 @@ void idle_exit_fair(struct rq *this_rq) update_rq_runnable_avg(this_rq, 0); } -#else +#else /* CONFIG_SMP */ + static inline void update_entity_load_avg(struct sched_entity *se, int update_cfs_rq) {} static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} @@ -2589,7 +2590,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, int sleep) {} static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) {} -#endif +#endif /* CONFIG_SMP */ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) { @@ -4682,9 +4683,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) struct sched_entity *se; struct task_struct *p; +again: __maybe_unused #ifdef CONFIG_FAIR_GROUP_SCHED if (!cfs_rq->nr_running) - return NULL; + goto idle; if (!prev || prev->sched_class != &fair_sched_class) goto simple; @@ -4760,7 +4762,7 @@ simple: #endif if (!cfs_rq->nr_running) - return NULL; + goto idle; if (prev) prev->sched_class->put_prev_task(rq, prev); @@ -4777,6 +4779,22 @@ simple: hrtick_start_fair(rq, p); return p; + +idle: +#ifdef CONFIG_SMP + idle_enter_fair(rq); + /* + * We must set idle_stamp _before_ calling idle_balance(), such that we + * measure the duration of idle_balance() as idle time. 
+ */ + rq->idle_stamp = rq_clock(rq); + if (idle_balance(rq)) { /* drops rq->lock */ + rq->idle_stamp = 0; + goto again; + } +#endif + + return NULL; } /* diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 721371bf03b..f7d03af79a5 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -13,13 +13,8 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) { return task_cpu(p); /* IDLE tasks as never migrated */ } - -static void pre_schedule_idle(struct rq *rq, struct task_struct *prev) -{ - idle_exit_fair(rq); - rq_last_tick_reset(rq); -} #endif /* CONFIG_SMP */ + /* * Idle tasks are unconditionally rescheduled: */ @@ -56,6 +51,10 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { +#ifdef CONFIG_SMP + idle_exit_fair(rq); + rq_last_tick_reset(rq); +#endif } static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) @@ -99,7 +98,6 @@ const struct sched_class idle_sched_class = { #ifdef CONFIG_SMP .select_task_rq = select_task_rq_idle, - .pre_schedule = pre_schedule_idle, #endif .set_curr_task = set_curr_task_idle, diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a15ca1c0c7b..72f9ec75997 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -229,6 +229,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) #ifdef CONFIG_SMP +static int pull_rt_task(struct rq *this_rq); + static inline int rt_overloaded(struct rq *rq) { return atomic_read(&rq->rd->rto_count); @@ -1330,6 +1332,12 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev) struct task_struct *p; struct rt_rq *rt_rq = &rq->rt; +#ifdef CONFIG_SMP + /* Try to pull RT tasks here if we lower this rq's prio */ + if (rq->rt.highest_prio.curr > prev->prio) + pull_rt_task(rq); +#endif + if (!rt_rq->rt_nr_running) return NULL; @@ -1721,13 +1729,6 @@ skip: return ret; } -static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) -{ - /* Try to pull RT tasks here if we lower this rq's prio */ - if (rq->rt.highest_prio.curr > prev->prio) - pull_rt_task(rq); -} - static void post_schedule_rt(struct rq *rq) { push_rt_tasks(rq); @@ -2004,7 +2005,6 @@ const struct sched_class rt_sched_class = { .set_cpus_allowed = set_cpus_allowed_rt, .rq_online = rq_online_rt, .rq_offline = rq_offline_rt, - .pre_schedule = pre_schedule_rt, .post_schedule = post_schedule_rt, .task_woken = task_woken_rt, .switched_from = switched_from_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c534cf4181a..1bf34c257d3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1118,7 +1118,6 @@ struct sched_class { int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); void (*migrate_task_rq)(struct task_struct *p, int next_cpu); - void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); void (*post_schedule) (struct rq *this_rq); void (*task_waking) (struct task_struct *task); void (*task_woken) (struct rq *this_rq, struct task_struct *task); -- cgit v1.2.3-70-g09d2 From 37e6bae8395a94b4dd934c92b02b9408be992365 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Thu, 23 Jan 2014 18:39:54 +0800 Subject: sched: Add statistic for newidle load balance cost Track rq->max_idle_balance_cost and sd->max_newidle_lb_cost; it's useful to be able to inspect these values in debug mode.
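For inspecting the per-domain value from user space, a minimal sketch (assuming the standard sched_domain sysctl layout under /proc/sys/kernel/sched_domain/ and at least one domain under cpu0; adjust the path for your topology):

#include <stdio.h>

/* Minimal sketch: read the new per-domain knob from procfs.
 * The value is the maximum observed newidle balance cost, in ns. */
int main(void)
{
	const char *path =
		"/proc/sys/kernel/sched_domain/cpu0/domain0/max_newidle_lb_cost";
	FILE *f = fopen(path, "r");
	unsigned long cost;

	if (f && fscanf(f, "%lu", &cost) == 1)
		printf("cpu0/domain0 max_newidle_lb_cost: %lu ns\n", cost);
	if (f)
		fclose(f);
	return 0;
}

The rq-side counterpart, max_idle_balance_cost, shows up in /proc/sched_debug via the P64() entry added in the debug.c hunk below.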
Signed-off-by: Alex Shi Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/52E0F3BF.5020904@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 9 ++++++--- kernel/sched/debug.c | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3068f37f7c5..fb9764fbc53 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4811,7 +4811,7 @@ set_table_entry(struct ctl_table *entry, static struct ctl_table * sd_alloc_ctl_domain_table(struct sched_domain *sd) { - struct ctl_table *table = sd_alloc_ctl_entry(13); + struct ctl_table *table = sd_alloc_ctl_entry(14); if (table == NULL) return NULL; @@ -4839,9 +4839,12 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) sizeof(int), 0644, proc_dointvec_minmax, false); set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax, false); - set_table_entry(&table[11], "name", sd->name, + set_table_entry(&table[11], "max_newidle_lb_cost", + &sd->max_newidle_lb_cost, + sizeof(long), 0644, proc_doulongvec_minmax, false); + set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring, false); - /* &table[12] is terminator */ + /* &table[13] is terminator */ return table; } diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 31b908daaa1..f3344c31632 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -321,6 +321,7 @@ do { \ P(sched_goidle); #ifdef CONFIG_SMP P64(avg_idle); + P64(max_idle_balance_cost); #endif P(ttwu_count); -- cgit v1.2.3-70-g09d2 From 924f0d9a2078f49ff331bb43196ec5afadc16b8f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:41 -0500 Subject: cgroup: drop @skip_css from cgroup_taskset_for_each() If !NULL, @skip_css makes cgroup_taskset_for_each() skip the matching css. The intention of the interface is to make it easy to skip css's (cgroup_subsys_states) which already match the migration target; however, this is entirely unnecessary as migration taskset doesn't include tasks which are already in the target cgroup. Drop @skip_css from cgroup_taskset_for_each(). 
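For controller code the conversion is mechanical; a minimal sketch of a hypothetical attach callback after this change (demo_attach() and its body are illustrative, not taken from this patch):

	static void demo_attach(struct cgroup_subsys_state *css,
				struct cgroup_taskset *tset)
	{
		struct task_struct *task;

		/* was: cgroup_taskset_for_each(task, css, tset) */
		cgroup_taskset_for_each(task, tset) {
			/* per-task migration work goes here */
		}
	}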
Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Daniel Borkmann --- block/blk-cgroup.c | 2 +- include/linux/cgroup.h | 8 ++------ kernel/cgroup_freezer.c | 2 +- kernel/cpuset.c | 4 ++-- kernel/events/core.c | 2 +- kernel/sched/core.c | 4 ++-- net/core/netclassid_cgroup.c | 2 +- net/core/netprio_cgroup.c | 2 +- 8 files changed, 11 insertions(+), 15 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 1cef07cf9c2..4aefd46d7d9 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -894,7 +894,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css, int ret = 0; /* task_lock() is needed to avoid races with exit_io_context() */ - cgroup_taskset_for_each(task, css, tset) { + cgroup_taskset_for_each(task, tset) { task_lock(task); ioc = task->io_context; if (ioc && atomic_read(&ioc->nr_tasks) > 1) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 3bd0a713837..581a124c7bc 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -535,15 +535,11 @@ int cgroup_taskset_size(struct cgroup_taskset *tset); /** * cgroup_taskset_for_each - iterate cgroup_taskset * @task: the loop cursor - * @skip_css: skip if task's css matches this, %NULL to iterate through all * @tset: taskset to iterate */ -#define cgroup_taskset_for_each(task, skip_css, tset) \ +#define cgroup_taskset_for_each(task, tset) \ for ((task) = cgroup_taskset_first((tset)); (task); \ - (task) = cgroup_taskset_next((tset))) \ - if (!(skip_css) || \ - cgroup_taskset_cur_css((tset), \ - (skip_css)->ss->id) != (skip_css)) + (task) = cgroup_taskset_next((tset))) /* * Control Group subsystem type. diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 98ea26a9907..7201a637c40 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -187,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, * current state before executing the following - !frozen tasks may * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. */ - cgroup_taskset_for_each(task, new_css, tset) { + cgroup_taskset_for_each(task, tset) { if (!(freezer->state & CGROUP_FREEZING)) { __thaw_task(task); } else { diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 65ae0bdf4af..bf20e4ac2f7 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1398,7 +1398,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) goto out_unlock; - cgroup_taskset_for_each(task, css, tset) { + cgroup_taskset_for_each(task, tset) { /* * Kthreads which disallow setaffinity shouldn't be moved * to a new cpuset; we don't want to change their cpu @@ -1467,7 +1467,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); - cgroup_taskset_for_each(task, css, tset) { + cgroup_taskset_for_each(task, tset) { /* * can_attach beforehand should guarantee that this doesn't * fail. 
TODO: have a better way to handle failure here diff --git a/kernel/events/core.c b/kernel/events/core.c index a3c3ab50271..6dd714955b0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8021,7 +8021,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css, { struct task_struct *task; - cgroup_taskset_for_each(task, css, tset) + cgroup_taskset_for_each(task, tset) task_function_call(task, __perf_cgroup_move, task); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d4cfc556183..ba386a06ab1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7600,7 +7600,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, { struct task_struct *task; - cgroup_taskset_for_each(task, css, tset) { + cgroup_taskset_for_each(task, tset) { #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; @@ -7618,7 +7618,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css, { struct task_struct *task; - cgroup_taskset_for_each(task, css, tset) + cgroup_taskset_for_each(task, tset) sched_move_task(task); } diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index b865662fba7..22931e1b99b 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -73,7 +73,7 @@ static void cgrp_attach(struct cgroup_subsys_state *css, void *v = (void *)(unsigned long)cs->classid; struct task_struct *p; - cgroup_taskset_for_each(p, css, tset) { + cgroup_taskset_for_each(p, tset) { task_lock(p); iterate_fd(p->files, 0, update_classid, v); task_unlock(p); diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index d7d23e28faf..f9f3a40d335 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -224,7 +224,7 @@ static void net_prio_attach(struct cgroup_subsys_state *css, struct task_struct *p; void *v = (void *)(unsigned long)css->cgroup->id; - cgroup_taskset_for_each(p, css, tset) { + cgroup_taskset_for_each(p, tset) { task_lock(p); iterate_fd(p->files, 0, update_netprio, v); task_unlock(p); -- cgit v1.2.3-70-g09d2 From a53efe5ff88d0283bae8a2c2fa066d0fff31dc91 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 26 Oct 2012 17:17:44 +0200 Subject: sched/mm: call finish_arch_post_lock_switch in idle_task_exit and use_mm The finish_arch_post_lock_switch is called at the end of the task switch after all locks have been released. In concept it is paired with the switch_mm function, but the current code only does the call in finish_task_switch. Add the call to idle_task_exit and use_mm. One use case for the additional calls is s390 which will use finish_arch_post_lock_switch to wait for the completion of TLB flush operations. 
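An architecture opts in by defining the hook itself; a minimal arch-side sketch (illustrative only, not s390's actual implementation) of what an asm/mmu_context.h could provide so that the #ifdef in the use_mm() hunk below picks it up:

	#define finish_arch_post_lock_switch finish_arch_post_lock_switch
	static inline void finish_arch_post_lock_switch(void)
	{
		/* e.g. wait for outstanding TLB flush operations to complete */
	}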
Signed-off-by: Martin Schwidefsky --- kernel/sched/core.c | 4 +++- mm/mmu_context.c | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aa..4b0739c9558 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4692,8 +4692,10 @@ void idle_task_exit(void) BUG_ON(cpu_online(smp_processor_id())); - if (mm != &init_mm) + if (mm != &init_mm) { switch_mm(mm, &init_mm, current); + finish_arch_post_lock_switch(); + } mmdrop(mm); } diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 8a8cd0265e5..f802c2d216a 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -31,6 +31,9 @@ void use_mm(struct mm_struct *mm) tsk->mm = mm; switch_mm(active_mm, mm, tsk); task_unlock(tsk); +#ifdef finish_arch_post_lock_switch + finish_arch_post_lock_switch(); +#endif if (active_mm != mm) mmdrop(active_mm); -- cgit v1.2.3-70-g09d2 From 3f1d2a318171bf61850d4e5a72031271e5aada76 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 12 Feb 2014 10:49:30 +0100 Subject: sched: Fix hotplug task migration Dan Carpenter reported: > kernel/sched/rt.c:1347 pick_next_task_rt() warn: variable dereferenced before check 'prev' (see line 1338) > kernel/sched/deadline.c:1011 pick_next_task_dl() warn: variable dereferenced before check 'prev' (see line 1005) Kirill also spotted that migrate_tasks() will have an instant NULL deref because pick_next_task() will immediately deref prev. Instead of fixing all the corner cases because migrate_tasks() can pass in a NULL prev task in the unlikely case of hot-un-plug, provide a fake task such that we can remove all the NULL checks from the far more common paths. A further problem; not previously spotted; is that because we pushed pre_schedule() and idle_balance() into pick_next_task() we now need to avoid those getting called and pulling more tasks on our dying CPU. We avoid pull_{dl,rt}_task() by setting fake_task.prio to MAX_PRIO+1. We also note that since we call pick_next_task() exactly the amount of times we have runnable tasks present, we should never land in idle_balance(). Fixes: 38033c37faab ("sched: Push down pre_schedule() and idle_balance()") Cc: Juri Lelli Cc: Ingo Molnar Cc: Steven Rostedt Reported-by: Kirill Tkhai Reported-by: Dan Carpenter Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20140212094930.GB3545@laptop.programming.kicks-ass.net Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 18 +++++++++++++++++- kernel/sched/deadline.c | 3 +-- kernel/sched/fair.c | 5 ++--- kernel/sched/idle_task.c | 3 +-- kernel/sched/rt.c | 3 +-- kernel/sched/sched.h | 5 +++++ kernel/sched/stop_task.c | 3 +-- 7 files changed, 28 insertions(+), 12 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fb9764fbc53..49db434a35d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4681,6 +4681,22 @@ static void calc_load_migrate(struct rq *rq) atomic_long_add(delta, &calc_load_tasks); } +static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) +{ +} + +static const struct sched_class fake_sched_class = { + .put_prev_task = put_prev_task_fake, +}; + +static struct task_struct fake_task = { + /* + * Avoid pull_{rt,dl}_task() + */ + .prio = MAX_PRIO + 1, + .sched_class = &fake_sched_class, +}; + /* * Migrate all tasks from the rq, sleeping tasks will be migrated by * try_to_wake_up()->select_task_rq(). 
@@ -4721,7 +4737,7 @@ static void migrate_tasks(unsigned int dead_cpu) if (rq->nr_running == 1) break; - next = pick_next_task(rq, NULL); + next = pick_next_task(rq, &fake_task); BUG_ON(!next); next->sched_class->put_prev_task(rq, next); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index ed31ef66ab9..bfeb84ecc32 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1008,8 +1008,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev) if (unlikely(!dl_rq->dl_nr_running)) return NULL; - if (prev) - prev->sched_class->put_prev_task(rq, prev); + put_prev_task(rq, prev); dl_se = pick_next_dl_entity(rq, dl_rq); BUG_ON(!dl_se); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 40c758bbdd5..e884e45982a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4690,7 +4690,7 @@ again: if (!cfs_rq->nr_running) goto idle; - if (!prev || prev->sched_class != &fair_sched_class) + if (prev->sched_class != &fair_sched_class) goto simple; /* @@ -4766,8 +4766,7 @@ simple: if (!cfs_rq->nr_running) goto idle; - if (prev) - prev->sched_class->put_prev_task(rq, prev); + put_prev_task(rq, prev); do { se = pick_next_entity(cfs_rq, NULL); diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index f7d03af79a5..53ff9e7c76d 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -26,8 +26,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl static struct task_struct * pick_next_task_idle(struct rq *rq, struct task_struct *prev) { - if (prev) - prev->sched_class->put_prev_task(rq, prev); + put_prev_task(rq, prev); schedstat_inc(rq, sched_goidle); #ifdef CONFIG_SMP diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 72f9ec75997..65c2d6881ac 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1344,8 +1344,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev) if (rt_rq_throttled(rt_rq)) return NULL; - if (prev) - prev->sched_class->put_prev_task(rq, prev); + put_prev_task(rq, prev); p = _pick_next_task_rt(rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 92018f9821e..d276147ba5e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1147,6 +1147,11 @@ struct sched_class { #endif }; +static inline void put_prev_task(struct rq *rq, struct task_struct *prev) +{ + prev->sched_class->put_prev_task(rq, prev); +} + #define sched_class_highest (&stop_sched_class) #define for_each_class(class) \ for (class = sched_class_highest; class; class = class->next) diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index a4147c9d201..d6ce65dde54 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -31,8 +31,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev) if (!stop || !stop->on_rq) return NULL; - if (prev) - prev->sched_class->put_prev_task(rq, prev); + put_prev_task(rq, prev); stop->se.exec_start = rq_clock_task(rq); -- cgit v1.2.3-70-g09d2 From 77177856e3bf39d435b3ae4bfd164ca3c8cd4577 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 7 Feb 2014 20:58:37 +0100 Subject: sched: Init idle->on_rq in init_idle() We stumbled in RT over an SMP bringup issue on ARM where the idle->on_rq == 0 was causing try_to_wake_up() on the other cpu to run into nada land. After adding that idle->on_rq = 1; I was able to find the root cause of the lockup: the idle task on the newly woken up cpu was fiddling with a sleeping spinlock, which is a no-no.
I kept the init of idle->on_rq to keep the state consistent and to avoid another long lasting debug session. As a side note, the whole debug mess could have been avoided if might_sleep() would have yelled when called from the idle task. That's fixed with patch 2/6 - and that one actually has a changelog :) Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1391803122-4425-2-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 49db434a35d..06da865043e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4443,6 +4443,7 @@ void init_idle(struct task_struct *idle, int cpu) rcu_read_unlock(); rq->curr = rq->idle = idle; + idle->on_rq = 1; #if defined(CONFIG_SMP) idle->on_cpu = 1; #endif -- cgit v1.2.3-70-g09d2 From db273be2a7d42f92b3471e0f717982928214a650 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 7 Feb 2014 20:58:38 +0100 Subject: sched: Check for idle task in might_sleep() Idle is not allowed to call sleeping functions ever! Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1391803122-4425-3-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 06da865043e..a01fe6cfdb9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6927,7 +6927,8 @@ void __might_sleep(const char *file, int line, int preempt_offset) static unsigned long prev_jiffy; /* ratelimiting */ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ - if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || + if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && + !is_idle_task(current)) || system_state != SYSTEM_RUNNING || oops_in_progress) return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) -- cgit v1.2.3-70-g09d2 From 8f47b1871b8aac98f1a9d93bc3467fb97b65199a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 7 Feb 2014 20:58:39 +0100 Subject: sched: Add better debug output for might_sleep() might_sleep() can tell us where interrupts have been disabled, but we have no idea what disabled preemption. Add some debug infrastructure. 
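A sketch of the class of bug this helps attribute (buggy_func() is hypothetical): with CONFIG_DEBUG_PREEMPT the resulting might_sleep() splat can now print the call site that disabled preemption:

	static void buggy_func(void)
	{
		preempt_disable();
		/* sleeping in atomic context: __might_sleep() fires, and the
		 * new ->preempt_disable_ip points back at this function */
		msleep(10);
		preempt_enable();
	}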
Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1391803122-4425-4-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 +++ kernel/sched/core.c | 23 +++++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/include/linux/sched.h b/include/linux/sched.h index c49a2585ff7..825ed838d4b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1463,6 +1463,9 @@ struct task_struct { struct mutex perf_event_mutex; struct list_head perf_event_list; #endif +#ifdef CONFIG_DEBUG_PREEMPT + unsigned long preempt_disable_ip; +#endif #ifdef CONFIG_NUMA struct mempolicy *mempolicy; /* Protected by alloc_lock */ short il_next; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a01fe6cfdb9..c94e851dc98 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2501,8 +2501,13 @@ void __kprobes preempt_count_add(int val) DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK - 10); #endif - if (preempt_count() == val) - trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); + if (preempt_count() == val) { + unsigned long ip = get_parent_ip(CALLER_ADDR1); +#ifdef CONFIG_DEBUG_PREEMPT + current->preempt_disable_ip = ip; +#endif + trace_preempt_off(CALLER_ADDR0, ip); + } } EXPORT_SYMBOL(preempt_count_add); @@ -2545,6 +2550,13 @@ static noinline void __schedule_bug(struct task_struct *prev) print_modules(); if (irqs_disabled()) print_irqtrace_events(prev); +#ifdef CONFIG_DEBUG_PREEMPT + if (in_atomic_preempt_off()) { + pr_err("Preemption disabled at:"); + print_ip_sym(current->preempt_disable_ip); + pr_cont("\n"); + } +#endif dump_stack(); add_taint(TAINT_WARN, LOCKDEP_STILL_OK); } @@ -6946,6 +6958,13 @@ void __might_sleep(const char *file, int line, int preempt_offset) debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); +#ifdef CONFIG_DEBUG_PREEMPT + if (!preempt_count_equals(preempt_offset)) { + pr_err("Preemption disabled at:"); + print_ip_sym(current->preempt_disable_ip); + pr_cont("\n"); + } +#endif dump_stack(); } EXPORT_SYMBOL(__might_sleep); -- cgit v1.2.3-70-g09d2 From d6b1e9119787fd2e31dcf0f0ce90b71197604206 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 7 Feb 2014 20:58:40 +0100 Subject: sched: Adjust p->sched_reset_on_fork when nothing else changes If the policy and priority remain unchanged a possible modification of p->sched_reset_on_fork gets lost in the early exit path. Signed-off-by: Thomas Gleixner [ Rebase ontop of v3.14-rc1. ] Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1391803122-4425-5-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c94e851dc98..771eb8762df 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3362,7 +3362,8 @@ recheck: } /* - * If not changing anything there's no need to proceed further: + * If not changing anything there's no need to proceed further, + * but store a possible modification of reset_on_fork. 
*/ if (unlikely(policy == p->policy)) { if (fair_policy(policy) && attr->sched_nice != task_nice(p)) @@ -3372,6 +3373,7 @@ recheck: if (dl_policy(policy)) goto change; + p->sched_reset_on_fork = reset_on_fork; task_rq_unlock(rq, p, &flags); return 0; } -- cgit v1.2.3-70-g09d2 From 81a44c5441d7f7d2c3dc9105f4d65ad0d5818617 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 7 Feb 2014 20:58:41 +0100 Subject: sched: Queue RT tasks to head when prio drops The following scenario does not work correctly: Runqueue of CPUx contains two runnable and pinned tasks: T1: SCHED_FIFO, prio 80 T2: SCHED_FIFO, prio 80 T1 is on the cpu and executes the following syscalls (classic priority ceiling scenario): sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 90); ... sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 80); ... Now T1 gets preempted by T3 (SCHED_FIFO, prio 95). After T3 goes back to sleep the scheduler picks T2. Surprise! The same happens w/o actual preemption when T1 is forced into the scheduler due to a sporadic NEED_RESCHED event. The scheduler invokes pick_next_task() which returns T2. So T1 gets preempted and scheduled out. This happens because sched_setscheduler() dequeues T1 from the prio 90 list and then enqueues it on the tail of the prio 80 list behind T2. This violates the POSIX spec and surprises user space which relies on the guarantee that SCHED_FIFO tasks are not scheduled out unless they give the CPU up voluntarily or are preempted by a higher priority task. In the latter case the preempted task must get back on the CPU after the preempting task schedules out again. We fixed a similar issue already in commit 60db48c (sched: Queue a deboosted task to the head of the RT prio queue). The same treatment is necessary for sched_setscheduler(). So enqueue to head of the prio bucket list if the priority of the task is lowered. It might be possible that existing user space relies on the current behaviour, but it can be considered highly unlikely due to the corner case nature of the application scenario. Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1391803122-4425-6-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 771eb8762df..9c2fcbf9a26 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3442,8 +3442,13 @@ change: if (running) p->sched_class->set_curr_task(rq); - if (on_rq) - enqueue_task(rq, p, 0); + if (on_rq) { + /* + * We enqueue to tail when the priority of a task is + * increased (user space view). + */ + enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); + } check_class_changed(rq, p, prev_class, oldprio); task_rq_unlock(rq, p, &flags); -- cgit v1.2.3-70-g09d2 From c365c292d05908c6ea6f32708f331e21033fe71d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 7 Feb 2014 20:58:42 +0100 Subject: sched: Consider pi boosting in setscheduler() If a PI boosted task policy/priority is modified by a setscheduler() call we unconditionally dequeue and requeue the task if it is on the runqueue even if the new priority is lower than the current effective boosted priority. This can result in undesired reordering of the priority bucket list. 
If the new priority is less than or equal to the current effective priority, we just store the new parameters in the task struct and leave the scheduler class and the runqueue untouched. This is handled when the task deboosts itself. Only if the new priority is higher than the effective boosted priority do we apply the change immediately. Signed-off-by: Thomas Gleixner [ Rebase ontop of v3.14-rc1. ] Signed-off-by: Sebastian Andrzej Siewior Cc: Dario Faggioli Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1391803122-4425-7-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/sched/rt.h | 7 +++++++ kernel/locking/rtmutex.c | 12 ++++++++++++ kernel/sched/core.c | 41 ++++++++++++++++++++++++++++++----------- 3 files changed, 49 insertions(+), 11 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index f7453d4c561..6341f5be6e2 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -18,6 +18,7 @@ static inline int rt_task(struct task_struct *p) #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern int rt_mutex_check_prio(struct task_struct *task, int newprio); extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) @@ -29,6 +30,12 @@ static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } + +static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) +{ + return 0; +} + static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2e960a2bab8..aa4dff04b59 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -212,6 +212,18 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task) return task_top_pi_waiter(task)->task; } +/* + * Called by sched_setscheduler() to check whether the priority change + * is overruled by a possible priority boosting. + */ +int rt_mutex_check_prio(struct task_struct *task, int newprio) +{ + if (!task_has_pi_waiters(task)) + return 0; + + return task_top_pi_waiter(task)->task->prio <= newprio; +} + /* * Adjust the priority of a task, after its pi_waiters got modified. * diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9c2fcbf9a26..003263b3b05 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2902,7 +2902,8 @@ EXPORT_SYMBOL(sleep_on_timeout); * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * - * Used by the rt_mutex code to implement priority inheritance logic. + * Used by the rt_mutex code to implement priority inheritance + * logic. Call site only calls if the priority of the task changed. */ void rt_mutex_setprio(struct task_struct *p, int prio) { @@ -3171,9 +3172,8 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) dl_se->dl_new = 1; } -/* Actually do priority change: must hold pi & rq lock.
*/ -static void __setscheduler(struct rq *rq, struct task_struct *p, - const struct sched_attr *attr) +static void __setscheduler_params(struct task_struct *p, + const struct sched_attr *attr) { int policy = attr->sched_policy; @@ -3193,9 +3193,14 @@ static void __setscheduler(struct rq *rq, struct task_struct *p, * getparam()/getattr() don't report silly values for !rt tasks. */ p->rt_priority = attr->sched_priority; + set_load_weight(p); +} - p->normal_prio = normal_prio(p); - p->prio = rt_mutex_getprio(p); +/* Actually do priority change: must hold pi & rq lock. */ +static void __setscheduler(struct rq *rq, struct task_struct *p, + const struct sched_attr *attr) +{ + __setscheduler_params(p, attr); if (dl_prio(p->prio)) p->sched_class = &dl_sched_class; @@ -3203,8 +3208,6 @@ static void __setscheduler(struct rq *rq, struct task_struct *p, p->sched_class = &rt_sched_class; else p->sched_class = &fair_sched_class; - - set_load_weight(p); } static void @@ -3257,6 +3260,7 @@ static int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user) { + int newprio = MAX_RT_PRIO - 1 - attr->sched_priority; int retval, oldprio, oldpolicy = -1, on_rq, running; int policy = attr->sched_policy; unsigned long flags; @@ -3427,6 +3431,24 @@ change: return -EBUSY; } + p->sched_reset_on_fork = reset_on_fork; + oldprio = p->prio; + + /* + * Special case for priority boosted tasks. + * + * If the new priority is lower or equal (user space view) + * than the current (boosted) priority, we just store the new + * normal parameters and do not touch the scheduler class and + * the runqueue. This will be done when the task deboost + * itself. + */ + if (rt_mutex_check_prio(p, newprio)) { + __setscheduler_params(p, attr); + task_rq_unlock(rq, p, &flags); + return 0; + } + on_rq = p->on_rq; running = task_current(rq, p); if (on_rq) @@ -3434,9 +3456,6 @@ change: if (running) p->sched_class->put_prev_task(rq, p); - p->sched_reset_on_fork = reset_on_fork; - - oldprio = p->prio; prev_class = p->sched_class; __setscheduler(rq, p, attr); -- cgit v1.2.3-70-g09d2 From d82fd25356b902703152c1800845661835541878 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 8 Feb 2014 14:17:26 +0800 Subject: sched/rt: Remove 'leaf_rt_rq_list' from 'struct rq' This is a leftover from commit e23ee74777f389369431d77390c4b09332ce026a ("sched/rt: Simplify pull_rt_task() logic and remove .leaf_rt_rq_list"). Signed-off-by: Li Zefan Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/52F5CBF6.4060901@huawei.com Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 1 - kernel/sched/sched.h | 4 ---- 2 files changed, 5 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 003263b3b05..cc4965e969b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6876,7 +6876,6 @@ void __init sched_init(void) rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; #ifdef CONFIG_RT_GROUP_SCHED - INIT_LIST_HEAD(&rq->leaf_rt_rq_list); init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index caf4abda45e..d608125b36e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -543,10 +543,6 @@ struct rq { struct list_head leaf_cfs_rq_list; #endif /* CONFIG_FAIR_GROUP_SCHED */ -#ifdef CONFIG_RT_GROUP_SCHED - struct list_head leaf_rt_rq_list; -#endif - /* * This is part of a global counter where only the total sum * over all CPUs matters. 
A task can increase this counter on -- cgit v1.2.3-70-g09d2 From 75e45d512f257beedae0d8a67d053cde5537bd4c Mon Sep 17 00:00:00 2001 From: Dongsheng Yang Date: Tue, 11 Feb 2014 15:34:50 +0800 Subject: sched: Replace hardcoding of -20 and 19 with MIN_NICE and MAX_NICE Signed-off-by: Dongsheng Yang Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/bd80780f19b4f9b4a765acc353c8dbc130274dd6.1392103744.git.yangds.fnst@cn.fujitsu.com Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/sched/auto_group.c | 2 +- kernel/sched/core.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index 4a073539c58..e73efba9830 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -203,7 +203,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) struct autogroup *ag; int err; - if (nice < -20 || nice > 19) + if (nice < MIN_NICE || nice > MAX_NICE) return -EINVAL; err = security_task_setnice(current, nice); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index cc4965e969b..a8a73b8897b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2993,7 +2993,7 @@ void set_user_nice(struct task_struct *p, long nice) unsigned long flags; struct rq *rq; - if (task_nice(p) == nice || nice < -20 || nice > 19) + if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) return; /* * We have to be careful, if called from sys_setpriority(), @@ -3072,10 +3072,10 @@ SYSCALL_DEFINE1(nice, int, increment) increment = 40; nice = task_nice(current) + increment; - if (nice < -20) - nice = -20; - if (nice > 19) - nice = 19; + if (nice < MIN_NICE) + nice = MIN_NICE; + if (nice > MAX_NICE) + nice = MAX_NICE; if (increment < 0 && !can_nice(current, nice)) return -EPERM; @@ -3623,7 +3623,7 @@ static int sched_copy_attr(struct sched_attr __user *uattr, * XXX: do we want to be lenient like existing syscalls; or do we want * to be strict and return an error on out-of-bounds values? */ - attr->sched_nice = clamp(attr->sched_nice, -20, 19); + attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); out: return ret; -- cgit v1.2.3-70-g09d2 From fce8ad1568c57e7f334018dec4fa1744c926c135 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 24 Feb 2014 16:40:01 +0100 Subject: smp: Remove wait argument from __smp_call_function_single() The main point of calling __smp_call_function_single() is to send an IPI in a purely asynchronous way. By embedding a csd in an object, a caller can send the IPI without waiting for a previous one to complete as is required by smp_call_function_single() for example. As such, sending this kind of IPI can be safe even when irqs are disabled. This flexibility comes at the expense of the caller who then needs to synchronize the csd lifecycle by himself and make sure that IPIs on a single csd are serialized. This is how __smp_call_function_single() works when wait = 0 and this use case is relevant. Now there don't seem to be any use case with wait = 1 that can't be covered by smp_call_function_single() instead, which is safer. Let's look at the two possible scenarios: 1) The user calls __smp_call_function_single(wait = 1) on a csd embedded in an object. It looks like a nice and convenient pattern at first sight because we can then retrieve the object from the IPI handler easily.
But actually it is a waste of memory space in the object since the csd can be allocated from the stack by smp_call_function_single(wait = 1) and the object can be passed as the IPI argument. Besides that, embedding the csd in an object is more error prone because the caller must take care of the serialization of the IPIs for this csd. 2) The user calls __smp_call_function_single(wait = 1) on a csd that is allocated on the stack. It's ok but smp_call_function_single() can do it as well and it already takes care of the allocation on the stack. Again, it's simpler and less error prone. Therefore, using the underscore-prefixed API version with wait = 1 is a bad pattern and a sign that the caller can do something safer and simpler. There was a single user of that which has just been converted. So let's remove this option to discourage further users. Cc: Andrew Morton Cc: Christoph Hellwig Cc: Ingo Molnar Cc: Jan Kara Cc: Jens Axboe Signed-off-by: Frederic Weisbecker Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- block/blk-softirq.c | 2 +- drivers/cpuidle/coupled.c | 2 +- include/linux/smp.h | 2 +- kernel/sched/core.c | 2 +- kernel/smp.c | 19 ++++--------------- kernel/up.c | 3 +-- net/core/dev.c | 2 +- 8 files changed, 11 insertions(+), 23 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/block/blk-mq.c b/block/blk-mq.c index 1fa9dd153fd..62154edf148 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -353,7 +353,7 @@ void __blk_mq_complete_request(struct request *rq) rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; - __smp_call_function_single(ctx->cpu, &rq->csd, 0); + __smp_call_function_single(ctx->cpu, &rq->csd); } else { rq->q->softirq_done_fn(rq); } diff --git a/block/blk-softirq.c b/block/blk-softirq.c index b5c37d96cf0..6345b7ebd0d 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -70,7 +70,7 @@ static int raise_blk_irq(int cpu, struct request *rq) data->info = rq; data->flags = 0; - __smp_call_function_single(cpu, data, 0); + __smp_call_function_single(cpu, data); return 0; } diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index e952936418d..04115947acc 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -323,7 +323,7 @@ static void cpuidle_coupled_poke(int cpu) struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending)) - __smp_call_function_single(cpu, csd, 0); + __smp_call_function_single(cpu, csd); } /** diff --git a/include/linux/smp.h b/include/linux/smp.h index c39074c794c..b410a1f2328 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -50,7 +50,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags); -int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait); +int __smp_call_function_single(int cpu, struct call_single_data *csd); #ifdef CONFIG_SMP diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aa..eba3d84765f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -432,7 +432,7 @@ void hrtick_start(struct rq *rq, u64 delay) if (rq == this_rq()) { __hrtick_restart(rq); } else if (!rq->hrtick_csd_pending) { - __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); + __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd); rq->hrtick_csd_pending = 1; } } diff --git a/kernel/smp.c b/kernel/smp.c index fa04ab938e5..b7676318975 100644 --- a/kernel/smp.c +++
b/kernel/smp.c @@ -241,29 +241,18 @@ EXPORT_SYMBOL(smp_call_function_single); * __smp_call_function_single(): Run a function on a specific CPU * @cpu: The CPU to run on. * @csd: Pre-allocated and setup data structure - * @wait: If true, wait until function has completed on specified CPU. * * Like smp_call_function_single(), but allow caller to pass in a * pre-allocated data structure. Useful for embedding @data inside * other structures, for instance. */ -int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait) +int __smp_call_function_single(int cpu, struct call_single_data *csd) { int err = 0; - int this_cpu; - this_cpu = get_cpu(); - /* - * Can deadlock when called with interrupts disabled. - * We allow cpu's that are not yet online though, as no one else can - * send smp call function interrupt to this cpu and as such deadlocks - * can't happen. - */ - WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled() - && !oops_in_progress); - - err = generic_exec_single(cpu, csd, csd->func, csd->info, wait); - put_cpu(); + preempt_disable(); + err = generic_exec_single(cpu, csd, csd->func, csd->info, 0); + preempt_enable(); return err; } diff --git a/kernel/up.c b/kernel/up.c index cdf03d16840..4e199d4cef8 100644 --- a/kernel/up.c +++ b/kernel/up.c @@ -22,8 +22,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, } EXPORT_SYMBOL(smp_call_function_single); -int __smp_call_function_single(int cpu, struct call_single_data *csd, - int wait) +int __smp_call_function_single(int cpu, struct call_single_data *csd) { unsigned long flags; diff --git a/net/core/dev.c b/net/core/dev.c index 4ad1b78c9c7..d1298128bff 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4129,7 +4129,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) if (cpu_online(remsd->cpu)) __smp_call_function_single(remsd->cpu, - &remsd->csd, 0); + &remsd->csd); remsd = next; } } else -- cgit v1.2.3-70-g09d2 From c46fff2a3b29794b35d717b5680a27f31a6a6bc0 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 24 Feb 2014 16:40:02 +0100 Subject: smp: Rename __smp_call_function_single() to smp_call_function_single_async() The name __smp_call_function_single() doesn't tell much about the properties of this function, especially when compared to smp_call_function_single(). The comments above the implementation are also misleading. The main point of this function is actually not to be able to embed the csd in an object. This is actually a requirement that results from the purpose of this function which is to raise an IPI asynchronously. As such it can be called with interrupts disabled. And this feature comes at the cost of the caller who then needs to serialize the IPIs on this csd. Let's rename the function and enhance the comments so that they reflect these properties.
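A minimal usage sketch of the renamed API (the demo_* names are illustrative, mirroring the blk-mq pattern above): the csd is embedded in the caller's object, the call is legal with interrupts disabled, and the caller must guarantee that the previous IPI on this csd has completed before sending the next one:

	struct demo_obj {
		struct call_single_data csd;
		int payload;
	};

	static void demo_ipi_fn(void *info)
	{
		struct demo_obj *obj = info;

		obj->payload++;		/* runs on the target CPU, in IPI context */
	}

	static void demo_kick(int cpu, struct demo_obj *obj)
	{
		obj->csd.func = demo_ipi_fn;
		obj->csd.info = obj;
		obj->csd.flags = 0;
		smp_call_function_single_async(cpu, &obj->csd);
	}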
Suggested-by: Christoph Hellwig Cc: Andrew Morton Cc: Christoph Hellwig Cc: Ingo Molnar Cc: Jan Kara Cc: Jens Axboe Signed-off-by: Frederic Weisbecker Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- block/blk-softirq.c | 2 +- drivers/cpuidle/coupled.c | 2 +- include/linux/smp.h | 2 +- kernel/sched/core.c | 2 +- kernel/smp.c | 19 +++++++++++++------ kernel/up.c | 4 ++-- net/core/dev.c | 2 +- 8 files changed, 21 insertions(+), 14 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/block/blk-mq.c b/block/blk-mq.c index 62154edf148..6468a715a0e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -353,7 +353,7 @@ void __blk_mq_complete_request(struct request *rq) rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; - __smp_call_function_single(ctx->cpu, &rq->csd); + smp_call_function_single_async(ctx->cpu, &rq->csd); } else { rq->q->softirq_done_fn(rq); } diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 6345b7ebd0d..ebd6b6f1bde 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -70,7 +70,7 @@ static int raise_blk_irq(int cpu, struct request *rq) data->info = rq; data->flags = 0; - __smp_call_function_single(cpu, data); + smp_call_function_single_async(cpu, data); return 0; } diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 04115947acc..cb6654bfad7 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -323,7 +323,7 @@ static void cpuidle_coupled_poke(int cpu) struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending)) - __smp_call_function_single(cpu, csd); + smp_call_function_single_async(cpu, csd); } /** diff --git a/include/linux/smp.h b/include/linux/smp.h index b410a1f2328..633f5edd747 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -50,7 +50,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags); -int __smp_call_function_single(int cpu, struct call_single_data *csd); +int smp_call_function_single_async(int cpu, struct call_single_data *csd); #ifdef CONFIG_SMP diff --git a/kernel/sched/core.c b/kernel/sched/core.c index eba3d84765f..0cca04a53de 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -432,7 +432,7 @@ void hrtick_start(struct rq *rq, u64 delay) if (rq == this_rq()) { __hrtick_restart(rq); } else if (!rq->hrtick_csd_pending) { - __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd); + smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); rq->hrtick_csd_pending = 1; } } diff --git a/kernel/smp.c b/kernel/smp.c index b7676318975..06d574e42c7 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -238,15 +238,22 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, EXPORT_SYMBOL(smp_call_function_single); /** - * __smp_call_function_single(): Run a function on a specific CPU + * smp_call_function_single_async(): Run an asynchronous function on a + * specific CPU. * @cpu: The CPU to run on. * @csd: Pre-allocated and setup data structure * - * Like smp_call_function_single(), but allow caller to pass in a - * pre-allocated data structure. Useful for embedding @data inside - * other structures, for instance. + * Like smp_call_function_single(), but the call is asynchonous and + * can thus be done from contexts with disabled interrupts. 
+ * + * The caller passes his own pre-allocated data structure + * (ie: embedded in an object) and is responsible for synchronizing it + * such that the IPIs performed on the @csd are strictly serialized. + * + * NOTE: Be careful, there is unfortunately no current debugging facility to + * validate the correctness of this serialization. */ -int __smp_call_function_single(int cpu, struct call_single_data *csd) +int smp_call_function_single_async(int cpu, struct call_single_data *csd) { int err = 0; @@ -256,7 +263,7 @@ int __smp_call_function_single(int cpu, struct call_single_data *csd) return err; } -EXPORT_SYMBOL_GPL(__smp_call_function_single); +EXPORT_SYMBOL_GPL(smp_call_function_single_async); /* * smp_call_function_any - Run a function on any of the given cpus diff --git a/kernel/up.c b/kernel/up.c index 4e199d4cef8..1760bf3d146 100644 --- a/kernel/up.c +++ b/kernel/up.c @@ -22,7 +22,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, } EXPORT_SYMBOL(smp_call_function_single); -int __smp_call_function_single(int cpu, struct call_single_data *csd) +int smp_call_function_single_async(int cpu, struct call_single_data *csd) { unsigned long flags; @@ -31,7 +31,7 @@ int __smp_call_function_single(int cpu, struct call_single_data *csd) local_irq_restore(flags); return 0; } -EXPORT_SYMBOL(__smp_call_function_single); +EXPORT_SYMBOL(smp_call_function_single_async); int on_each_cpu(smp_call_func_t func, void *info, int wait) { diff --git a/net/core/dev.c b/net/core/dev.c index d1298128bff..ac7a2abb7f1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4128,7 +4128,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) struct softnet_data *next = remsd->rps_ipi_next; if (cpu_online(remsd->cpu)) - __smp_call_function_single(remsd->cpu, + smp_call_function_single_async(remsd->cpu, &remsd->csd); remsd = next; } -- cgit v1.2.3-70-g09d2 From 37e117c07b89194aae7062bc63bde1104c03db02 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 14 Feb 2014 12:25:08 +0100 Subject: sched: Guarantee task priority in pick_next_task() Michael spotted that the idle_balance() push down created a task priority problem. Previously, when we called idle_balance() before pick_next_task() it wasn't a problem when -- because of the rq->lock droppage -- an rt/dl task slipped in. Similarly for pre_schedule(), rt pre-schedule could have a dl task slip in. But by pulling it into the pick_next_task() loop, we'll not try a higher task priority again. Cure this by creating a re-start condition in pick_next_task(); and triggering this from pick_next_task_{rt,fair}(). It also fixes a live-lock where we get stuck in pick_next_task_fair() due to idle_balance() seeing !0 nr_running but there not actually being any fair tasks about. 
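A class-side sketch of the new contract (pick_next_task_demo(), need_pull() and pull_tasks() are hypothetical stand-ins for the real pull logic; compare the rt.c hunk below): any pick path that drops rq->lock must assume a higher-priority task may have slipped in, and returns RETRY_TASK so the core loop restarts from the highest class:

	static struct task_struct *
	pick_next_task_demo(struct rq *rq, struct task_struct *prev)
	{
		if (need_pull(rq, prev)) {
			pull_tasks(rq);		/* may drop and re-acquire rq->lock */
			if (unlikely(rq->dl.dl_nr_running))
				return RETRY_TASK;	/* restart from the top */
		}

		return NULL;	/* nothing runnable in this class */
	}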
Reported-by: Michael Wang Fixes: 38033c37faab ("sched: Push down pre_schedule() and idle_balance()") Tested-by: Sasha Levin Signed-off-by: Peter Zijlstra Cc: Juri Lelli Cc: Steven Rostedt Link: http://lkml.kernel.org/r/20140224121218.GR15586@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 12 ++++++++---- kernel/sched/fair.c | 13 ++++++++++++- kernel/sched/rt.c | 10 +++++++++- kernel/sched/sched.h | 5 +++++ 4 files changed, 34 insertions(+), 6 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a8a73b8897b..cde573d3f12 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2586,24 +2586,28 @@ static inline void schedule_debug(struct task_struct *prev) static inline struct task_struct * pick_next_task(struct rq *rq, struct task_struct *prev) { - const struct sched_class *class; + const struct sched_class *class = &fair_sched_class; struct task_struct *p; /* * Optimization: we know that if all tasks are in * the fair class we can call that function directly: */ - if (likely(prev->sched_class == &fair_sched_class && + if (likely(prev->sched_class == class && rq->nr_running == rq->cfs.h_nr_running)) { p = fair_sched_class.pick_next_task(rq, prev); - if (likely(p)) + if (likely(p && p != RETRY_TASK)) return p; } +again: for_each_class(class) { p = class->pick_next_task(rq, prev); - if (p) + if (p) { + if (unlikely(p == RETRY_TASK)) + goto again; return p; + } } BUG(); /* the idle class will always have a runnable task */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index be4f7d9eaf0..16042b58a32 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4686,6 +4686,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) struct cfs_rq *cfs_rq = &rq->cfs; struct sched_entity *se; struct task_struct *p; + int new_tasks; again: #ifdef CONFIG_FAIR_GROUP_SCHED @@ -4784,7 +4785,17 @@ simple: return p; idle: - if (idle_balance(rq)) /* drops rq->lock */ + /* + * Because idle_balance() releases (and re-acquires) rq->lock, it is + * possible for any higher priority task to appear. In that case we + * must re-start the pick_next_entity() loop. + */ + new_tasks = idle_balance(rq); + + if (rq->nr_running != rq->cfs.h_nr_running) + return RETRY_TASK; + + if (new_tasks) goto again; return NULL; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 4d4b386598a..398b3f99082 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1360,8 +1360,16 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev) struct task_struct *p; struct rt_rq *rt_rq = &rq->rt; - if (need_pull_rt_task(rq, prev)) + if (need_pull_rt_task(rq, prev)) { pull_rt_task(rq); + /* + * pull_rt_task() can drop (and re-acquire) rq->lock; this + * means a dl task can slip in, in which case we need to + * re-start task selection. + */ + if (unlikely(rq->dl.dl_nr_running)) + return RETRY_TASK; + } if (!rt_rq->rt_nr_running) return NULL; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 046084ebb1f..1929deb3f29 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1091,6 +1091,8 @@ static const u32 prio_to_wmult[40] = { #define DEQUEUE_SLEEP 1 +#define RETRY_TASK ((void *)-1UL) + struct sched_class { const struct sched_class *next; @@ -1105,6 +1107,9 @@ struct sched_class { * It is the responsibility of the pick_next_task() method that will * return the next task to call put_prev_task() on the @prev task or * something equivalent. 
+ * + * May return RETRY_TASK when it finds a higher prio class has runnable + * tasks. */ struct task_struct * (*pick_next_task) (struct rq *rq, struct task_struct *prev); -- cgit v1.2.3-70-g09d2 From d44753b843e093f9e1f2f14806fbe106fff74898 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Mon, 3 Mar 2014 12:09:21 +0100 Subject: sched/deadline: Deny unprivileged users to set/change SCHED_DEADLINE policy Deny the use of SCHED_DEADLINE policy to unprivileged users. Even if root users can set the policy for normal users, we don't want the latter to be able to change their parameters (safest behavior). Signed-off-by: Juri Lelli Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1393844961-18097-1-git-send-email-juri.lelli@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6edbef296ec..f5c6635b806 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3338,6 +3338,15 @@ recheck: return -EPERM; } + /* + * Can't set/change SCHED_DEADLINE policy at all for now + * (safest behavior); in the future we would like to allow + * unprivileged DL tasks to increase their relative deadline + * or reduce their runtime (both ways reducing utilization) + */ + if (dl_policy(policy)) + return -EPERM; + /* * Treat SCHED_IDLE as nice 20. Only allow a switch to * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. -- cgit v1.2.3-70-g09d2 From 156654f491dd8d52687a5fbe1637f472a52ce75b Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 28 Feb 2014 07:23:11 +0100 Subject: sched/numa: Move task_numa_free() to __put_task_struct() Bad idea on -rt: [ 908.026136] [] rt_spin_lock_slowlock+0xaa/0x2c0 [ 908.026145] [] task_numa_free+0x31/0x130 [ 908.026151] [] finish_task_switch+0xce/0x100 [ 908.026156] [] thread_return+0x48/0x4ae [ 908.026160] [] schedule+0x25/0xa0 [ 908.026163] [] rt_spin_lock_slowlock+0xd5/0x2c0 [ 908.026170] [] get_signal_to_deliver+0xaf/0x680 [ 908.026175] [] do_signal+0x3d/0x5b0 [ 908.026179] [] do_notify_resume+0x90/0xe0 [ 908.026186] [] int_signal+0x12/0x17 [ 908.026193] [<00007ff2a388b1d0>] 0x7ff2a388b1cf and since upstream does not mind where we do this, be a bit nicer ... 
Signed-off-by: Mike Galbraith Signed-off-by: Peter Zijlstra Cc: Mel Gorman Cc: Linus Torvalds Cc: Andrew Morton Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1393568591.6018.27.camel@marge.simpson.net Signed-off-by: Ingo Molnar --- kernel/fork.c | 1 + kernel/sched/core.c | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/fork.c b/kernel/fork.c index a17621c6cd4..332688e5e7b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -237,6 +237,7 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); + task_numa_free(tsk); security_task_free(tsk); exit_creds(tsk); delayacct_tsk_free(tsk); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index dd89c27bb56..9e126a21c5c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2151,8 +2151,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) if (mm) mmdrop(mm); if (unlikely(prev_state == TASK_DEAD)) { - task_numa_free(prev); - if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); -- cgit v1.2.3-70-g09d2 From 383afd0971538b3d77532a56404b24cfe967b5dd Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 11 Mar 2014 19:24:20 -0400 Subject: sched: Fix broken setscheduler() I decided to run my tests on linux-next, and my wakeup_rt tracer was broken. After running a bisect, I found that the problem commit was: linux-next commit c365c292d059 "sched: Consider pi boosting in setscheduler()" And the reason the wakeup_rt tracer test was failing was because it had no RT task to trace. I first noticed this when running with the sched_switch event and saw that my RT task still had normal SCHED_OTHER priority. Looking at the problem commit, I found: - p->normal_prio = normal_prio(p); - p->prio = rt_mutex_getprio(p); With no + p->normal_prio = normal_prio(p); + p->prio = rt_mutex_getprio(p); Reading what the commit is supposed to do, I realize that the p->prio can't be set if the task is boosted with a higher prio, but the p->normal_prio still needs to be set regardless, otherwise, when the task is deboosted, it won't get the new priority. The p->prio has to be set before "check_class_changed()" is called, otherwise the class won't be changed. Also added a fix to newprio to include a check for the deadline policy that was missing. This change was suggested by Juri Lelli. Signed-off-by: Steven Rostedt Cc: Sebastian Andrzej Siewior Cc: Juri Lelli Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20140306120438.638bfe94@gandalf.local.home Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9e126a21c5c..ae365aaa818 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3195,6 +3195,7 @@ static void __setscheduler_params(struct task_struct *p, * getparam()/getattr() don't report silly values for !rt tasks. */ p->rt_priority = attr->sched_priority; + p->normal_prio = normal_prio(p); set_load_weight(p); } @@ -3204,6 +3205,12 @@ static void __setscheduler(struct rq *rq, struct task_struct *p, { __setscheduler_params(p, attr); + /* + * If we get here, there was no pi waiters boosting the + * task. It is safe to use the normal prio.
+ */ + p->prio = normal_prio(p); + if (dl_prio(p->prio)) p->sched_class = &dl_sched_class; else if (rt_prio(p->prio)) @@ -3262,7 +3269,8 @@ static int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user) { - int newprio = MAX_RT_PRIO - 1 - attr->sched_priority; + int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : + MAX_RT_PRIO - 1 - attr->sched_priority; int retval, oldprio, oldpolicy = -1, on_rq, running; int policy = attr->sched_policy; unsigned long flags; -- cgit v1.2.3-70-g09d2 From 300a9d887ea221f344962506f724e02101bacc08 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 5 Mar 2014 17:05:57 +0100 Subject: sched: Remove needless round trip nsecs <-> tick conversion of steal time When update_rq_clock_task() accounts the pending steal time for a task, it converts the steal delta from nsecs to ticks then from ticks to nsecs. There is no apparent good reason for doing that though because both the task clock and the prev steal delta are u64 and store values in nsecs. So let's remove the needless conversion. Cc: Ingo Molnar Cc: Marcelo Tosatti Cc: Peter Zijlstra Cc: Thomas Gleixner Acked-by: Rik van Riel Signed-off-by: Frederic Weisbecker --- kernel/sched/core.c | 6 ------ kernel/sched/sched.h | 10 ---------- 2 files changed, 16 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aa..b14a188af89 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -823,19 +823,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) #endif #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING if (static_key_false((&paravirt_steal_rq_enabled))) { - u64 st; - steal = paravirt_steal_clock(cpu_of(rq)); steal -= rq->prev_steal_time_rq; if (unlikely(steal > delta)) steal = delta; - st = steal_ticks(steal); - steal = st * TICK_NSEC; - rq->prev_steal_time_rq += steal; - delta -= steal; } #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c2119fd20f8..5ec99101012 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1214,16 +1214,6 @@ extern void update_idle_cpu_load(struct rq *this_rq); extern void init_task_runnable_average(struct task_struct *p); -#ifdef CONFIG_PARAVIRT -static inline u64 steal_ticks(u64 steal) -{ - if (unlikely(steal > NSEC_PER_SEC)) - return div_u64(steal, TICK_NSEC); - - return __iter_div_u64_rem(steal, TICK_NSEC, &steal); -} -#endif - static inline void inc_nr_running(struct rq *rq) { rq->nr_running++; -- cgit v1.2.3-70-g09d2 From 6201b4d61fbf194df6371fb3376c5026cb8f5eec Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 18 Mar 2014 16:26:07 +0530 Subject: timer: Remove code redundancy while calling get_nohz_timer_target() There are only two users of get_nohz_timer_target(): timer and hrtimer. Both call it under the same circumstances, i.e. #ifdef CONFIG_NO_HZ_COMMON if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) return get_nohz_timer_target(); #endif So, it makes more sense to get all this as part of get_nohz_timer_target() instead of duplicating code in two places. For this, another parameter, pinned, is required to be passed to this routine.
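With the checks folded in, a caller-side sketch reduces to a single call (demo_pick_timer_cpu() is illustrative); the pinned, timer-migration and idle_cpu() tests now live inside get_nohz_timer_target():

	static int demo_pick_timer_cpu(int pinned)
	{
		return get_nohz_timer_target(pinned);
	}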
Signed-off-by: Viresh Kumar Cc: linaro-kernel@lists.linaro.org Cc: fweisbec@gmail.com Cc: peterz@infradead.org Link: http://lkml.kernel.org/r/1e1b53537217d58d48c2d7a222a9c3ac47d5b64c.1395140107.git.viresh.kumar@linaro.org Signed-off-by: Thomas Gleixner --- include/linux/sched.h | 6 +++++- kernel/hrtimer.c | 15 +-------------- kernel/sched/core.c | 5 ++++- kernel/timer.c | 7 +------ 4 files changed, 11 insertions(+), 22 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/include/linux/sched.h b/include/linux/sched.h index 68a0e84463a..6f6c56f63c6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -291,10 +291,14 @@ extern int runqueue_is_locked(int cpu); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) extern void nohz_balance_enter_idle(int cpu); extern void set_cpu_sd_state_idle(void); -extern int get_nohz_timer_target(void); +extern int get_nohz_timer_target(int pinned); #else static inline void nohz_balance_enter_idle(int cpu) { } static inline void set_cpu_sd_state_idle(void) { } +static inline int get_nohz_timer_target(int pinned) +{ + return smp_processor_id(); +} #endif /* diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 09094361dce..d55092ceee2 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -168,19 +168,6 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, } } - -/* - * Get the preferred target CPU for NOHZ - */ -static int hrtimer_get_target(int this_cpu, int pinned) -{ -#ifdef CONFIG_NO_HZ_COMMON - if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) - return get_nohz_timer_target(); -#endif - return this_cpu; -} - /* * With HIGHRES=y we do not migrate the timer when it is expiring * before the next event on the target cpu because we cannot reprogram @@ -214,7 +201,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, struct hrtimer_clock_base *new_base; struct hrtimer_cpu_base *new_cpu_base; int this_cpu = smp_processor_id(); - int cpu = hrtimer_get_target(this_cpu, pinned); + int cpu = get_nohz_timer_target(pinned); int basenum = base->index; again: diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aa..c0339e206cc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -555,12 +555,15 @@ void resched_cpu(int cpu) * selecting an idle cpu will add more delays to the timers than intended * (as that cpu's timer base may not be uptodate wrt jiffies etc). 
*/ -int get_nohz_timer_target(void) +int get_nohz_timer_target(int pinned) { int cpu = smp_processor_id(); int i; struct sched_domain *sd; + if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu)) + return cpu; + rcu_read_lock(); for_each_domain(cpu, sd) { for_each_cpu(i, sched_domain_span(sd)) { diff --git a/kernel/timer.c b/kernel/timer.c index 8e503fec1fb..1d35ddadc04 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -760,12 +760,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, debug_activate(timer, expires); - cpu = smp_processor_id(); - -#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) - if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) - cpu = get_nohz_timer_target(); -#endif + cpu = get_nohz_timer_target(pinned); new_base = per_cpu(tvec_bases, cpu); if (base != new_base) { -- cgit v1.2.3-70-g09d2 From b8780c363d808a726a34793caa900923d32b6b80 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 7 Apr 2014 17:33:06 +0200 Subject: sched: remove sleep_on() and friends This is the final piece in the puzzle, as all patches to remove the last users of \(interruptible_\|\)sleep_on\(_timeout\|\) have made it into the 3.15 merge window. The work was long overdue, and this interface in particular should not have survived the BKL removal that was done a couple of years ago. Citing Jon Corbet from http://lwn.net/2001/0201/kernel.php3: "[...] it was suggested that the janitors look for and fix all code that calls sleep_on() [...] since (1) almost all such code is incorrect, and (2) Linus has agreed that those functions should be removed in the 2.5 development series". We haven't quite made it for 2.5, but maybe we can merge this for 3.15. Signed-off-by: Arnd Bergmann Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Linus Torvalds --- Documentation/DocBook/kernel-hacking.tmpl | 10 ------- include/linux/wait.h | 11 -------- kernel/sched/core.c | 46 ------------------------------- 3 files changed, 67 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl index d0758b241b2..bd9015d10cf 100644 --- a/Documentation/DocBook/kernel-hacking.tmpl +++ b/Documentation/DocBook/kernel-hacking.tmpl @@ -850,16 +850,6 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress); -ERESTARTSYS if a signal is received. The wait_event() version ignores signals. - - Do not use the sleep_on() function family - - it is very easy to accidentally introduce races; almost certainly - one of the wait_event() family will do, or a - loop around schedule_timeout(). If you choose - to loop around schedule_timeout() remember - you must set the task state (with - set_current_state()) on each iteration to avoid - busy-looping. - diff --git a/include/linux/wait.h b/include/linux/wait.h index 559044c7923..e7d9d9ed14f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -803,17 +803,6 @@ do { \ __ret; \ }) - -/* - * These are the old interfaces to sleep waiting for an event. - * They are racy. DO NOT use them, use the wait_event* interfaces above. - * We plan to remove these interfaces.
- */ -extern void sleep_on(wait_queue_head_t *q); -extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout); -extern void interruptible_sleep_on(wait_queue_head_t *q); -extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout); - /* * Waitqueues which are removed from the waitqueue_head at wakeup time */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1d1b87b3677..0ff3f34bc7e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2845,52 +2845,6 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, } EXPORT_SYMBOL(default_wake_function); -static long __sched -sleep_on_common(wait_queue_head_t *q, int state, long timeout) -{ - unsigned long flags; - wait_queue_t wait; - - init_waitqueue_entry(&wait, current); - - __set_current_state(state); - - spin_lock_irqsave(&q->lock, flags); - __add_wait_queue(q, &wait); - spin_unlock(&q->lock); - timeout = schedule_timeout(timeout); - spin_lock_irq(&q->lock); - __remove_wait_queue(q, &wait); - spin_unlock_irqrestore(&q->lock, flags); - - return timeout; -} - -void __sched interruptible_sleep_on(wait_queue_head_t *q) -{ - sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); -} -EXPORT_SYMBOL(interruptible_sleep_on); - -long __sched -interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) -{ - return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); -} -EXPORT_SYMBOL(interruptible_sleep_on_timeout); - -void __sched sleep_on(wait_queue_head_t *q) -{ - sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); -} -EXPORT_SYMBOL(sleep_on); - -long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) -{ - return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); -} -EXPORT_SYMBOL(sleep_on_timeout); - #ifdef CONFIG_RT_MUTEXES /* -- cgit v1.2.3-70-g09d2 From 52f5684c8e1ec7463192aba8e2916df49807511a Mon Sep 17 00:00:00 2001 From: Gideon Israel Dsouza Date: Mon, 7 Apr 2014 15:39:20 -0700 Subject: kernel: use macros from compiler.h instead of __attribute__((...)) To increase compiler portability there is <linux/compiler.h>, which provides convenience macros for various gcc constructs. E.g.: __weak for __attribute__((weak)). I've replaced all instances of gcc attributes with the right macro in the kernel subsystem. Signed-off-by: Gideon Israel Dsouza Cc: "Rafael J. Wysocki"
Wysocki" Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 3 ++- kernel/kallsyms.c | 11 ++++++----- kernel/kexec.c | 5 +++-- kernel/ksysfs.c | 5 +++-- kernel/power/power.h | 3 ++- kernel/power/snapshot.c | 3 ++- kernel/power/suspend.c | 5 +++-- kernel/power/swap.c | 2 +- kernel/sched/clock.c | 3 ++- kernel/sched/core.c | 3 ++- kernel/signal.c | 4 +++- kernel/time/timekeeping.c | 5 +++-- kernel/trace/trace.h | 3 ++- 13 files changed, 34 insertions(+), 21 deletions(-) (limited to 'kernel/sched/core.c') diff --git a/kernel/fork.c b/kernel/fork.c index e905e9c6b22..54a8d26f612 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include @@ -286,7 +287,7 @@ void __init fork_init(unsigned long mempages) init_task.signal->rlim[RLIMIT_NPROC]; } -int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst, +int __weak arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 3127ad52cdb..cb0cf37dac3 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -36,8 +37,8 @@ * These will be re-linked against their real values * during the second link stage. */ -extern const unsigned long kallsyms_addresses[] __attribute__((weak)); -extern const u8 kallsyms_names[] __attribute__((weak)); +extern const unsigned long kallsyms_addresses[] __weak; +extern const u8 kallsyms_names[] __weak; /* * Tell the compiler that the count isn't in the small data section if the arch @@ -46,10 +47,10 @@ extern const u8 kallsyms_names[] __attribute__((weak)); extern const unsigned long kallsyms_num_syms __attribute__((weak, section(".rodata"))); -extern const u8 kallsyms_token_table[] __attribute__((weak)); -extern const u16 kallsyms_token_index[] __attribute__((weak)); +extern const u8 kallsyms_token_table[] __weak; +extern const u16 kallsyms_token_index[] __weak; -extern const unsigned long kallsyms_markers[] __attribute__((weak)); +extern const unsigned long kallsyms_markers[] __weak; static inline int is_kernel_inittext(unsigned long addr) { diff --git a/kernel/kexec.c b/kernel/kexec.c index c0d261c7db7..c8380ad203b 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -1551,10 +1552,10 @@ void vmcoreinfo_append_str(const char *fmt, ...) * provide an empty default implementation here -- architecture * code may override this */ -void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void) +void __weak arch_crash_save_vmcoreinfo(void) {} -unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void) +unsigned long __weak paddr_vmcoreinfo_note(void) { return __pa((unsigned long)(char *)&vmcoreinfo_note); } diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index e660964086e..2495a9b14ac 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -18,6 +18,7 @@ #include #include #include +#include #include /* rcu_expedited */ @@ -162,8 +163,8 @@ KERNEL_ATTR_RW(rcu_expedited); /* * Make /sys/kernel/notes give the raw contents of our kernel .notes section. 
*/ -extern const void __start_notes __attribute__((weak)); -extern const void __stop_notes __attribute__((weak)); +extern const void __start_notes __weak; +extern const void __stop_notes __weak; #define notes_size (&__stop_notes - &__start_notes) static ssize_t notes_read(struct file *filp, struct kobject *kobj, diff --git a/kernel/power/power.h b/kernel/power/power.h index 1ca75310655..15f37ea0871 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -2,6 +2,7 @@ #include #include #include +#include struct swsusp_info { struct new_utsname uts; @@ -11,7 +12,7 @@ struct swsusp_info { unsigned long image_pages; unsigned long pages; unsigned long size; -} __attribute__((aligned(PAGE_SIZE))); +} __aligned(PAGE_SIZE); #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 149e745eaa5..18fb7a2fb14 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -155,7 +156,7 @@ static inline void free_image_page(void *addr, int clear_nosave_free) struct linked_page { struct linked_page *next; char data[LINKED_PAGE_DATA_SIZE]; -} __attribute__((packed)); +} __packed; static inline void free_list_of_pages(struct linked_page *list, int clear_page_nosave) diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 90b3d9366d1..c3ad9cafe93 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "power.h" @@ -156,13 +157,13 @@ static int suspend_prepare(suspend_state_t state) } /* default implementation */ -void __attribute__ ((weak)) arch_suspend_disable_irqs(void) +void __weak arch_suspend_disable_irqs(void) { local_irq_disable(); } /* default implementation */ -void __attribute__ ((weak)) arch_suspend_enable_irqs(void) +void __weak arch_suspend_enable_irqs(void) { local_irq_enable(); } diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 7c33ed20041..8c9a4819f79 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -101,7 +101,7 @@ struct swsusp_header { unsigned int flags; /* Flags to pass to the "boot" kernel */ char orig_sig[10]; char sig[10]; -} __attribute__((packed)); +} __packed; static struct swsusp_header *swsusp_header; diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index b30a2924ef1..3ef6451e972 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -60,13 +60,14 @@ #include #include #include +#include /* * Scheduler clock - returns current time in nanosec units. * This is default implementation. * Architectures and sub-architectures can override this. */ -unsigned long long __attribute__((weak)) sched_clock(void) +unsigned long long __weak sched_clock(void) { return (unsigned long long)(jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1d1b87b3677..80bd491b718 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include @@ -6498,7 +6499,7 @@ static cpumask_var_t fallback_doms; * cpu core maps. It is supposed to return 1 if the topology changed * or 0 if it stayed the same. 
*/ -int __attribute__((weak)) arch_update_cpu_topology(void) +int __weak arch_update_cpu_topology(void) { return 0; } diff --git a/kernel/signal.c b/kernel/signal.c index 5d4b05a229a..6ea13c09ae5 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -33,6 +33,8 @@ #include #include #include +#include + #define CREATE_TRACE_POINTS #include @@ -3618,7 +3620,7 @@ SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) } #endif -__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) +__weak const char *arch_vma_name(struct vm_area_struct *vma) { return NULL; } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5b40279ecd7..f7df8ea2170 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "tick-internal.h" #include "ntp_internal.h" @@ -760,7 +761,7 @@ u64 timekeeping_max_deferment(void) * * XXX - Do be sure to remove it once all arches implement it. */ -void __attribute__((weak)) read_persistent_clock(struct timespec *ts) +void __weak read_persistent_clock(struct timespec *ts) { ts->tv_sec = 0; ts->tv_nsec = 0; @@ -775,7 +776,7 @@ void __attribute__((weak)) read_persistent_clock(struct timespec *ts) * * XXX - Do be sure to remove it once all arches implement it. */ -void __attribute__((weak)) read_boot_clock(struct timespec *ts) +void __weak read_boot_clock(struct timespec *ts) { ts->tv_sec = 0; ts->tv_nsec = 0; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ffc314b7e92..2e29d7ba5a5 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -13,6 +13,7 @@ #include #include #include +#include #ifdef CONFIG_FTRACE_SYSCALLS #include /* For NR_SYSCALLS */ @@ -1279,7 +1280,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); #undef FTRACE_ENTRY #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ extern struct ftrace_event_call \ - __attribute__((__aligned__(4))) event_##call; + __aligned(4) event_##call; #undef FTRACE_ENTRY_DUP #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ -- cgit v1.2.3-70-g09d2
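
To make the pattern in this last series concrete, here is a small standalone C sketch of the same idea: hiding GCC attribute syntax behind the named macros that <linux/compiler.h> provides. The macro definitions below mirror the kernel's but are written out locally so the example compiles on its own with gcc or clang; they are illustrative stand-ins, not the kernel's headers.

#include <stdio.h>

/* Local stand-ins mirroring the <linux/compiler.h> macros used above. */
#define __weak		__attribute__((weak))
#define __packed	__attribute__((packed))
#define __aligned(x)	__attribute__((aligned(x)))

/* __packed drops inter-member padding, as in struct swsusp_header. */
struct on_wire {
	unsigned char type;
	unsigned int len;
} __packed;

/*
 * __weak provides an overridable default, as with
 * arch_update_cpu_topology(): a strong definition elsewhere
 * (e.g. in arch code) would silently replace this one at link time.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

int main(void)
{
	printf("sizeof(struct on_wire) = %zu\n", sizeof(struct on_wire)); /* 5, not 8 */
	printf("topology changed: %d\n", arch_update_cpu_topology());
	return 0;
}

The win is purely syntactic, which is exactly the point of the series: call sites read as ordinary C, and a non-GCC compiler only needs to map the <linux/compiler.h> macros onto its own attribute syntax.
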