author    Mel Gorman <mgorman@suse.de>    2012-11-22 11:16:36 +0000
committer Mel Gorman <mgorman@suse.de>    2012-12-11 14:42:55 +0000
commit    1a687c2e9a99335c9e77392f050fe607fa18a652 (patch)
tree      06df958bfdfeaf9f38f333af106b55faa81f1c6b /kernel/sched
parent    b8593bfda1652755136333cdd362de125b283a9c (diff)
mm: sched: numa: Control enabling and disabling of NUMA balancing
This patch adds Kconfig options and kernel parameters to allow automatic NUMA balancing to be enabled and disabled. The existence of such a switch was, and still is, very important when debugging problems related to transparent hugepages, and we should have the same for automatic NUMA placement.

Signed-off-by: Mel Gorman <mgorman@suse.de>
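The kernel/sched changes below only export the switch itself: set_numabalancing_state() plus a NUMA scheduler feature bit. The numa_balancing= boot parameter referenced in features.h is parsed outside kernel/sched and is therefore not in this diffstat. As a minimal sketch of how such a handler is typically wired up, assuming the parameter takes "enable" and "disable" (the handler name and parsing details here are illustrative, not taken from this diff):

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/string.h>

/*
 * Illustrative sketch only: the real numa_balancing= handler added by
 * this series lives outside kernel/sched and is not shown in this diff.
 * It is assumed to accept "enable"/"disable" and forward the result to
 * set_numabalancing_state(), which this patch adds to kernel/sched/core.c.
 */
static int __init setup_numabalancing(char *str)
{
	int ret = 0;

	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		set_numabalancing_state(true);	/* turns the NUMA sched_feat on */
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		set_numabalancing_state(false);	/* selects NO_NUMA */
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);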
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c      48
-rw-r--r--  kernel/sched/fair.c       3
-rw-r--r--  kernel/sched/features.h   6
3 files changed, 40 insertions(+), 17 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9d255bc0e27..7a45015274a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -192,23 +192,10 @@ static void sched_feat_disable(int i) { };
 static void sched_feat_enable(int i) { };
 #endif /* HAVE_JUMP_LABEL */

-static ssize_t
-sched_feat_write(struct file *filp, const char __user *ubuf,
-		size_t cnt, loff_t *ppos)
+static int sched_feat_set(char *cmp)
 {
-	char buf[64];
-	char *cmp;
-	int neg = 0;
 	int i;
-
-	if (cnt > 63)
-		cnt = 63;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-	cmp = strstrip(buf);
+	int neg = 0;

 	if (strncmp(cmp, "NO_", 3) == 0) {
 		neg = 1;
@@ -228,6 +215,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 		}
 	}

+	return i;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	char *cmp;
+	int i;
+
+	if (cnt > 63)
+		cnt = 63;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+	cmp = strstrip(buf);
+
+	i = sched_feat_set(cmp);
 	if (i == __SCHED_FEAT_NR)
 		return -EINVAL;

@@ -1549,6 +1557,16 @@ static void __sched_fork(struct task_struct *p)
 #endif /* CONFIG_NUMA_BALANCING */
 }

+#ifdef CONFIG_NUMA_BALANCING
+void set_numabalancing_state(bool enabled)
+{
+	if (enabled)
+		sched_feat_set("NUMA");
+	else
+		sched_feat_set("NO_NUMA");
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 /*
  * fork()/clone()-time setup:
 */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4b577863933..7a02a2082e9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -811,6 +811,9 @@ void task_numa_fault(int node, int pages, bool migrated)
 {
 	struct task_struct *p = current;

+	if (!sched_feat_numa(NUMA))
+		return;
+
 	/* FIXME: Allocate task-specific structure for placement policy here */

 	/*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 5fb7aefbec8..d2373a3e325 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -63,8 +63,10 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)

 /*
- * Apply the automatic NUMA scheduling policy
+ * Apply the automatic NUMA scheduling policy. Enabled automatically
+ * at runtime if running on a NUMA machine. Can be controlled via
+ * numa_balancing=
  */
 #ifdef CONFIG_NUMA_BALANCING
-SCHED_FEAT(NUMA, true)
+SCHED_FEAT(NUMA, false)
 #endif
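Note the default flip on the last line: the NUMA feature now starts out false and, per the comment above, is enabled at runtime via set_numabalancing_state() only when the machine is actually NUMA, instead of being unconditionally true at build time. For debugging, the same bit can also be toggled by hand by writing "NUMA" or "NO_NUMA" to the sched_features debugfs file (assuming CONFIG_SCHED_DEBUG and debugfs mounted at the usual /sys/kernel/debug), which goes through the same sched_feat_set() path that set_numabalancing_state() reuses.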