Diffstat (limited to 'kernel/cpuset.c')
 kernel/cpuset.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 10131fdaff7..9fe58c46a42 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -37,7 +37,7 @@
 #include <linux/mempolicy.h>
 #include <linux/mm.h>
 #include <linux/memory.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/pagemap.h>
@@ -949,6 +949,8 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
+	bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+
 repeat:
 	/*
 	 * Allow tasks that have access to memory reserves because they have
@@ -963,7 +965,6 @@ repeat:
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
-
 	/*
 	 * ensure checking ->mems_allowed_change_disable after setting all new
 	 * allowed nodes.
@@ -980,9 +981,11 @@ repeat:
 	/*
 	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side.
+	 * for the read-side. No wait is necessary, however, if at least one
+	 * node remains unchanged.
 	 */
-	while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+	while (masks_disjoint &&
+			ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 		task_unlock(tsk);
 		if (!task_curr(tsk))
 			yield();
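
The interesting part of this patch is not the mechanical <linux/module.h> to <linux/export.h> switch (export.h provides EXPORT_SYMBOL without pulling in the rest of module.h), but the new masks_disjoint test: cpuset_change_task_nodemask() first widens tsk->mems_allowed to old|new and only later narrows it to the new mask, so if the old and new nodemasks share at least one node a concurrent allocator can never observe an empty mask, and the busy-wait on mems_allowed_change_disable can be skipped. The user-space sketch below models that reasoning with a plain unsigned long standing in for the kernel's nodemask_t; the toy_* helper names, the node values, and the printf calls are illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the kernel's nodemask_t: one bit per memory node. */
typedef unsigned long toy_nodemask_t;

/* Simplified analogue of the kernel's nodes_intersects() helper. */
static bool toy_nodes_intersects(toy_nodemask_t a, toy_nodemask_t b)
{
	return (a & b) != 0;
}

int main(void)
{
	toy_nodemask_t mems_allowed = 0x3;	/* currently allowed: nodes 0 and 1 */
	toy_nodemask_t newmems      = 0x6;	/* new policy: nodes 1 and 2        */

	/* Same test the patch adds up front, before any mask is touched. */
	bool masks_disjoint = !toy_nodes_intersects(newmems, mems_allowed);

	/* Step 1 of the rebind: widen to old|new; this is never empty. */
	mems_allowed |= newmems;

	/*
	 * Only when no node survives the change could a reader see an
	 * empty intermediate mask, so only then does the kernel loop on
	 * mems_allowed_change_disable (modelled here by a message).
	 */
	if (masks_disjoint)
		printf("disjoint masks: would wait for readers here\n");

	/* Step 2 of the rebind: drop the nodes that are no longer allowed. */
	mems_allowed = newmems;

	printf("final mask %#lx, waited: %s\n",
	       mems_allowed, masks_disjoint ? "yes" : "no");
	return 0;
}

With the values above the two masks share node 1, so masks_disjoint is false and the example takes the no-wait path described by the updated comment in the patch.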