author    Ingo Molnar <mingo@kernel.org>  2014-04-18 12:14:55 +0200
committer Ingo Molnar <mingo@kernel.org>  2014-04-18 12:14:55 +0200
commit    1111b680d34bc19190f02a1b4479c3fcc592c22e (patch)
tree      9d45ad88b9354205a2d1fd76a338ab8693a16d86 /kernel
parent    37b6cb475a6d74bc047c53bc323d6eb3113e27a4 (diff)
parent    24223657806a0ebd0ae5c9caaf7b021091889cf2 (diff)
Merge branch 'perf/urgent' into perf/core, to pick up PMU driver fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/mutex-debug.c  19
-rw-r--r--  kernel/seccomp.c              17
-rw-r--r--  kernel/user_namespace.c       11
3 files changed, 22 insertions(+), 25 deletions(-)
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index e1191c996c5..5cf6731b98e 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -71,18 +71,17 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
-        if (unlikely(!debug_locks))
-                return;
+        if (likely(debug_locks)) {
+                DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 
-        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+                if (!lock->owner)
+                        DEBUG_LOCKS_WARN_ON(!lock->owner);
+                else
+                        DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
-        if (!lock->owner)
-                DEBUG_LOCKS_WARN_ON(!lock->owner);
-        else
-                DEBUG_LOCKS_WARN_ON(lock->owner != current);
-
-        DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-        mutex_clear_owner(lock);
+                DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
+                mutex_clear_owner(lock);
+        }
 
         /*
          * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
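
Note on the hunk above: the early "if (unlikely(!debug_locks)) return;" becomes a single "if (likely(debug_locks)) { ... }" block, so statements placed after the block still execute when lock debugging is disabled, and the branch hint now marks the debug-enabled path as the expected one. likely() and unlikely() expand to GCC's __builtin_expect(). A minimal userspace sketch of the pattern (the macro definitions mirror include/linux/compiler.h; the flag and the check bodies are stand-ins):

#include <stdio.h>

/* Branch-hint macros as defined in include/linux/compiler.h. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int debug_locks = 1;     /* stand-in for the kernel's global flag */

static void debug_unlock_checks(void)
{
        /*
         * One guarded block instead of "if (unlikely(!debug_locks)) return;":
         * anything placed after the block now runs whether or not the
         * debug checks were performed.
         */
        if (likely(debug_locks))
                puts("running debug checks");

        puts("always reached");
}

int main(void)
{
        debug_unlock_checks();   /* checks run */
        debug_locks = 0;
        debug_unlock_checks();   /* checks skipped, tail still runs */
        return 0;
}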
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d8d046c0726..590c3792508 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -69,18 +69,17 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 {
         struct task_struct *task = current;
         struct pt_regs *regs = task_pt_regs(task);
+        unsigned long args[6];
 
         sd->nr = syscall_get_nr(task, regs);
         sd->arch = syscall_get_arch();
-
-        /* Unroll syscall_get_args to help gcc on arm. */
-        syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
-        syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
-        syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
-        syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
-        syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
-        syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);
-
+        syscall_get_arguments(task, regs, 0, 6, args);
+        sd->args[0] = args[0];
+        sd->args[1] = args[1];
+        sd->args[2] = args[2];
+        sd->args[3] = args[3];
+        sd->args[4] = args[4];
+        sd->args[5] = args[5];
         sd->instruction_pointer = KSTK_EIP(task);
 }
 
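
This hunk collapses six single-argument fetches into one syscall_get_arguments() call that fills a local unsigned long array, then copies the values into sd->args one by one. The copy also keeps the types straight: struct seccomp_data declares args as __u64, while syscall_get_arguments() stores unsigned long values, which are 32 bits wide on 32-bit architectures, so the element-by-element assignment performs a proper widening conversion rather than writing through a cast pointer into half of each 64-bit slot. A small userspace sketch of the same widening bounce-buffer idiom (fetch_args() is a hypothetical stand-in, not a kernel API):

#include <inttypes.h>
#include <stdio.h>

/* Stand-in for syscall_get_arguments(): fills n native-word arguments. */
static void fetch_args(unsigned long *dst, int n)
{
        for (int i = 0; i < n; i++)
                dst[i] = 0x1000 + i;
}

int main(void)
{
        unsigned long args[6];  /* native word size, what the fetcher writes */
        uint64_t wide[6];       /* fixed 64-bit slots, like seccomp_data.args */

        fetch_args(args, 6);

        /*
         * Widening copy: each unsigned long is converted to 64 bits.
         * On a 32-bit build, writing through an (unsigned long *) cast
         * into wide[] would fill only part of each slot instead.
         */
        for (int i = 0; i < 6; i++)
                wide[i] = args[i];

        for (int i = 0; i < 6; i++)
                printf("arg%d = 0x%" PRIx64 "\n", i, wide[i]);
        return 0;
}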
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0d8f6023fd8..bf71b4b2d63 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 
         /* Find the matching extent */
         extents = map->nr_extents;
-        smp_read_barrier_depends();
+        smp_rmb();
         for (idx = 0; idx < extents; idx++) {
                 first = map->extent[idx].first;
                 last = first + map->extent[idx].count - 1;
@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
 
         /* Find the matching extent */
         extents = map->nr_extents;
-        smp_read_barrier_depends();
+        smp_rmb();
         for (idx = 0; idx < extents; idx++) {
                 first = map->extent[idx].first;
                 last = first + map->extent[idx].count - 1;
@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
 
         /* Find the matching extent */
         extents = map->nr_extents;
-        smp_read_barrier_depends();
+        smp_rmb();
         for (idx = 0; idx < extents; idx++) {
                 first = map->extent[idx].lower_first;
                 last = first + map->extent[idx].count - 1;
@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
          * were written before the count of the extents.
          *
          * To achieve this smp_wmb() is used on guarantee the write
-         * order and smp_read_barrier_depends() is guaranteed that we
-         * don't have crazy architectures returning stale data.
-         *
+         * order and smp_rmb() is guaranteed that we don't have crazy
+         * architectures returning stale data.
          */
         mutex_lock(&id_map_mutex);
 
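
The user_namespace hunks replace smp_read_barrier_depends() with smp_rmb(). smp_read_barrier_depends() only orders loads whose address depends on a previously loaded value; here the reads of the extent array are not data-dependent on the load of nr_extents (the count merely bounds the loop), so the stronger read barrier is needed to pair with the writer's smp_wmb(). A userspace analogue of the publish/consume pairing using C11 fences (requires C11 threads, e.g. glibc 2.28+; names and values are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

#define MAX_EXTENTS 5

static unsigned int extent[MAX_EXTENTS];
static atomic_uint nr_extents = 0;      /* published count */

static int writer(void *arg)
{
        (void)arg;
        for (int i = 0; i < MAX_EXTENTS; i++)
                extent[i] = 100 + i;
        /* Analogue of smp_wmb(): order the extent[] stores before the count. */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&nr_extents, MAX_EXTENTS, memory_order_relaxed);
        return 0;
}

static int reader(void *arg)
{
        (void)arg;
        unsigned int n = atomic_load_explicit(&nr_extents, memory_order_relaxed);
        /*
         * Analogue of smp_rmb(): the extent[] loads below are not
         * data-dependent on the load of n, so an explicit read barrier
         * is required before trusting their contents.
         */
        atomic_thread_fence(memory_order_acquire);
        for (unsigned int i = 0; i < n; i++)
                printf("extent[%u] = %u\n", i, extent[i]);
        return 0;
}

int main(void)
{
        thrd_t w, r;
        thrd_create(&w, writer, NULL);
        thrd_create(&r, reader, NULL);
        thrd_join(w, NULL);
        thrd_join(r, NULL);
        return 0;
}

The release fence before the count store plays the role of smp_wmb() and the acquire fence after the count load plays the role of smp_rmb(): together they guarantee that a reader which observes the new count also observes fully initialized extents.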