| author | Ingo Molnar <mingo@kernel.org> | 2014-04-18 12:14:55 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-04-18 12:14:55 +0200 |
| commit | 1111b680d34bc19190f02a1b4479c3fcc592c22e (patch) | |
| tree | 9d45ad88b9354205a2d1fd76a338ab8693a16d86 /kernel/user_namespace.c | |
| parent | 37b6cb475a6d74bc047c53bc323d6eb3113e27a4 (diff) | |
| parent | 24223657806a0ebd0ae5c9caaf7b021091889cf2 (diff) | |
Merge branch 'perf/urgent' into perf/core, to pick up PMU driver fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/user_namespace.c')
| -rw-r--r-- | kernel/user_namespace.c | 11 |

1 file changed, 5 insertions, 6 deletions
```diff
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0d8f6023fd8..bf71b4b2d63 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].lower_first;
 		last = first + map->extent[idx].count - 1;
@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	 * were written before the count of the extents.
 	 *
 	 * To achieve this smp_wmb() is used on guarantee the write
-	 * order and smp_read_barrier_depends() is guaranteed that we
-	 * don't have crazy architectures returning stale data.
-	 *
+	 * order and smp_rmb() is guaranteed that we don't have crazy
+	 * architectures returning stale data.
 	 */
 	mutex_lock(&id_map_mutex);
 
```
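The pattern this diff touches is the classic publish/read ordering: map_write() fills in the extent array and then publishes map->nr_extents behind an smp_wmb(), while the map_id_*() lookups load nr_extents and issue a matching read barrier before walking the array. Below is a minimal user-space sketch of the same idea, using C11 fences as a conservative stand-in for the kernel's smp_wmb()/smp_rmb(); all names here (struct extent, nr_extents, writer, reader) are illustrative and not taken from the kernel source.

```c
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

#define MAX_EXTENTS 5

/* Illustrative analogue of the kernel's extent entries. */
struct extent {
	unsigned int first;
	unsigned int count;
};

static struct extent extents[MAX_EXTENTS];
static atomic_uint nr_extents;	/* published element count, starts at 0 */

/* Writer side, analogous to map_write(): fill the array, then publish
 * the count. The release fence plays the role of smp_wmb(), ordering
 * the extent stores before the store of nr_extents. */
static int writer(void *arg)
{
	(void)arg;
	for (unsigned int i = 0; i < MAX_EXTENTS; i++) {
		extents[i].first = i * 1000;
		extents[i].count = 1000;
	}
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&nr_extents, MAX_EXTENTS, memory_order_relaxed);
	return 0;
}

/* Reader side, analogous to map_id_down(): load the count, then walk
 * the array. The acquire fence plays the role of smp_rmb(); without
 * it, the extent loads could return stale data even after the new
 * count has become visible. */
static int reader(void *arg)
{
	(void)arg;
	unsigned int n = atomic_load_explicit(&nr_extents, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);
	for (unsigned int i = 0; i < n; i++)
		printf("extent %u: first=%u count=%u\n",
		       i, extents[i].first, extents[i].count);
	return 0;
}

int main(void)
{
	thrd_t w, r;

	thrd_create(&w, writer, NULL);
	thrd_create(&r, reader, NULL);
	thrd_join(w, NULL);
	thrd_join(r, NULL);
	return 0;
}
```

Build with something like `cc -std=c11 -pthread barrier_demo.c`. If the reader runs before the writer has published, it sees n == 0 and finds no extents, which is the safe outcome. The reason smp_rmb() is needed here rather than smp_read_barrier_depends() is that the loads of map->extent[idx] are not data-dependent on the load of nr_extents (the loop merely compares idx against it), and a dependency-only barrier does not order independent reads.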