@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].lower_first;
 		last = first + map->extent[idx].count - 1;
@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	 * were written before the count of the extents.
 	 *
 	 * To achieve this smp_wmb() is used on guarantee the write
-	 * order and smp_read_barrier_depends() is guaranteed that we
-	 * don't have crazy architectures returning stale data.
-	 *
+	 * order and smp_rmb() is guaranteed that we don't have crazy
+	 * architectures returning stale data.
 	 */
 	mutex_lock(&id_map_mutex);
 