@@ -689,7 +689,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	struct rps_map *old_map, *map;
 	cpumask_var_t mask;
 	int err, cpu, i;
-	static DEFINE_SPINLOCK(rps_map_lock);
+	static DEFINE_MUTEX(rps_map_mutex);
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -722,9 +722,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 		map = NULL;
 	}
 
-	spin_lock(&rps_map_lock);
+	mutex_lock(&rps_map_mutex);
 	old_map = rcu_dereference_protected(queue->rps_map,
-					    lockdep_is_held(&rps_map_lock));
+					    mutex_is_locked(&rps_map_mutex));
 	rcu_assign_pointer(queue->rps_map, map);
 
 	if (map)
@@ -732,7 +732,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	if (old_map)
 		static_key_slow_dec(&rps_needed);
 
-	spin_unlock(&rps_map_lock);
+	mutex_unlock(&rps_map_mutex);
 
 	if (old_map)
 		kfree_rcu(old_map, rcu);
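
For reference, this is how the update path in store_rps_map() reads once the hunks above are applied. It is only a reassembly of the context and '+' lines from the patch, with comments added; the body of the "if (map)" branch falls between the second and third hunks and is assumed here to be static_key_slow_inc(&rps_needed), as in mainline. The mutex now covers the whole dereference-and-publish sequence, rcu_dereference_protected() is justified by mutex_is_locked() instead of lockdep_is_held(), and the old map is still released through kfree_rcu() so pre-existing RCU readers can finish:

	static DEFINE_MUTEX(rps_map_mutex);
	...
	mutex_lock(&rps_map_mutex);
	/* Writer-side dereference; holding the mutex is what makes this safe. */
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	/* Publish the new map; RCU readers switch over without blocking. */
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);	/* assumed: line between hunks 2 and 3 */
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	/* Freed only after a grace period, once pre-existing readers are done. */
	if (old_map)
		kfree_rcu(old_map, rcu);

Keeping both the static-key increment and decrement inside one critical section is what the wider lock buys; switching from the old spinlock to a mutex fits that wider section because static_key_slow_inc() and static_key_slow_dec() take the jump-label mutex and may sleep, which is not permitted with a spinlock held.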