@@ -2175,6 +2175,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
         return active;
 }
 
+static void reset_xps_maps(struct net_device *dev,
+                           struct xps_dev_maps *dev_maps,
+                           bool is_rxqs_map)
+{
+        if (is_rxqs_map) {
+                static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+                RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+        } else {
+                RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+        }
+        static_key_slow_dec_cpuslocked(&xps_needed);
+        kfree_rcu(dev_maps, rcu);
+}
+
 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
                            struct xps_dev_maps *dev_maps, unsigned int nr_ids,
                            u16 offset, u16 count, bool is_rxqs_map)
@@ -2186,13 +2200,8 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
              j < nr_ids;)
                 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
                                                count);
-        if (!active) {
-                if (is_rxqs_map)
-                        RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-                else
-                        RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-                kfree_rcu(dev_maps, rcu);
-        }
+        if (!active)
+                reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
         if (!is_rxqs_map) {
                 for (i = offset + (count - 1); count--; i--) {
@@ -2236,10 +2245,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
                        false);
 
 out_no_maps:
-        if (static_key_enabled(&xps_rxqs_needed))
-                static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
-        static_key_slow_dec_cpuslocked(&xps_needed);
         mutex_unlock(&xps_map_mutex);
         cpus_read_unlock();
 }
@@ -2357,9 +2362,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
         if (!new_dev_maps)
                 goto out_no_new_maps;
 
-        static_key_slow_inc_cpuslocked(&xps_needed);
-        if (is_rxqs_map)
-                static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+        if (!dev_maps) {
+                /* Increment static keys at most once per type */
+                static_key_slow_inc_cpuslocked(&xps_needed);
+                if (is_rxqs_map)
+                        static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+        }
 
         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
              j < nr_ids;) {
@@ -2457,13 +2465,8 @@ out_no_new_maps:
         }
 
         /* free map if not active */
-        if (!active) {
-                if (is_rxqs_map)
-                        RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-                else
-                        RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-                kfree_rcu(dev_maps, rcu);
-        }
+        if (!active)
+                reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
 out_no_maps:
         mutex_unlock(&xps_map_mutex);
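
For readers tracing the accounting change, here is a minimal userspace sketch of the discipline the patch enforces: the "needed" count is raised at most once per map type, only on the absent-to-present transition in __netif_set_xps_queue(), and dropped exactly once by the single teardown helper reset_xps_maps(). This is an illustrative toy under stated assumptions, not kernel code: toy_map, toy_set_map, toy_reset_map, and xps_needed_count are hypothetical stand-ins, the static key is modeled as a plain counter, and RCU_INIT_POINTER()/kfree_rcu() are modeled as a NULL store plus free().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_map { int nr_entries; };

static int xps_needed_count;             /* stand-in for the xps_needed static key */
static struct toy_map *cpus_map;         /* stand-in for dev->xps_cpus_map */

/* Analogue of reset_xps_maps(): clear the pointer, free the map, and
 * drop the reference exactly once. */
static void toy_reset_map(struct toy_map **slot)
{
        free(*slot);
        *slot = NULL;
        xps_needed_count--;
        assert(xps_needed_count >= 0);   /* underflow here is the bug being fixed */
}

/* Analogue of the __netif_set_xps_queue() hunk: increment only when no
 * map was installed before, so repeated updates cannot inflate the count. */
static int toy_set_map(struct toy_map **slot, int nr_entries)
{
        struct toy_map *new_map = malloc(sizeof(*new_map));

        if (!new_map)
                return -1;
        new_map->nr_entries = nr_entries;

        if (!*slot)
                xps_needed_count++;      /* at most once per map type */
        else
                free(*slot);             /* replacing a map: no second increment */
        *slot = new_map;
        return 0;
}

int main(void)
{
        toy_set_map(&cpus_map, 4);       /* first install: count 0 -> 1 */
        toy_set_map(&cpus_map, 8);       /* update in place: count stays 1 */
        toy_reset_map(&cpus_map);        /* teardown: count 1 -> 0, no underflow */
        printf("xps_needed_count = %d\n", xps_needed_count);
        return 0;
}

Before the patch, as the removed lines show, the increment ran on every successful map update while netif_reset_xps_queues() decremented unconditionally, so the pairing modeled above could fall out of balance; tying both sides to the map's lifetime transitions is what keeps it symmetric.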