@@ -1948,37 +1948,80 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 	}
 }
 
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
+{
+	if (dev->num_tc) {
+		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+		int i;
+
+		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
+			if ((txq - tc->offset) < tc->count)
+				return i;
+		}
+
+		return -1;
+	}
+
+	return 0;
+}
+
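
For readers who want to poke at the offset/count test in isolation, here is a minimal user-space sketch of the same lookup netdev_txq_to_tc() performs above. The table contents and the tc_txq/txq_to_tc names are illustrative only, not part of the patch.

```c
#include <stdio.h>

struct tc_txq { unsigned int offset, count; };

/* hypothetical layout: tc0 -> queues 0-3, tc1 -> queues 4-5, tc2 -> queues 6-7 */
static const struct tc_txq tc_to_txq[] = {
	{ .offset = 0, .count = 4 },
	{ .offset = 4, .count = 2 },
	{ .offset = 6, .count = 2 },
};

static int txq_to_tc(unsigned int txq)
{
	unsigned int i;

	for (i = 0; i < sizeof(tc_to_txq) / sizeof(tc_to_txq[0]); i++) {
		/* unsigned subtraction: a txq below the offset wraps to a
		 * huge value and fails the < count test, exactly as in the
		 * kernel version */
		if ((txq - tc_to_txq[i].offset) < tc_to_txq[i].count)
			return i;
	}
	return -1;
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < 9; q++)
		printf("txq %u -> tc %d\n", q, txq_to_tc(q));
	return 0;
}
```
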
 #ifdef CONFIG_XPS
 static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)		\
 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
-static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
-					int cpu, u16 index)
+static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
+			     int tci, u16 index)
 {
 	struct xps_map *map = NULL;
 	int pos;
 
 	if (dev_maps)
-		map = xmap_dereference(dev_maps->cpu_map[cpu]);
+		map = xmap_dereference(dev_maps->cpu_map[tci]);
+	if (!map)
+		return false;
 
-	for (pos = 0; map && pos < map->len; pos++) {
-		if (map->queues[pos] == index) {
-			if (map->len > 1) {
-				map->queues[pos] = map->queues[--map->len];
-			} else {
-				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
-				kfree_rcu(map, rcu);
-				map = NULL;
-			}
+	for (pos = map->len; pos--;) {
+		if (map->queues[pos] != index)
+			continue;
+
+		if (map->len > 1) {
+			map->queues[pos] = map->queues[--map->len];
 			break;
 		}
+
+		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
+		kfree_rcu(map, rcu);
+		return false;
 	}
 
-	return map;
+	return true;
 }
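
The rewritten remove_xps_queue() above now returns a bool: true while the per-entry map still holds queues, false once the last queue is gone and the map has been handed to kfree_rcu(). A queue is dropped with a swap-with-last move rather than by shifting the tail. A small stand-alone sketch of that idiom, with made-up array contents and an illustrative helper name:

```c
#include <stdbool.h>
#include <stdio.h>

/* Remove the first occurrence of `index` from queues[0..*len) in O(1),
 * order not preserved -- the same move the patch does with
 * map->queues[pos] = map->queues[--map->len].
 */
static bool swap_remove(unsigned short *queues, unsigned int *len,
			unsigned short index)
{
	unsigned int pos;

	for (pos = *len; pos--;) {
		if (queues[pos] != index)
			continue;
		queues[pos] = queues[--(*len)];
		return true;
	}
	return false;
}

int main(void)
{
	unsigned short q[] = { 3, 7, 9, 12 };
	unsigned int len = 4, i;

	swap_remove(q, &len, 7);	/* q becomes { 3, 12, 9 } */
	for (i = 0; i < len; i++)
		printf("%hu ", q[i]);
	printf("\n");
	return 0;
}
```
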
 
-static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
+static bool remove_xps_queue_cpu(struct net_device *dev,
+				 struct xps_dev_maps *dev_maps,
+				 int cpu, u16 offset, u16 count)
+{
+	int num_tc = dev->num_tc ? : 1;
+	bool active = false;
+	int tci;
+
+	for (tci = cpu * num_tc; num_tc--; tci++) {
+		int i, j;
+
+		for (i = count, j = offset; i--; j++) {
+			if (!remove_xps_queue(dev_maps, cpu, j))
+				break;
+		}
+
+		active |= i < 0;
+	}
+
+	return active;
+}
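
remove_xps_queue_cpu() walks a block of entries per CPU because, with this patch, the XPS table holds num_tc consecutive entries for each CPU: entry cpu * num_tc + tc belongs to one CPU/traffic-class pair, and num_tc is taken as 1 when no classes are configured. A quick sketch of that index layout, using invented numbers:

```c
#include <stdio.h>

/* Index of the XPS map entry for a given (cpu, tc) pair, mirroring the
 * cpu * num_tc + tc layout the patch introduces.  Values are illustrative.
 */
static unsigned int xps_tci(unsigned int cpu, unsigned int num_tc,
			    unsigned int tc)
{
	return cpu * (num_tc ? num_tc : 1) + tc;
}

int main(void)
{
	unsigned int cpu, tc, num_tc = 3;	/* e.g. 3 traffic classes */

	for (cpu = 0; cpu < 2; cpu++)
		for (tc = 0; tc < num_tc; tc++)
			printf("cpu %u, tc %u -> cpu_map[%u]\n",
			       cpu, tc, xps_tci(cpu, num_tc, tc));
	return 0;
}
```

This is also why the per-device map must be sized for num_tc entries per possible CPU, which is what the XPS_DEV_MAPS_SIZE(num_tc) sizing later in the patch accounts for.
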
+
+static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
+				   u16 count)
 {
 	struct xps_dev_maps *dev_maps;
 	int cpu, i;
@@ -1990,21 +2033,16 @@ static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
 	if (!dev_maps)
 		goto out_no_maps;
 
-	for_each_possible_cpu(cpu) {
-		for (i = index; i < dev->num_tx_queues; i++) {
-			if (!remove_xps_queue(dev_maps, cpu, i))
-				break;
-		}
-		if (i == dev->num_tx_queues)
-			active = true;
-	}
+	for_each_possible_cpu(cpu)
+		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
+					       offset, count);
 
 	if (!active) {
 		RCU_INIT_POINTER(dev->xps_maps, NULL);
 		kfree_rcu(dev_maps, rcu);
 	}
 
-	for (i = index; i < dev->num_tx_queues; i++)
+	for (i = offset + (count - 1); count--; i--)
 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
 					     NUMA_NO_NODE);
 
@@ -2012,6 +2050,11 @@ out_no_maps:
 	mutex_unlock(&xps_map_mutex);
 }
 
+static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
+{
+	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
+}
+
 static struct xps_map *expand_xps_map(struct xps_map *map,
 				      int cpu, u16 index)
 {
@@ -2051,20 +2094,28 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			u16 index)
 {
 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
+	int i, cpu, tci, numa_node_id = -2;
+	int maps_sz, num_tc = 1, tc = 0;
 	struct xps_map *map, *new_map;
-	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
-	int cpu, numa_node_id = -2;
 	bool active = false;
 
+	if (dev->num_tc) {
+		num_tc = dev->num_tc;
+		tc = netdev_txq_to_tc(dev, index);
+		if (tc < 0)
+			return -EINVAL;
+	}
+
+	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
+	if (maps_sz < L1_CACHE_BYTES)
+		maps_sz = L1_CACHE_BYTES;
+
 	mutex_lock(&xps_map_mutex);
 
 	dev_maps = xmap_dereference(dev->xps_maps);
 
 	/* allocate memory for queue storage */
-	for_each_online_cpu(cpu) {
-		if (!cpumask_test_cpu(cpu, mask))
-			continue;
-
+	for_each_cpu_and(cpu, cpu_online_mask, mask) {
 		if (!new_dev_maps)
 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
 		if (!new_dev_maps) {
@@ -2072,25 +2123,38 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			return -ENOMEM;
 		}
 
-		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
+		tci = cpu * num_tc + tc;
+		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
 				 NULL;
 
 		map = expand_xps_map(map, cpu, index);
 		if (!map)
 			goto error;
 
-		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
 	}
 
 	if (!new_dev_maps)
 		goto out_no_new_maps;
 
 	for_each_possible_cpu(cpu) {
+		/* copy maps belonging to foreign traffic classes */
+		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
+			/* fill in the new device map from the old device map */
+			map = xmap_dereference(dev_maps->cpu_map[tci]);
+			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+		}
+
+		/* We need to explicitly update tci as previous loop
+		 * could break out early if dev_maps is NULL.
+		 */
+		tci = cpu * num_tc + tc;
+
 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
 			/* add queue to CPU maps */
 			int pos = 0;
 
-			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
 			while ((pos < map->len) && (map->queues[pos] != index))
 				pos++;
 
@@ -2104,26 +2168,36 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 #endif
 		} else if (dev_maps) {
 			/* fill in the new device map from the old device map */
-			map = xmap_dereference(dev_maps->cpu_map[cpu]);
-			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+			map = xmap_dereference(dev_maps->cpu_map[tci]);
+			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
 		}
 
+		/* copy maps belonging to foreign traffic classes */
+		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
+			/* fill in the new device map from the old device map */
+			map = xmap_dereference(dev_maps->cpu_map[tci]);
+			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+		}
 	}
 
 	rcu_assign_pointer(dev->xps_maps, new_dev_maps);
 
 	/* Cleanup old maps */
-	if (dev_maps) {
-		for_each_possible_cpu(cpu) {
-			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
-			map = xmap_dereference(dev_maps->cpu_map[cpu]);
+	if (!dev_maps)
+		goto out_no_old_maps;
+
+	for_each_possible_cpu(cpu) {
+		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
+			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+			map = xmap_dereference(dev_maps->cpu_map[tci]);
 			if (map && map != new_map)
 				kfree_rcu(map, rcu);
 		}
-
-		kfree_rcu(dev_maps, rcu);
 	}
 
+	kfree_rcu(dev_maps, rcu);
+
+out_no_old_maps:
 	dev_maps = new_dev_maps;
 	active = true;
 
@@ -2138,11 +2212,12 @@ out_no_new_maps:
 
 	/* removes queue from unused CPUs */
 	for_each_possible_cpu(cpu) {
-		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
-			continue;
-
-		if (remove_xps_queue(dev_maps, cpu, index))
-			active = true;
+		for (i = tc, tci = cpu * num_tc; i--; tci++)
+			active |= remove_xps_queue(dev_maps, tci, index);
+		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
+			active |= remove_xps_queue(dev_maps, tci, index);
+		for (i = num_tc - tc, tci++; --i; tci++)
+			active |= remove_xps_queue(dev_maps, tci, index);
 	}
 
 	/* free map if not active */
@@ -2158,11 +2233,14 @@ out_no_maps:
 error:
 	/* remove any maps that we added */
 	for_each_possible_cpu(cpu) {
-		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
-		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
-		      NULL;
-		if (new_map && new_map != map)
-			kfree(new_map);
+		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
+			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+			map = dev_maps ?
+			      xmap_dereference(dev_maps->cpu_map[tci]) :
+			      NULL;
+			if (new_map && new_map != map)
+				kfree(new_map);
+		}
 	}
 
 	mutex_unlock(&xps_map_mutex);
@@ -2173,6 +2251,44 @@ error:
 EXPORT_SYMBOL(netif_set_xps_queue);
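
Taken together, the netif_set_xps_queue() changes above translate the queue index to its traffic class up front (rejecting a queue that sits outside every class range with -EINVAL), size the table with XPS_DEV_MAPS_SIZE(num_tc), and rebuild only the requested class's slot in each CPU's row while copying the slots belonging to the other classes. A hedged sketch of how a driver might call it, with an illustrative one-CPU-per-queue policy and minimal error handling; the example_setup_xps name is invented:

```c
#include <linux/cpumask.h>
#include <linux/netdevice.h>

/* Illustrative only: pin each TX queue to one CPU via XPS.  A real driver
 * would normally derive the mask from its IRQ affinity, and the modulo
 * choice below assumes contiguously numbered online CPUs.
 */
static void example_setup_xps(struct net_device *dev)
{
	cpumask_var_t mask;
	unsigned int q;
	int err;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	for (q = 0; q < dev->real_num_tx_queues; q++) {
		cpumask_clear(mask);
		cpumask_set_cpu(q % num_online_cpus(), mask);

		/* fails with -EINVAL on a multi-tc device if queue q is not
		 * covered by any tc_to_txq range */
		err = netif_set_xps_queue(dev, mask, q);
		if (err)
			pr_warn("XPS setup for txq %u failed: %d\n", q, err);
	}

	free_cpumask_var(mask);
}
```
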
 
 #endif
+void netdev_reset_tc(struct net_device *dev)
+{
+#ifdef CONFIG_XPS
+	netif_reset_xps_queues_gt(dev, 0);
+#endif
+	dev->num_tc = 0;
+	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+EXPORT_SYMBOL(netdev_reset_tc);
+
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+	if (tc >= dev->num_tc)
+		return -EINVAL;
+
+#ifdef CONFIG_XPS
+	netif_reset_xps_queues(dev, offset, count);
+#endif
+	dev->tc_to_txq[tc].count = count;
+	dev->tc_to_txq[tc].offset = offset;
+	return 0;
+}
+EXPORT_SYMBOL(netdev_set_tc_queue);
+
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+	if (num_tc > TC_MAX_QUEUE)
+		return -EINVAL;
+
+#ifdef CONFIG_XPS
+	netif_reset_xps_queues_gt(dev, 0);
+#endif
+	dev->num_tc = num_tc;
+	return 0;
+}
+EXPORT_SYMBOL(netdev_set_num_tc);
+
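
The three exported helpers above form the interface a driver uses to publish its traffic-class to queue layout; the XPS resets inside them drop any per-class maps the reconfiguration would invalidate. A sketch of the usual call sequence follows; the even queue split and the example_setup_tc name are illustrative, not taken from the patch:

```c
#include <linux/netdevice.h>

/* Illustrative only: advertise `num_tc` classes, each backed by an equal
 * slice of the device's TX queues.
 */
static int example_setup_tc(struct net_device *dev, u8 num_tc)
{
	u16 count, offset = 0;
	int i, err;

	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	err = netdev_set_num_tc(dev, num_tc);
	if (err)
		return err;

	/* equal split; a real driver would use its hardware's queue layout */
	count = dev->real_num_tx_queues / num_tc;
	for (i = 0; i < num_tc; i++, offset += count) {
		err = netdev_set_tc_queue(dev, i, count, offset);
		if (err) {
			netdev_reset_tc(dev);
			return err;
		}
	}

	return 0;
}
```
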
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
@@ -3085,8 +3201,14 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 	rcu_read_lock();
 	dev_maps = rcu_dereference(dev->xps_maps);
 	if (dev_maps) {
-		map = rcu_dereference(
-			dev_maps->cpu_map[skb->sender_cpu - 1]);
+		unsigned int tci = skb->sender_cpu - 1;
+
+		if (dev->num_tc) {
+			tci *= dev->num_tc;
+			tci += netdev_get_prio_tc_map(dev, skb->priority);
+		}
+
+		map = rcu_dereference(dev_maps->cpu_map[tci]);
 		if (map) {
 			if (map->len == 1)
 				queue_index = map->queues[0];
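
On the transmit side, get_xps_queue() now folds the traffic class into the map index: the sender CPU still selects the row, and netdev_get_prio_tc_map() applied to skb->priority selects the per-class slot within it. A small plain-C model of that index computation, with invented values:

```c
#include <stdio.h>

/* Model of the lookup index get_xps_queue() builds:
 * row = sender_cpu - 1, num_tc slots per row, slot chosen by prio -> tc map.
 */
static unsigned int xps_tx_index(unsigned int sender_cpu, unsigned int num_tc,
				 unsigned int prio_tc)
{
	unsigned int tci = sender_cpu - 1;	/* sender_cpu is 1-based */

	if (num_tc)
		tci = tci * num_tc + prio_tc;
	return tci;
}

int main(void)
{
	/* e.g. 4 classes; a packet from CPU 3 whose priority maps to tc 2 */
	printf("cpu_map[%u]\n", xps_tx_index(3, 4, 2));	/* prints cpu_map[10] */
	return 0;
}
```
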