@@ -2936,6 +2936,95 @@ int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_loopback_xmit);
 
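+/* Pick a tx queue from the XPS (Transmit Packet Steering) map of the CPU
+ * that originated this skb; returns -1 when no usable mapping exists.
+ */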
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int queue_index = -1;
+
+	rcu_read_lock();
+	dev_maps = rcu_dereference(dev->xps_maps);
+	if (dev_maps) {
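+		/* sender_cpu is stored as cpu id + 1; index the map at cpu id */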
+		map = rcu_dereference(
+		    dev_maps->cpu_map[skb->sender_cpu - 1]);
+		if (map) {
+			if (map->len == 1)
+				queue_index = map->queues[0];
+			else
+				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
+									   map->len)];
+			if (unlikely(queue_index >= dev->real_num_tx_queues))
+				queue_index = -1;
+		}
+	}
+	rcu_read_unlock();
+
+	return queue_index;
+#else
+	return -1;
+#endif
+}
+
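+/* Default queue selection: reuse the socket's cached tx queue unless it is
+ * stale or ooo_okay allows moving the flow; else try XPS, then skb_tx_hash().
+ */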
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	int queue_index = sk_tx_queue_get(sk);
+
+	if (queue_index < 0 || skb->ooo_okay ||
+	    queue_index >= dev->real_num_tx_queues) {
+		int new_index = get_xps_queue(dev, skb);
+		if (new_index < 0)
+			new_index = skb_tx_hash(dev, skb);
+
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, new_index);
+
+		queue_index = new_index;
+	}
+
+	return queue_index;
+}
+
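+/* Select the tx queue for @skb, delegating to the driver's
+ * ndo_select_queue() on multiqueue devices, and record it in the skb.
+ */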
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+				    struct sk_buff *skb,
+				    void *accel_priv)
+{
+	int queue_index = 0;
+
+#ifdef CONFIG_XPS
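+	/* Tag the skb with the sending CPU, offset by one so 0 means unset */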
+	if (skb->sender_cpu == 0)
+		skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
+	if (dev->real_num_tx_queues != 1) {
+		const struct net_device_ops *ops = dev->netdev_ops;
+		if (ops->ndo_select_queue)
+			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+							    __netdev_pick_tx);
+		else
+			queue_index = __netdev_pick_tx(dev, skb);
+
+		if (!accel_priv)
+			queue_index = netdev_cap_txqueue(dev, queue_index);
+	}
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
+}
+
 /**
  * __dev_queue_xmit - transmit a buffer
  * @skb: buffer to transmit