@@ -196,12 +196,6 @@ static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
 	return jhash_3words(a, b, c, hashrnd);
 }
 
-static __always_inline u32 __flow_hash_1word(u32 a)
-{
-	__flow_hash_secret_init();
-	return jhash_1word(a, hashrnd);
-}
-
 static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
 {
 	u32 hash;
@@ -253,7 +247,7 @@ EXPORT_SYMBOL(__skb_get_hash);
  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
  * to be used as a distribution range.
  */
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 		  unsigned int num_tx_queues)
 {
 	u32 hash;
@@ -273,13 +267,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 		qcount = dev->tc_to_txq[tc].count;
 	}
 
-	if (skb->sk && skb->sk->sk_hash)
-		hash = skb->sk->sk_hash;
-	else
-		hash = (__force u16) skb->protocol;
-	hash = __flow_hash_1word(hash);
-
-	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+	return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
@@ -351,17 +339,10 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 		if (map) {
 			if (map->len == 1)
 				queue_index = map->queues[0];
-			else {
-				u32 hash;
-				if (skb->sk && skb->sk->sk_hash)
-					hash = skb->sk->sk_hash;
-				else
-					hash = (__force u16) skb->protocol ^
-					    skb->hash;
-				hash = __flow_hash_1word(hash);
+			else
 				queue_index = map->queues[
-				    ((u64)hash * map->len) >> 32];
-			}
+				    ((u64)skb_get_hash(skb) * map->len) >> 32];
+
 			if (unlikely(queue_index >= dev->real_num_tx_queues))
 				queue_index = -1;
 		}
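
For reference, here is a minimal user-space sketch of the range-mapping expression both call sites keep after this change: multiplying a 32-bit hash by n and shifting right by 32 selects a slot in [0, n) without a modulo. The helper name hash_to_queue and the sample values are hypothetical and added only for illustration; the one piece taken from the patch is the ((u64)hash * n) >> 32 expression itself.

/*
 * Hypothetical user-space illustration (not kernel code) of the
 * ((u64)hash * n) >> 32 mapping used in __skb_tx_hash() and
 * get_xps_queue() above.
 */
#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash uniformly onto [0, num_queues). */
static uint16_t hash_to_queue(uint32_t hash, uint16_t num_queues)
{
	return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
}

int main(void)
{
	uint32_t hashes[] = { 0x00000000u, 0x40000000u, 0x80000000u, 0xffffffffu };
	uint16_t num_queues = 8;

	for (unsigned int i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
		printf("hash 0x%08x -> queue %u\n",
		       (unsigned int)hashes[i],
		       (unsigned int)hash_to_queue(hashes[i], num_queues));
	return 0;
}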