@@ -820,30 +820,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return sk->sk_backlog_rcv(sk, skb);
 }
 
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void sock_rps_record_flow_hash(__u32 hash)
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
 
 	rcu_read_lock();
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
-	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rps_record_sock_flow(sock_flow_table, hash);
 	rcu_read_unlock();
 #endif
 }
 
-static inline void sock_rps_reset_flow(const struct sock *sk)
+static inline void sock_rps_reset_flow_hash(__u32 hash)
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
 
 	rcu_read_lock();
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
-	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rps_reset_sock_flow(sock_flow_table, hash);
 	rcu_read_unlock();
 #endif
 }
 
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+	sock_rps_record_flow_hash(sk->sk_rxhash);
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+	sock_rps_reset_flow_hash(sk->sk_rxhash);
+}
+
 static inline void sock_rps_save_rxhash(struct sock *sk,
 					const struct sk_buff *skb)
 {