@@ -834,9 +834,29 @@ static void efx_filter_rfs_work(struct work_struct *data)
 	struct efx_nic *efx = netdev_priv(req->net_dev);
 	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
 	int slot_idx = req - efx->rps_slot;
+	struct efx_arfs_rule *rule;
+	u16 arfs_id = 0;
 	int rc;

 	rc = efx->type->filter_insert(efx, &req->spec, true);
+	if (efx->rps_hash_table) {
+		spin_lock_bh(&efx->rps_hash_lock);
+		rule = efx_rps_hash_find(efx, &req->spec);
+		/* The rule might have already gone, if someone else's request
+		 * for the same spec was already worked and then expired before
+		 * we got around to our work. In that case we have nothing
+		 * tying us to an arfs_id, meaning that as soon as the filter
+		 * is considered for expiry it will be removed.
+		 */
+		if (rule) {
+			if (rc < 0)
+				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
+			else
+				rule->filter_id = rc;
+			arfs_id = rule->arfs_id;
+		}
+		spin_unlock_bh(&efx->rps_hash_lock);
+	}
 	if (rc >= 0) {
 		/* Remember this so we can check whether to expire the filter
 		 * later.
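This hunk writes rule->filter_id and reads rule->arfs_id under efx->rps_hash_lock, but the efx_arfs_rule structure and the EFX_ARFS_FILTER_ID_* sentinels are not part of the hunk; they belong to the companion header change. A minimal sketch of what the code above appears to assume (the layout and sentinel values are assumptions, not taken from this patch):

/* Sketch only: the real definitions live in the companion header change,
 * which is not shown here.
 */
#define EFX_ARFS_FILTER_ID_PENDING	-1	/* work item queued, no HW filter yet */
#define EFX_ARFS_FILTER_ID_ERROR	-2	/* filter_insert() failed */

struct efx_arfs_rule {
	struct hlist_node node;		/* linkage in efx->rps_hash_table */
	struct efx_filter_spec spec;	/* the flow this rule steers */
	u16 rxq_index;			/* RX queue the flow is steered to */
	u16 arfs_id;			/* ID reported back to the RFS core */
	s32 filter_id;			/* hardware filter ID, or a sentinel above */
};

Under that assumption, the filter_id >= EFX_ARFS_FILTER_ID_PENDING test in the final hunk holds for a pending or successfully inserted filter and fails only after an insertion error.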
@@ -848,18 +868,18 @@ static void efx_filter_rfs_work(struct work_struct *data)

 		if (req->spec.ether_type == htons(ETH_P_IP))
 			netif_info(efx, rx_status, efx->net_dev,
-				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
 				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 				   req->spec.rem_host, ntohs(req->spec.rem_port),
 				   req->spec.loc_host, ntohs(req->spec.loc_port),
-				   req->rxq_index, req->flow_id, rc);
+				   req->rxq_index, req->flow_id, rc, arfs_id);
 		else
 			netif_info(efx, rx_status, efx->net_dev,
-				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
 				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
 				   req->spec.rem_host, ntohs(req->spec.rem_port),
 				   req->spec.loc_host, ntohs(req->spec.loc_port),
-				   req->rxq_index, req->flow_id, rc);
+				   req->rxq_index, req->flow_id, rc, arfs_id);
 	}

 	/* Release references */
@@ -872,8 +892,10 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_async_filter_insertion *req;
+	struct efx_arfs_rule *rule;
 	struct flow_keys fk;
 	int slot_idx;
+	bool new;
 	int rc;

 	/* find a free slot */
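The two locals added here feed the hash-table helpers used by the other hunks. Judging only from the call sites in this patch (efx_rps_hash_find(efx, &req->spec) in the first hunk and efx_rps_hash_add(efx, &req->spec, &new) in the next one), the helpers presumably look roughly like this; the exact declarations belong to the rest of the series, not to this hunk:

/* Sketch only: prototypes inferred from the call sites in this patch. */
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec);
/* Find or create the rule for @spec, setting *new if it was created. */
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new);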
@@ -926,12 +948,42 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	req->spec.rem_port = fk.ports.src;
 	req->spec.loc_port = fk.ports.dst;

+	if (efx->rps_hash_table) {
+		/* Add it to ARFS hash table */
+		spin_lock(&efx->rps_hash_lock);
+		rule = efx_rps_hash_add(efx, &req->spec, &new);
+		if (!rule) {
+			rc = -ENOMEM;
+			goto out_unlock;
+		}
+		if (new)
+			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
+		rc = rule->arfs_id;
+		/* Skip if existing or pending filter already does the right thing */
+		if (!new && rule->rxq_index == rxq_index &&
+		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
+			goto out_unlock;
+		rule->rxq_index = rxq_index;
+		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
+		spin_unlock(&efx->rps_hash_lock);
+	} else {
+		/* Without an ARFS hash table, we just use arfs_id 0 for all
+		 * filters. This means if multiple flows hash to the same
+		 * flow_id, all but the most recently touched will be eligible
+		 * for expiry.
+		 */
+		rc = 0;
+	}
+
+	/* Queue the request */
 	dev_hold(req->net_dev = net_dev);
 	INIT_WORK(&req->work, efx_filter_rfs_work);
 	req->rxq_index = rxq_index;
 	req->flow_id = flow_id;
 	schedule_work(&req->work);
-	return 0;
+	return rc;
+out_unlock:
+	spin_unlock(&efx->rps_hash_lock);
 out_clear:
 	clear_bit(slot_idx, &efx->rps_slot_map);
 	return rc;
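For context rather than as part of this patch: the value returned from the .ndo_rx_flow_steer hook is the ID the networking core later hands back to the driver through rps_may_expire_flow() when the driver scans for stale filters. Returning rule->arfs_id instead of a constant 0 is what lets that scan tell the rule it installed apart from an older rule that happened to hash to the same flow_id. A hedged sketch of such a check; every name except rps_may_expire_flow() and the fields shown in this patch is hypothetical:

#include <linux/netdevice.h>

/* Sketch only: ask the RFS core whether the flow identified by
 * (flow_id, arfs_id) has gone idle or been re-steered, in which case
 * the hardware filter backing it may be removed.
 */
static bool example_rfs_filter_may_expire(struct efx_nic *efx,
					  struct efx_arfs_rule *rule,
					  u32 flow_id)
{
	return rps_may_expire_flow(efx->net_dev, rule->rxq_index,
				   flow_id, rule->arfs_id);
}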