@@ -58,6 +58,7 @@ struct mlx5_nic_flow_attr {
 	u32 flow_tag;
 	u32 mod_hdr_id;
 	u32 hairpin_tirn;
+	u8 match_level;
 	struct mlx5_flow_table *hairpin_ft;
 };
@@ -97,7 +98,7 @@ enum {
 };

 #define MLX5E_TC_TABLE_NUM_GROUPS 4
-#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

 struct mlx5e_hairpin {
 	struct mlx5_hairpin *pair;
@@ -753,7 +754,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 		table_created = true;
 	}

-	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	if (attr->match_level != MLX5_MATCH_NONE)
+		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
 	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
 				   &flow_act, dest, dest_ix);
@@ -789,7 +792,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 	mlx5_del_flow_rules(flow->rule);
 	mlx5_fc_destroy(priv->mdev, counter);

-	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
 		mlx5_destroy_flow_table(priv->fs.tc.t);
 		priv->fs.tc.t = NULL;
 	}
@@ -982,6 +985,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 				}
 			}
 		}
+		if (neigh_used)
+			break;
 	}

 	if (neigh_used) {
@@ -1190,7 +1195,7 @@ vxlan_match_offload_err:
 static int __parse_cls_flower(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
 			      struct tc_cls_flower_offload *f,
-			      u8 *min_inline)
+			      u8 *match_level)
 {
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				       outer_headers);
@@ -1199,7 +1204,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	u16 addr_type = 0;
 	u8 ip_proto = 0;

-	*min_inline = MLX5_INLINE_MODE_L2;
+	*match_level = MLX5_MATCH_NONE;

 	if (f->dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -1249,58 +1254,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 					 inner_headers);
 	}

-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_CONTROL,
-						  f->key);
-
-		struct flow_dissector_key_control *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_CONTROL,
-						  f->mask);
-		addr_type = key->addr_type;
-
-		/* the HW doesn't support frag first/later */
-		if (mask->flags & FLOW_DIS_FIRST_FRAG)
-			return -EOPNOTSUPP;
-
-		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
-				 key->flags & FLOW_DIS_IS_FRAGMENT);
-
-			/* the HW doesn't need L3 inline to match on frag=no */
-			if (key->flags & FLOW_DIS_IS_FRAGMENT)
-				*min_inline = MLX5_INLINE_MODE_IP;
-		}
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  f->key);
-		struct flow_dissector_key_basic *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  f->mask);
-		ip_proto = key->ip_proto;
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
-			 ntohs(mask->n_proto));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-			 ntohs(key->n_proto));
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
-			 mask->ip_proto);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
-			 key->ip_proto);
-
-		if (mask->ip_proto)
-			*min_inline = MLX5_INLINE_MODE_IP;
-	}
-
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 		struct flow_dissector_key_eth_addrs *key =
 			skb_flow_dissector_target(f->dissector,
@@ -1324,6 +1277,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 					     smac_47_16),
 				key->src);
+
+		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+			*match_level = MLX5_MATCH_L2;
 	}

 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -1344,9 +1300,79 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,

 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+
+			*match_level = MLX5_MATCH_L2;
+		}
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->key);
+		struct flow_dissector_key_basic *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->mask);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+			 ntohs(mask->n_proto));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+			 ntohs(key->n_proto));
+
+		if (mask->n_proto)
+			*match_level = MLX5_MATCH_L2;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_dissector_key_control *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_CONTROL,
+						  f->key);
+
+		struct flow_dissector_key_control *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_CONTROL,
+						  f->mask);
+		addr_type = key->addr_type;
+
+		/* the HW doesn't support frag first/later */
+		if (mask->flags & FLOW_DIS_FIRST_FRAG)
+			return -EOPNOTSUPP;
+
+		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+				 key->flags & FLOW_DIS_IS_FRAGMENT);
+
+			/* the HW doesn't need L3 inline to match on frag=no */
+			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
+				*match_level = MLX5_MATCH_L2;
+	/* *** L2 attributes parsing up to here *** */
+			else
+				*match_level = MLX5_MATCH_L3;
 		}
 	}

+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->key);
+		struct flow_dissector_key_basic *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->mask);
+		ip_proto = key->ip_proto;
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+			 mask->ip_proto);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+			 key->ip_proto);
+
+		if (mask->ip_proto)
+			*match_level = MLX5_MATCH_L3;
+	}
+
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
 		struct flow_dissector_key_ipv4_addrs *key =
 			skb_flow_dissector_target(f->dissector,
@@ -1371,7 +1397,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		       &key->dst, sizeof(key->dst));

 		if (mask->src || mask->dst)
-			*min_inline = MLX5_INLINE_MODE_IP;
+			*match_level = MLX5_MATCH_L3;
 	}

 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -1400,7 +1426,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,

 		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
 		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
-			*min_inline = MLX5_INLINE_MODE_IP;
+			*match_level = MLX5_MATCH_L3;
 	}

 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
@@ -1428,9 +1454,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			return -EOPNOTSUPP;

 		if (mask->tos || mask->ttl)
-			*min_inline = MLX5_INLINE_MODE_IP;
+			*match_level = MLX5_MATCH_L3;
 	}

+	/* *** L3 attributes parsing up to here *** */
+
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
 		struct flow_dissector_key_ports *key =
 			skb_flow_dissector_target(f->dissector,
@@ -1471,7 +1499,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		}

 		if (mask->src || mask->dst)
-			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
+			*match_level = MLX5_MATCH_L4;
 	}

 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
@@ -1490,7 +1518,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			 ntohs(key->flags));

 		if (mask->flags)
-			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
+			*match_level = MLX5_MATCH_L4;
 	}

 	return 0;
@@ -1505,23 +1533,28 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep;
-	u8 min_inline;
+	u8 match_level;
 	int err;

-	err = __parse_cls_flower(priv, spec, f, &min_inline);
+	err = __parse_cls_flower(priv, spec, f, &match_level);

 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
 		rep = rpriv->rep;
 		if (rep->vport != FDB_UPLINK_VPORT &&
 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
-		    esw->offloads.inline_mode < min_inline)) {
+		    esw->offloads.inline_mode < match_level)) {
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
-				    min_inline, esw->offloads.inline_mode);
+				    match_level, esw->offloads.inline_mode);
 			return -EOPNOTSUPP;
 		}
 	}

+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+		flow->esw_attr->match_level = match_level;
+	else
+		flow->nic_attr->match_level = match_level;
+
 	return err;
 }
@@ -1577,7 +1610,6 @@ struct mlx5_fields {
 	{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}

 static struct mlx5_fields fields[] = {
-	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
 	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
 	OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
 	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
@@ -1764,12 +1796,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
 		err = -EOPNOTSUPP; /* can't be all optimistic */

 		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
-			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
+			netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
 			goto out_err;
 		}

 		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
-			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
+			netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
 			goto out_err;
 		}
@@ -1793,8 +1825,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
 		cmd_masks = &masks[cmd];
 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
-			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
-			       cmd);
+			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
 				       16, 1, cmd_masks, sizeof(zero_masks), true);
 			err = -EOPNOTSUPP;
@@ -1917,21 +1948,21 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	const struct tc_action *a;
 	LIST_HEAD(actions);
+	u32 action = 0;
 	int err;

 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;

 	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-	attr->action = 0;

 	tcf_exts_to_list(exts, &actions);
 	list_for_each_entry(a, &actions, list) {
 		if (is_tcf_gact_shot(a)) {
-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
 					       flow_table_properties_nic_receive.flow_counter))
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			continue;
 		}
@@ -1941,13 +1972,13 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			if (err)
 				return err;

-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
-					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 			continue;
 		}

 		if (is_tcf_csum(a)) {
-			if (csum_offload_supported(priv, attr->action,
+			if (csum_offload_supported(priv, action,
 						   tcf_csum_update_flags(a)))
 				continue;
@@ -1961,8 +1992,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			    same_hw_devs(priv, netdev_priv(peer_dev))) {
 				parse_attr->mirred_ifindex = peer_dev->ifindex;
 				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-						MLX5_FLOW_CONTEXT_ACTION_COUNT;
+				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			} else {
 				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
 					    peer_dev->name);
@@ -1981,13 +2012,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			}

 			attr->flow_tag = mark;
-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 			continue;
 		}

 		return -EINVAL;
 	}

+	attr->action = action;
 	if (!actions_match_supported(priv, exts, parse_attr, flow))
 		return -EOPNOTSUPP;
@@ -2459,34 +2491,35 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	const struct tc_action *a;
 	LIST_HEAD(actions);
 	bool encap = false;
-	int err = 0;
+	u32 action = 0;

 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;

-	memset(attr, 0, sizeof(*attr));
 	attr->in_rep = rpriv->rep;

 	tcf_exts_to_list(exts, &actions);
 	list_for_each_entry(a, &actions, list) {
 		if (is_tcf_gact_shot(a)) {
-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
-					MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			continue;
 		}

 		if (is_tcf_pedit(a)) {
+			int err;
+
 			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
 						    parse_attr);
 			if (err)
 				return err;

-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 			continue;
 		}

 		if (is_tcf_csum(a)) {
-			if (csum_offload_supported(priv, attr->action,
+			if (csum_offload_supported(priv, action,
 						   tcf_csum_update_flags(a)))
 				continue;
@@ -2501,8 +2534,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,

 			if (switchdev_port_same_parent_id(priv->netdev,
 							  out_dev)) {
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-						MLX5_FLOW_CONTEXT_ACTION_COUNT;
+				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 				out_priv = netdev_priv(out_dev);
 				rpriv = out_priv->ppriv;
 				attr->out_rep = rpriv->rep;
@@ -2510,9 +2543,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				parse_attr->mirred_ifindex = out_dev->ifindex;
 				parse_attr->tun_info = *info;
 				attr->parse_attr = parse_attr;
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
-						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-						MLX5_FLOW_CONTEXT_ACTION_COUNT;
+				action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 				/* attr->out_rep is resolved when we handle encap */
 			} else {
 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
@@ -2533,9 +2566,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,

 		if (is_tcf_vlan(a)) {
 			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+				action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
 			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+				action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
 				attr->vlan_vid = tcf_vlan_push_vid(a);
 				if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
 					attr->vlan_prio = tcf_vlan_push_prio(a);
@@ -2553,17 +2586,18 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		}

 		if (is_tcf_tunnel_release(a)) {
-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
 			continue;
 		}

 		return -EINVAL;
 	}

+	attr->action = action;
 	if (!actions_match_supported(priv, exts, parse_attr, flow))
 		return -EOPNOTSUPP;

-	return err;
+	return 0;
 }

 int mlx5e_configure_flower(struct mlx5e_priv *priv,
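
Note: the comparison "esw->offloads.inline_mode < match_level" in parse_cls_flower() above only works because the MLX5_MATCH_* levels are defined as aliases of the vport inline modes. A minimal sketch of that mapping, assumed to live alongside the other eswitch definitions (eswitch.h) in this series and shown here purely for reference, not as part of any hunk above:

/* Match levels mirror the required vport min-inline modes, so the two
 * value spaces can be compared directly (assumed definition).
 */
enum {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};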
|