@@ -1216,12 +1216,12 @@ int ovs_nla_get_flow_metadata(const struct nlattr *attr,
 	return metadata_from_nlattrs(&match, &attrs, a, false, log);
 }
 
-int ovs_nla_put_flow(const struct sw_flow_key *swkey,
-		     const struct sw_flow_key *output, struct sk_buff *skb)
+static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
+			     const struct sw_flow_key *output, bool is_mask,
+			     struct sk_buff *skb)
 {
 	struct ovs_key_ethernet *eth_key;
 	struct nlattr *nla, *encap;
-	bool is_mask = (swkey != output);
 
 	if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
 		goto nla_put_failure;
@@ -1431,6 +1431,38 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+int ovs_nla_put_key(const struct sw_flow_key *swkey,
+		    const struct sw_flow_key *output, int attr, bool is_mask,
+		    struct sk_buff *skb)
+{
+	int err;
+	struct nlattr *nla;
+
+	nla = nla_nest_start(skb, attr);
+	if (!nla)
+		return -EMSGSIZE;
+	err = __ovs_nla_put_key(swkey, output, is_mask, skb);
+	if (err)
+		return err;
+	nla_nest_end(skb, nla);
+
+	return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_unmasked_key(const struct sw_flow *flow, struct sk_buff *skb)
+{
+	return ovs_nla_put_key(&flow->unmasked_key, &flow->unmasked_key,
+			       OVS_FLOW_ATTR_KEY, false, skb);
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
+{
+	return ovs_nla_put_key(&flow->key, &flow->mask->key,
+			       OVS_FLOW_ATTR_MASK, true, skb);
+}
+
 #define MAX_ACTIONS_BUFSIZE	(32 * 1024)
 
 static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)