@@ -22,6 +22,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/openvswitch.h>
+#include <linux/netfilter_ipv6.h>
 #include <linux/sctp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
@@ -29,6 +30,7 @@
 #include <linux/if_arp.h>
 #include <linux/if_vlan.h>
 
+#include <net/dst.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/checksum.h>
@@ -38,6 +40,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "conntrack.h"
 #include "vport.h"
 
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
@@ -52,6 +55,20 @@ struct deferred_action {
 	struct sw_flow_key pkt_key;
 };
 
+#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
+struct ovs_frag_data {
+	unsigned long dst;
+	struct vport *vport;
+	struct ovs_skb_cb cb;
+	__be16 inner_protocol;
+	__u16 vlan_tci;
+	__be16 vlan_proto;
+	unsigned int l2_len;
+	u8 l2_data[MAX_L2_LEN];
+};
+
+static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
+
 #define DEFERRED_ACTION_FIFO_SIZE 10
 struct action_fifo {
 	int head;
@@ -185,10 +202,6 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	return 0;
 }
 
-/* 'KEY' must not have any bits set outside of the 'MASK' */
-#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
-#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
-
 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		    const __be32 *mpls_lse, const __be32 *mask)
 {
@@ -201,7 +214,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		return err;
 
 	stack = (__be32 *)skb_mpls_header(skb);
-	lse = MASKED(*stack, *mpls_lse, *mask);
+	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		__be32 diff[] = { ~(*stack), lse };
 
@@ -244,9 +257,9 @@ static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
 	const u16 *src = (const u16 *)src_;
 	const u16 *mask = (const u16 *)mask_;
 
-	SET_MASKED(dst[0], src[0], mask[0]);
-	SET_MASKED(dst[1], src[1], mask[1]);
-	SET_MASKED(dst[2], src[2], mask[2]);
+	OVS_SET_MASKED(dst[0], src[0], mask[0]);
+	OVS_SET_MASKED(dst[1], src[1], mask[1]);
+	OVS_SET_MASKED(dst[2], src[2], mask[2]);
 }
 
 static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
@@ -338,10 +351,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
 			   const __be32 mask[4], __be32 masked[4])
 {
-	masked[0] = MASKED(old[0], addr[0], mask[0]);
-	masked[1] = MASKED(old[1], addr[1], mask[1]);
-	masked[2] = MASKED(old[2], addr[2], mask[2]);
-	masked[3] = MASKED(old[3], addr[3], mask[3]);
+	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
+	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
+	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
+	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
 }
 
 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
@@ -358,15 +371,15 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 {
 	/* Bits 21-24 are always unmasked, so this retains their values. */
-	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
-	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
-	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 }
 
 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
 		       u8 mask)
 {
-	new_ttl = MASKED(nh->ttl, new_ttl, mask);
+	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
 
 	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 	nh->ttl = new_ttl;
@@ -392,7 +405,7 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	 * makes sense to check if the value actually changed.
 	 */
 	if (mask->ipv4_src) {
-		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
+		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 
 		if (unlikely(new_addr != nh->saddr)) {
 			set_ip_addr(skb, nh, &nh->saddr, new_addr);
@@ -400,7 +413,7 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		}
 	}
 	if (mask->ipv4_dst) {
-		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
+		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 
 		if (unlikely(new_addr != nh->daddr)) {
 			set_ip_addr(skb, nh, &nh->daddr, new_addr);
@@ -488,7 +501,8 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 	}
 	if (mask->ipv6_hlimit) {
-		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
+			       mask->ipv6_hlimit);
 		flow_key->ip.ttl = nh->hop_limit;
 	}
 	return 0;
@@ -517,8 +531,8 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 
 	uh = udp_hdr(skb);
 	/* Either of the masks is non-zero, so do not bother checking them. */
-	src = MASKED(uh->source, key->udp_src, mask->udp_src);
-	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
+	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
+	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 
 	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
 		if (likely(src != uh->source)) {
@@ -558,12 +572,12 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		return err;
 
 	th = tcp_hdr(skb);
-	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
 	if (likely(src != th->source)) {
 		set_tp_port(skb, &th->source, src, &th->check);
 		flow_key->tp.src = src;
 	}
-	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
 	if (likely(dst != th->dest)) {
 		set_tp_port(skb, &th->dest, dst, &th->check);
 		flow_key->tp.dst = dst;
@@ -590,8 +604,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	old_csum = sh->checksum;
 	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 
-	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
-	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
+	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
+	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 
 	new_csum = sctp_compute_cksum(skb, sctphoff);
 
@@ -605,14 +619,145 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	return 0;
 }
 
-static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
+{
+	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
+	struct vport *vport = data->vport;
+
+	if (skb_cow_head(skb, data->l2_len) < 0) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	__skb_dst_copy(skb, data->dst);
+	*OVS_CB(skb) = data->cb;
+	skb->inner_protocol = data->inner_protocol;
+	skb->vlan_tci = data->vlan_tci;
+	skb->vlan_proto = data->vlan_proto;
+
+	/* Reconstruct the MAC header. */
+	skb_push(skb, data->l2_len);
+	memcpy(skb->data, &data->l2_data, data->l2_len);
+	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
+	skb_reset_mac_header(skb);
+
+	ovs_vport_send(vport, skb);
+	return 0;
+}
+
+static unsigned int
+ovs_dst_get_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
+static struct dst_ops ovs_dst_ops = {
+	.family = AF_UNSPEC,
+	.mtu = ovs_dst_get_mtu,
+};
+
+/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
+ * ovs_vport_output(), which is called once per fragmented packet.
+ */
+static void prepare_frag(struct vport *vport, struct sk_buff *skb)
+{
+	unsigned int hlen = skb_network_offset(skb);
+	struct ovs_frag_data *data;
+
+	data = this_cpu_ptr(&ovs_frag_data_storage);
+	data->dst = skb->_skb_refdst;
+	data->vport = vport;
+	data->cb = *OVS_CB(skb);
+	data->inner_protocol = skb->inner_protocol;
+	data->vlan_tci = skb->vlan_tci;
+	data->vlan_proto = skb->vlan_proto;
+	data->l2_len = hlen;
+	memcpy(&data->l2_data, skb->data, hlen);
+
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+	skb_pull(skb, hlen);
+}
+
+static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
+			 __be16 ethertype)
+{
+	if (skb_network_offset(skb) > MAX_L2_LEN) {
+		OVS_NLERR(1, "L2 header too long to fragment");
+		return;
+	}
+
+	if (ethertype == htons(ETH_P_IP)) {
+		struct dst_entry ovs_dst;
+		unsigned long orig_dst;
+
+		prepare_frag(vport, skb);
+		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_dst);
+		IPCB(skb)->frag_max_size = mru;
+
+		ip_do_fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else if (ethertype == htons(ETH_P_IPV6)) {
+		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+		unsigned long orig_dst;
+		struct rt6_info ovs_rt;
+
+		if (!v6ops) {
+			kfree_skb(skb);
+			return;
+		}
+
+		prepare_frag(vport, skb);
+		memset(&ovs_rt, 0, sizeof(ovs_rt));
+		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_rt.dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_rt.dst);
+		IP6CB(skb)->frag_max_size = mru;
+
+		v6ops->fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else {
+		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
+			  ovs_vport_name(vport), ntohs(ethertype), mru,
+			  vport->dev->mtu);
+		kfree_skb(skb);
+	}
+}
+
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+		      struct sw_flow_key *key)
 {
 	struct vport *vport = ovs_vport_rcu(dp, out_port);
 
-	if (likely(vport))
-		ovs_vport_send(vport, skb);
-	else
+	if (likely(vport)) {
+		u16 mru = OVS_CB(skb)->mru;
+
+		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
+			ovs_vport_send(vport, skb);
+		} else if (mru <= vport->dev->mtu) {
+			__be16 ethertype = key->eth.type;
+
+			if (!is_flow_key_valid(key)) {
+				if (eth_p_mpls(skb->protocol))
+					ethertype = skb->inner_protocol;
+				else
+					ethertype = vlan_get_protocol(skb);
+			}
+
+			ovs_fragment(vport, skb, mru, ethertype);
+		} else {
+			kfree_skb(skb);
+		}
+	} else {
 		kfree_skb(skb);
+	}
 }
 
 static int output_userspace(struct datapath *dp, struct sk_buff *skb,
@@ -626,6 +771,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 
 	memset(&upcall, 0, sizeof(upcall));
 	upcall.cmd = OVS_PACKET_CMD_ACTION;
+	upcall.mru = OVS_CB(skb)->mru;
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 	     a = nla_next(a, &rem)) {
@@ -770,12 +916,13 @@ static int execute_masked_set_action(struct sk_buff *skb,
 
 	switch (nla_type(a)) {
 	case OVS_KEY_ATTR_PRIORITY:
-		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
+			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
 		break;
 
 	case OVS_KEY_ATTR_SKB_MARK:
-		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
 		flow_key->phy.skb_mark = skb->mark;
 		break;
 
@@ -818,6 +965,13 @@ static int execute_masked_set_action(struct sk_buff *skb,
 		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
 								    __be32 *));
 		break;
+
+	case OVS_KEY_ATTR_CT_STATE:
+	case OVS_KEY_ATTR_CT_ZONE:
+	case OVS_KEY_ATTR_CT_MARK:
+	case OVS_KEY_ATTR_CT_LABEL:
+		err = -EINVAL;
+		break;
 	}
 
 	return err;
@@ -887,7 +1041,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
 
 			if (out_skb)
-				do_output(dp, out_skb, prev_port);
+				do_output(dp, out_skb, prev_port, key);
 
 			prev_port = -1;
 		}
@@ -944,6 +1098,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, key, a, attr, len);
 			break;
+
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
+					     nla_data(a));
+
+			/* Hide stolen IP fragments from user space. */
+			if (err == -EINPROGRESS)
+				return 0;
+			break;
 		}
 
 		if (unlikely(err)) {
@@ -953,7 +1116,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 	}
 
 	if (prev_port != -1)
-		do_output(dp, skb, prev_port);
+		do_output(dp, skb, prev_port, key);
 	else
 		consume_skb(skb);
 
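
The hunks above drop the file-local MASKED()/SET_MASKED() helpers and switch every caller to OVS_MASKED()/OVS_SET_MASKED(), which this series apparently provides from a shared Open vSwitch header. As a minimal user-space sketch of the masked-set semantics the callers rely on (the macro bodies below are assumed to match the removed definitions apart from the OVS_ prefix):

/* Standalone sketch of the masked-set semantics used throughout the patch.
 * Assumption: OVS_MASKED()/OVS_SET_MASKED() keep the bodies of the removed
 * file-local MASKED()/SET_MASKED() macros, only with an OVS_ prefix.
 */
#include <stdio.h>
#include <stdint.h>

/* 'KEY' must not have any bits set outside of the 'MASK'. */
#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define OVS_SET_MASKED(OLD, KEY, MASK) ((OLD) = OVS_MASKED(OLD, KEY, MASK))

int main(void)
{
	uint8_t ttl = 0x40;	/* current header field */
	uint8_t key = 0x20;	/* new value; only bits inside the mask are set */
	uint8_t mask = 0xf0;	/* only the high nibble is rewritten */

	OVS_SET_MASKED(ttl, key, mask);	/* bits outside the mask are kept */
	printf("masked ttl = 0x%02x\n", (unsigned)ttl);	/* prints 0x20 */
	return 0;
}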
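
The reworked do_output() decides per packet whether to send, refragment, or drop, based on the maximum received unit (MRU) that conntrack reassembly records in OVS_CB(skb)->mru. A minimal user-space sketch of just that three-way decision; the enum and helper name are illustrative only, not part of the kernel patch (ETH_HLEN is 14 as in <linux/if_ether.h>):

/* Standalone restatement of the MRU check added to do_output(). */
#include <stdio.h>

#define ETH_HLEN 14	/* Ethernet header length */

enum frag_verdict { SEND_AS_IS, REFRAGMENT, DROP };

static enum frag_verdict output_verdict(unsigned int skb_len,
					unsigned int mru,
					unsigned int dev_mtu)
{
	/* No reassembly happened, or the frame still fits: send unchanged. */
	if (!mru || skb_len <= mru + ETH_HLEN)
		return SEND_AS_IS;
	/* Frame exceeds the MRU but the device can carry MRU-sized
	 * fragments: refragment before sending.
	 */
	if (mru <= dev_mtu)
		return REFRAGMENT;
	/* Otherwise the packet cannot be sent without violating the MRU. */
	return DROP;
}

int main(void)
{
	/* e.g. a 3000-byte reassembled frame, MRU 1500, device MTU 1500 */
	printf("%d\n", output_verdict(3000, 1500, 1500));	/* 1 (REFRAGMENT) */
	return 0;
}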