@@ -891,6 +891,16 @@ static size_t rtnl_port_size(const struct net_device *dev,
 	return port_self_size;
 }
 
+static size_t rtnl_xdp_size(const struct net_device *dev)
+{
+	size_t xdp_size = nla_total_size(1);	/* XDP_ATTACHED */
+
+	if (!dev->netdev_ops->ndo_xdp)
+		return 0;
+	else
+		return xdp_size;
+}
+
 static noinline size_t if_nlmsg_size(const struct net_device *dev,
 				     u32 ext_filter_mask)
 {
@@ -927,6 +937,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
 	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
+	       + rtnl_xdp_size(dev) /* IFLA_XDP */
 	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
 
 }
@@ -1211,6 +1222,33 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdev_xdp xdp_op = {};
+	struct nlattr *xdp;
+	int err;
+
+	if (!dev->netdev_ops->ndo_xdp)
+		return 0;
+	xdp = nla_nest_start(skb, IFLA_XDP);
+	if (!xdp)
+		return -EMSGSIZE;
+	xdp_op.command = XDP_QUERY_PROG;
+	err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
+	if (err)
+		goto err_cancel;
+	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, xdp_op.prog_attached);
+	if (err)
+		goto err_cancel;
+
+	nla_nest_end(skb, xdp);
+	return 0;
+
+err_cancel:
+	nla_nest_cancel(skb, xdp);
+	return err;
+}
+
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			    int type, u32 pid, u32 seq, u32 change,
 			    unsigned int flags, u32 ext_filter_mask)
@@ -1307,6 +1345,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 	if (rtnl_port_fill(skb, dev, ext_filter_mask))
 		goto nla_put_failure;
 
+	if (rtnl_xdp_fill(skb, dev))
+		goto nla_put_failure;
+
 	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
 		if (rtnl_link_fill(skb, dev) < 0)
 			goto nla_put_failure;
@@ -1392,6 +1433,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
+	[IFLA_XDP]		= { .type = NLA_NESTED },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1429,6 +1471,11 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
 	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },
 };
 
+static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
+	[IFLA_XDP_FD]		= { .type = NLA_S32 },
+	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
+};
+
 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
 {
 	const struct rtnl_link_ops *ops = NULL;
@@ -2054,6 +2101,23 @@ static int do_setlink(const struct sk_buff *skb,
 			status |= DO_SETLINK_NOTIFY;
 		}
 
+	if (tb[IFLA_XDP]) {
+		struct nlattr *xdp[IFLA_XDP_MAX + 1];
+
+		err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
+				       ifla_xdp_policy);
+		if (err < 0)
+			goto errout;
+
+		if (xdp[IFLA_XDP_FD]) {
+			err = dev_change_xdp_fd(dev,
+						nla_get_s32(xdp[IFLA_XDP_FD]));
+			if (err)
+				goto errout;
+			status |= DO_SETLINK_NOTIFY;
+		}
+	}
+
 errout:
 	if (status & DO_SETLINK_MODIFIED) {
 		if (status & DO_SETLINK_NOTIFY)