@@ -3246,31 +3246,33 @@ static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
 	}
 }
 
-static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
-				   unsigned long aux)
+void bpf_clear_redirect_map(struct bpf_map *map)
 {
-	return (unsigned long)xdp_prog->aux != aux;
+	struct bpf_redirect_info *ri;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
+		/* Avoid polluting remote cacheline due to writes if
+		 * not needed. Once we pass this test, we need the
+		 * cmpxchg() to make sure it hasn't been changed in
+		 * the meantime by remote CPU.
+		 */
+		if (unlikely(READ_ONCE(ri->map) == map))
+			cmpxchg(&ri->map, map, NULL);
+	}
 }
 
 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
-			       struct bpf_prog *xdp_prog)
+			       struct bpf_prog *xdp_prog, struct bpf_map *map)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	unsigned long map_owner = ri->map_owner;
-	struct bpf_map *map = ri->map;
 	u32 index = ri->ifindex;
 	void *fwd = NULL;
 	int err;
 
 	ri->ifindex = 0;
-	ri->map = NULL;
-	ri->map_owner = 0;
-
-	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
-		err = -EFAULT;
-		map = NULL;
-		goto err;
-	}
+	WRITE_ONCE(ri->map, NULL);
 
 	fwd = __xdp_map_lookup_elem(map, index);
 	if (!fwd) {
@@ -3296,12 +3298,13 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		    struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_map *map = READ_ONCE(ri->map);
 	struct net_device *fwd;
 	u32 index = ri->ifindex;
 	int err;
 
-	if (ri->map)
-		return xdp_do_redirect_map(dev, xdp, xdp_prog);
+	if (map)
+		return xdp_do_redirect_map(dev, xdp, xdp_prog, map);
 
 	fwd = dev_get_by_index_rcu(dev_net(dev), index);
 	ri->ifindex = 0;
@@ -3325,24 +3328,17 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect);
 static int xdp_do_generic_redirect_map(struct net_device *dev,
 				       struct sk_buff *skb,
 				       struct xdp_buff *xdp,
-				       struct bpf_prog *xdp_prog)
+				       struct bpf_prog *xdp_prog,
+				       struct bpf_map *map)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	unsigned long map_owner = ri->map_owner;
-	struct bpf_map *map = ri->map;
 	u32 index = ri->ifindex;
 	void *fwd = NULL;
 	int err = 0;
 
 	ri->ifindex = 0;
-	ri->map = NULL;
-	ri->map_owner = 0;
+	WRITE_ONCE(ri->map, NULL);
 
-	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
-		err = -EFAULT;
-		map = NULL;
-		goto err;
-	}
 	fwd = __xdp_map_lookup_elem(map, index);
 	if (unlikely(!fwd)) {
 		err = -EINVAL;
@@ -3379,13 +3375,14 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_map *map = READ_ONCE(ri->map);
 	u32 index = ri->ifindex;
 	struct net_device *fwd;
 	int err = 0;
 
-	if (ri->map)
-		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
-
+	if (map)
+		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
+						   map);
 	ri->ifindex = 0;
 	fwd = dev_get_by_index_rcu(dev_net(dev), index);
 	if (unlikely(!fwd)) {
@@ -3416,8 +3413,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 
 	ri->ifindex = ifindex;
 	ri->flags = flags;
-	ri->map = NULL;
-	ri->map_owner = 0;
+	WRITE_ONCE(ri->map, NULL);
 
 	return XDP_REDIRECT;
 }
@@ -3430,8 +3426,8 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
 	.arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
-	   unsigned long, map_owner)
+BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
+	   u64, flags)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 
@@ -3440,15 +3436,11 @@ BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags
 
 	ri->ifindex = ifindex;
 	ri->flags = flags;
-	ri->map = map;
-	ri->map_owner = map_owner;
+	WRITE_ONCE(ri->map, map);
 
 	return XDP_REDIRECT;
 }
 
-/* Note, arg4 is hidden from users and populated by the verifier
- * with the right pointer.
- */
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
 	.func           = bpf_xdp_redirect_map,
 	.gpl_only       = false,
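
For readers less familiar with the pattern that the new bpf_clear_redirect_map() relies on, below is a minimal userspace sketch of the "plain read first, cmpxchg only if it still matches" idea. It uses an ordinary array in place of the per-CPU bpf_redirect_info instances and GCC's __atomic builtins in place of READ_ONCE()/cmpxchg(); all names here (NR_FAKE_CPUS, fake_redirect_info, fake_clear_redirect_map) are invented for the illustration and are not part of the patch.

/* Userspace sketch only; not kernel code. */
#include <stdio.h>

#define NR_FAKE_CPUS 4

struct fake_redirect_info {
	void *map;	/* stands in for ri->map */
};

static struct fake_redirect_info fake_ri[NR_FAKE_CPUS];

static void fake_clear_redirect_map(void *map)
{
	int cpu;

	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		struct fake_redirect_info *ri = &fake_ri[cpu];
		void *expected = map;

		/* Read first: only write (and bounce the remote
		 * cacheline) if this CPU's slot still points at the
		 * map being torn down.
		 */
		if (__atomic_load_n(&ri->map, __ATOMIC_RELAXED) != map)
			continue;

		/* Compare-and-swap so a concurrent update done by that
		 * CPU between the read and the write is not clobbered
		 * with NULL.
		 */
		__atomic_compare_exchange_n(&ri->map, &expected, NULL,
					    0, __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED);
	}
}

int main(void)
{
	int dummy_map;	/* placeholder for a struct bpf_map */

	fake_ri[2].map = &dummy_map;		/* pretend CPU 2 set it */
	fake_clear_redirect_map(&dummy_map);	/* map is going away */
	printf("cpu2 map after clear: %p\n", fake_ri[2].map);
	return 0;
}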