@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct xdp_buff;
+struct sk_buff;
 
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+			     struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 	return 0;
 }
 
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+					   struct sk_buff *skb,
+					   struct bpf_prog *xdp_prog)
+{
+	return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
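
For context, here is a minimal sketch (not part of the patch) of how a caller on the generic-XDP path might use the helper declared above. Only the prototypes come from this diff; example_generic_redirect() itself, the lookup via __dev_map_lookup_elem(), and the error handling are assumptions made for illustration.

/* Illustrative sketch only; example_generic_redirect() is a hypothetical
 * caller, not kernel code. It resolves a devmap slot for the given index
 * and hands the skb to the new helper, propagating any error so the
 * caller can decide whether to free the skb.
 */
static int example_generic_redirect(struct bpf_map *map, u32 index,
				    struct sk_buff *skb,
				    struct bpf_prog *xdp_prog)
{
	struct bpf_dtab_netdev *dst;

	dst = __dev_map_lookup_elem(map, index);	/* declared above */
	if (!dst)
		return -EINVAL;

	return dev_map_generic_redirect(dst, skb, xdp_prog);
}

Note that the !CONFIG_BPF_SYSCALL stub in the second hunk returns 0 rather than an error, matching the neighboring stubs visible in its context lines: with BPF syscall support compiled out no program can be attached, so the redirect path is never reached and callers still compile.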