@@ -3110,6 +3110,36 @@ void bpf_warn_invalid_xdp_action(u32 act)
 }
 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
 
+static bool __is_valid_sock_ops_access(int off, int size)
+{
+	if (off < 0 || off >= sizeof(struct bpf_sock_ops))
+		return false;
+	/* The verifier guarantees that size > 0. */
+	if (off % size != 0)
+		return false;
+	if (size != sizeof(__u32))
+		return false;
+
+	return true;
+}
+
+static bool sock_ops_is_valid_access(int off, int size,
+				     enum bpf_access_type type,
+				     struct bpf_insn_access_aux *info)
+{
+	if (type == BPF_WRITE) {
+		switch (off) {
+		case offsetof(struct bpf_sock_ops, op) ...
+		     offsetof(struct bpf_sock_ops, replylong[3]):
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return __is_valid_sock_ops_access(off, size);
+}
+
 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 				  const struct bpf_insn *si,
 				  struct bpf_insn *insn_buf,
@@ -3379,6 +3409,138 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
 	return insn - insn_buf;
 }
 
+static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
+				       const struct bpf_insn *si,
+				       struct bpf_insn *insn_buf,
+				       struct bpf_prog *prog)
+{
+	struct bpf_insn *insn = insn_buf;
+	int off;
+
+	switch (si->off) {
+	case offsetof(struct bpf_sock_ops, op) ...
+	     offsetof(struct bpf_sock_ops, replylong[3]):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
+			     FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
+		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
+			     FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
+		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
+			     FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
+		off = si->off;
+		off -= offsetof(struct bpf_sock_ops, op);
+		off += offsetof(struct bpf_sock_ops_kern, op);
+		if (type == BPF_WRITE)
+			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
+					      off);
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+					      off);
+		break;
+
+	case offsetof(struct bpf_sock_ops, family):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+					      struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common, skc_family));
+		break;
+
+	case offsetof(struct bpf_sock_ops, remote_ip4):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+					      struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common, skc_daddr));
+		break;
+
+	case offsetof(struct bpf_sock_ops, local_ip4):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4);
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+					      struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common,
+					       skc_rcv_saddr));
+		break;
+
+	case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
+	     offsetof(struct bpf_sock_ops, remote_ip6[3]):
+#if IS_ENABLED(CONFIG_IPV6)
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+					  skc_v6_daddr.s6_addr32[0]) != 4);
+
+		off = si->off;
+		off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+					      struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common,
+					       skc_v6_daddr.s6_addr32[0]) +
+				      off);
+#else
+		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
+#endif
+		break;
+
+	case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
+	     offsetof(struct bpf_sock_ops, local_ip6[3]):
+#if IS_ENABLED(CONFIG_IPV6)
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);
+
+		off = si->off;
+		off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+					      struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common,
+					       skc_v6_rcv_saddr.s6_addr32[0]) +
+				      off);
+#else
+		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
+#endif
+		break;
+
+	case offsetof(struct bpf_sock_ops, remote_port):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+					      struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common, skc_dport));
+#ifndef __BIG_ENDIAN_BITFIELD
+		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
+#endif
+		break;
+
+	case offsetof(struct bpf_sock_ops, local_port):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+					      struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
+				      offsetof(struct sock_common, skc_num));
+		break;
+	}
+	return insn - insn_buf;
+}
+
 const struct bpf_verifier_ops sk_filter_prog_ops = {
 	.get_func_proto		= sk_filter_func_proto,
 	.is_valid_access	= sk_filter_is_valid_access,
@@ -3428,6 +3590,12 @@ const struct bpf_verifier_ops cg_sock_prog_ops = {
 	.convert_ctx_access	= sock_filter_convert_ctx_access,
 };
 
+const struct bpf_verifier_ops sock_ops_prog_ops = {
+	.get_func_proto		= bpf_base_func_proto,
+	.is_valid_access	= sock_ops_is_valid_access,
+	.convert_ctx_access	= sock_ops_convert_ctx_access,
+};
+
 int sk_detach_filter(struct sock *sk)
 {
 	int ret = -ENOENT;