|
@@ -285,6 +285,72 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
|
|
|
+static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
|
|
|
+{
|
|
|
+ static const char * const names[] = {
|
|
|
+ [NFP_MAP_UNUSED] = "unused",
|
|
|
+ [NFP_MAP_USE_READ] = "read",
|
|
|
+ [NFP_MAP_USE_WRITE] = "write",
|
|
|
+ [NFP_MAP_USE_ATOMIC_CNT] = "atomic",
|
|
|
+ };
|
|
|
+
|
|
|
+ if (use >= ARRAY_SIZE(names) || !names[use])
|
|
|
+ return "unknown";
|
|
|
+ return names[use];
|
|
|
+}
|
|
|
+
|
|
|
+static int
|
|
|
+nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
|
|
|
+ struct nfp_bpf_map *nfp_map,
|
|
|
+ unsigned int off, enum nfp_bpf_map_use use)
|
|
|
+{
|
|
|
+ if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
|
|
|
+ nfp_map->use_map[off / 4] != use) {
|
|
|
+ pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
|
|
|
+ nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
|
|
|
+ nfp_bpf_map_use_name(use), off);
|
|
|
+ return -EOPNOTSUPP;
|
|
|
+ }
|
|
|
+
|
|
|
+ nfp_map->use_map[off / 4] = use;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/* Mark every 4-byte word of the map value touched by this load/store
 * instruction with the given use type.  Requires the access offset to
 * be a compile-time constant, since the per-word use map can only
 * track fixed offsets.
 *
 * Returns 0 on success, -EOPNOTSUPP for variable offsets or use-type
 * conflicts, -EINVAL for out-of-bounds accesses.
 */
static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	/* A variable offset could land on any word at runtime, so it
	 * cannot be recorded in the use map.
	 */
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	/* Full constant offset into the value: known register value plus
	 * the instruction's immediate offset plus the fixed register offset.
	 */
	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

	/* Walk every word the access overlaps.  The step advances to the
	 * next 4-byte boundary, so an unaligned start still marks both
	 * partially-covered words.
	 */
	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}
|
|
|
+
|
|
|
static int
|
|
|
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
|
|
|
struct bpf_verifier_env *env, u8 reg_no)
|
|
@@ -307,10 +373,22 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
|
|
|
}
|
|
|
|
|
|
if (reg->type == PTR_TO_MAP_VALUE) {
|
|
|
+ if (is_mbpf_load(meta)) {
|
|
|
+ err = nfp_bpf_map_mark_used(env, meta, reg,
|
|
|
+ NFP_MAP_USE_READ);
|
|
|
+ if (err)
|
|
|
+ return err;
|
|
|
+ }
|
|
|
if (is_mbpf_store(meta)) {
|
|
|
pr_vlog(env, "map writes not supported\n");
|
|
|
return -EOPNOTSUPP;
|
|
|
}
|
|
|
+ if (is_mbpf_xadd(meta)) {
|
|
|
+ err = nfp_bpf_map_mark_used(env, meta, reg,
|
|
|
+ NFP_MAP_USE_ATOMIC_CNT);
|
|
|
+ if (err)
|
|
|
+ return err;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
|
|
@@ -324,6 +402,31 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+static int
|
|
|
+nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
|
|
|
+ struct bpf_verifier_env *env)
|
|
|
+{
|
|
|
+ const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
|
|
|
+ const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
|
|
|
+
|
|
|
+ if (dreg->type != PTR_TO_MAP_VALUE) {
|
|
|
+ pr_vlog(env, "atomic add not to a map value pointer: %d\n",
|
|
|
+ dreg->type);
|
|
|
+ return -EOPNOTSUPP;
|
|
|
+ }
|
|
|
+ if (sreg->type != SCALAR_VALUE ||
|
|
|
+ sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff) {
|
|
|
+ char tn_buf[48];
|
|
|
+
|
|
|
+ tnum_strn(tn_buf, sizeof(tn_buf), sreg->var_off);
|
|
|
+ pr_vlog(env, "atomic add not of a small constant scalar: %s\n",
|
|
|
+ tn_buf);
|
|
|
+ return -EOPNOTSUPP;
|
|
|
+ }
|
|
|
+
|
|
|
+ return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
|
|
|
+}
|
|
|
+
|
|
|
static int
|
|
|
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
|
|
|
{
|
|
@@ -356,6 +459,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
|
|
|
if (is_mbpf_store(meta))
|
|
|
return nfp_bpf_check_ptr(nfp_prog, meta, env,
|
|
|
meta->insn.dst_reg);
|
|
|
+ if (is_mbpf_xadd(meta))
|
|
|
+ return nfp_bpf_check_xadd(nfp_prog, meta, env);
|
|
|
|
|
|
return 0;
|
|
|
}
|