@@ -80,6 +80,46 @@ exit_set_location:
         nfp_prog->adjust_head_location = location;
 }
 
+static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
+{
+        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+        const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
+        struct bpf_offloaded_map *offmap;
+        struct bpf_func_state *state;
+        struct nfp_bpf_map *nfp_map;
+        int off, i;
+
+        state = env->cur_state->frame[reg3->frameno];
+
+        /* We need to record each time update happens with non-zero words,
+         * in case such word is used in atomic operations.
+         * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before.
+         */
+
+        offmap = map_to_offmap(reg1->map_ptr);
+        nfp_map = offmap->dev_priv;
+        off = reg3->off + reg3->var_off.value;
+
+        for (i = 0; i < offmap->map.value_size; i++) {
+                struct bpf_stack_state *stack_entry;
+                unsigned int soff;
+
+                soff = -(off + i) - 1;
+                stack_entry = &state->stack[soff / BPF_REG_SIZE];
+                if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
+                        continue;
+
+                if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
+                        pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
+                                i, soff);
+                        return false;
+                }
+                nfp_map->use_map[i / 4].non_zero_update = 1;
+        }
+
+        return true;
+}
+
 static int
 nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
                      const struct bpf_reg_state *reg,
@@ -171,7 +211,8 @@ nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
                                          bpf->helpers.map_update, reg1) ||
                     !nfp_bpf_stack_arg_ok("map_update", env, reg2,
                                           meta->func_id ? &meta->arg2 : NULL) ||
-                    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
+                    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
+                    !nfp_bpf_map_update_value_ok(env))
                         return -EOPNOTSUPP;
                 break;
 
@@ -352,15 +393,22 @@ nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
                           struct nfp_bpf_map *nfp_map,
                           unsigned int off, enum nfp_bpf_map_use use)
 {
-        if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
-            nfp_map->use_map[off / 4] != use) {
+        if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
+            nfp_map->use_map[off / 4].type != use) {
                 pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
-                        nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
+                        nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
                         nfp_bpf_map_use_name(use), off);
                 return -EOPNOTSUPP;
         }
 
-        nfp_map->use_map[off / 4] = use;
+        if (nfp_map->use_map[off / 4].non_zero_update &&
+            use == NFP_MAP_USE_ATOMIC_CNT) {
+                pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
+                        off);
+                return -EOPNOTSUPP;
+        }
+
+        nfp_map->use_map[off / 4].type = use;
 
         return 0;
 }
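
Note on the hunks above: they access use_map[] elements through .type and
.non_zero_update, which implies a companion change to struct nfp_bpf_map in
the driver header that is not part of this diff. A minimal sketch of what
that header change presumably looks like -- the field names come from the
hunks, while the wrapper struct name and the bitfield widths are assumptions:

        /* One tracking word per 4 bytes of map value. NFP_MAP_UNUSED is
         * assumed to be 0 so a zeroed allocation starts out as "unused".
         */
        struct nfp_bpf_map_word {
                unsigned char type              :4;     /* enum nfp_bpf_map_use */
                unsigned char non_zero_update   :1;
        };

        struct nfp_bpf_map {
                /* other members unchanged */
                struct nfp_bpf_map_word use_map[];      /* was: enum nfp_bpf_map_use use_map[] */
        };

For context, the pattern nfp_bpf_map_update_value_ok() rejects looks roughly
like the hypothetical BPF program below (map and program names are
illustrative only, using the legacy bpf_map_def/SEC("maps") style). The value
word is non-zero on the stack when bpf_map_update_elem() is called and the
same word is later used as an atomic counter, so offload verification fails;
writing init = 0 would leave the slot marked STACK_ZERO and pass.

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        struct bpf_map_def SEC("maps") counters = {     /* offloaded map */
                .type           = BPF_MAP_TYPE_HASH,
                .key_size       = sizeof(__u32),
                .value_size     = sizeof(__u64),
                .max_entries    = 1024,
        };

        SEC("xdp")
        int count_packets(struct xdp_md *ctx)
        {
                __u32 key = 0;
                __u64 init = 1;         /* non-zero stack word */
                __u64 *cnt;

                bpf_map_update_elem(&counters, &key, &init, BPF_ANY);

                cnt = bpf_map_lookup_elem(&counters, &key);
                if (cnt)
                        __sync_fetch_and_add(cnt, 1);   /* becomes BPF_XADD */

                return XDP_PASS;
        }

        char _license[] SEC("license") = "GPL";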