@@ -1273,7 +1273,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 	}
 }
 
-static int check_call(struct bpf_verifier_env *env, int func_id)
+static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
 	struct bpf_verifier_state *state = &env->cur_state;
 	const struct bpf_func_proto *fn = NULL;
@@ -1369,6 +1369,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
 		}
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
 		regs[BPF_REG_0].id = ++env->id_gen;
+		env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
 	} else {
 		verbose("unknown return type %d of func %s#%d\n",
 			fn->ret_type, func_id_name(func_id), func_id);
@@ -2940,7 +2941,7 @@ static int do_check(struct bpf_verifier_env *env)
 					return -EINVAL;
 				}
 
-				err = check_call(env, insn->imm);
+				err = check_call(env, insn->imm, insn_idx);
 				if (err)
 					return err;
 
@@ -3162,6 +3163,41 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
 			insn->src_reg = 0;
 }
 
+/* single env->prog->insnsi[off] instruction was replaced with the range
+ * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
+ * [0, off) and [off, end) to new locations, so the patched range stays zero
+ */
+static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
+				u32 off, u32 cnt)
+{
+	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+
+	if (cnt == 1)
+		return 0;
+	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
+	if (!new_data)
+		return -ENOMEM;
+	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
+	memcpy(new_data + off + cnt - 1, old_data + off,
+	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+	env->insn_aux_data = new_data;
+	vfree(old_data);
+	return 0;
+}
+
+static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
+					    const struct bpf_insn *patch, u32 len)
+{
+	struct bpf_prog *new_prog;
+
+	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
+	if (!new_prog)
+		return NULL;
+	if (adjust_insn_aux_data(env, new_prog->len, off, len))
+		return NULL;
+	return new_prog;
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -3181,10 +3217,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;
 		} else if (cnt) {
-			new_prog = bpf_patch_insn_single(env->prog, 0,
-							 insn_buf, cnt);
+			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
+
 			env->prog = new_prog;
 			delta += cnt - 1;
 		}
@@ -3209,7 +3245,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		else
 			continue;
 
-		if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
+		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
 			continue;
 
 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
@@ -3218,8 +3254,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			return -EINVAL;
 		}
 
-		new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
-						 cnt);
+		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 		if (!new_prog)
 			return -ENOMEM;
 
@@ -3233,6 +3268,83 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	return 0;
 }
 
+/* fixup insn->imm field of bpf_call instructions
+ * and inline eligible helpers as explicit sequence of BPF instructions
+ *
+ * this function is called after eBPF program passed verification
+ */
+static int fixup_bpf_calls(struct bpf_verifier_env *env)
+{
+	struct bpf_prog *prog = env->prog;
+	struct bpf_insn *insn = prog->insnsi;
+	const struct bpf_func_proto *fn;
+	const int insn_cnt = prog->len;
+	struct bpf_insn insn_buf[16];
+	struct bpf_prog *new_prog;
+	struct bpf_map *map_ptr;
+	int i, cnt, delta = 0;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL))
+			continue;
+
+		if (insn->imm == BPF_FUNC_get_route_realm)
+			prog->dst_needed = 1;
+		if (insn->imm == BPF_FUNC_get_prandom_u32)
+			bpf_user_rnd_init_once();
+		if (insn->imm == BPF_FUNC_xdp_adjust_head)
+			prog->xdp_adjust_head = 1;
+		if (insn->imm == BPF_FUNC_tail_call) {
+			/* mark bpf_tail_call as different opcode to avoid
+			 * conditional branch in the interpreter for every normal
+			 * call and to prevent accidental JITing by JIT compiler
+			 * that doesn't support bpf_tail_call yet
+			 */
+			insn->imm = 0;
+			insn->code |= BPF_X;
+			continue;
+		}
+
+		if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
+			map_ptr = env->insn_aux_data[i + delta].map_ptr;
+			if (!map_ptr->ops->map_gen_lookup)
+				goto patch_call_imm;
+
+			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
+			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+				verbose("bpf verifier is misconfigured\n");
+				return -EINVAL;
+			}
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
+						       cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+
+			/* keep walking new program and skip insns we just inserted */
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
+patch_call_imm:
+		fn = prog->aux->ops->get_func_proto(insn->imm);
+		/* all functions that have prototype and verifier allowed
+		 * programs to call them, must be real in-kernel functions
+		 */
+		if (!fn->func) {
+			verbose("kernel subsystem misconfigured func %s#%d\n",
+				func_id_name(insn->imm), insn->imm);
+			return -EFAULT;
+		}
+		insn->imm = fn->func - __bpf_call_base;
+	}
+
+	return 0;
+}
+
 static void free_states(struct bpf_verifier_env *env)
 {
 	struct bpf_verifier_state_list *sl, *sln;
@@ -3328,6 +3440,9 @@ skip_full_check:
 	/* program is valid, convert *(u32*)(ctx + off) accesses */
 	ret = convert_ctx_accesses(env);
 
+	if (ret == 0)
+		ret = fixup_bpf_calls(env);
+
 	if (log_level && log_len >= log_size - 1) {
 		BUG_ON(log_len >= log_size);
 		/* verifier log exceeded user supplied buffer */
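
For context (not part of the patch above): fixup_bpf_calls() only inlines a lookup when the map's ops provide ->map_gen_lookup(), which must write the replacement sequence into insn_buf and return its length; the verifier then splices that sequence in place of the BPF_CALL via bpf_patch_insn_data(). Below is a rough, hedged sketch of the kind of sequence such a callback could emit for a simple array-style map. It relies on the helper calling convention at the patched call site (R1 = map pointer, R2 = pointer to the key, result in R0); the function name sketch_array_gen_lookup is illustrative only, and a real implementation might additionally special-case power-of-two element sizes.

/* Illustrative sketch only -- not part of the diff above. */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

static u32 sketch_array_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	/* R1 = &array->value[0] (value is the flexible array in struct bpf_array) */
	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	/* R0 = *(u32 *)key */
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	/* out-of-range index: jump to the "return NULL" instruction below */
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	/* R0 = index * elem_size; R0 += &value[0]  =>  R0 = &value[index] */
	*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	/* valid lookup: skip over the NULL path */
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	/* R0 = NULL for an out-of-range key */
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;	/* 7 instructions, well under insn_buf[16] */
}

Because the call instruction is replaced in place, both exit paths of the emitted sequence fall through to the instruction that originally followed the BPF_CALL, and fixup_bpf_calls() bumps delta by cnt - 1 so later insn_aux_data lookups stay aligned.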