@@ -466,6 +466,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
 		free_func_state(dst_state->frame[i]);
 		dst_state->frame[i] = NULL;
 	}
+	dst_state->speculative = src->speculative;
 	dst_state->curframe = src->curframe;
 	dst_state->parent = src->parent;
 	for (i = 0; i <= src->curframe; i++) {
@@ -511,7 +512,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
 }
 
 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
-					     int insn_idx, int prev_insn_idx)
+					     int insn_idx, int prev_insn_idx,
+					     bool speculative)
 {
 	struct bpf_verifier_state *cur = env->cur_state;
 	struct bpf_verifier_stack_elem *elem;
@@ -529,6 +531,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 	err = copy_verifier_state(&elem->st, cur);
 	if (err)
 		goto err;
+	elem->st.speculative |= speculative;
 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
 		verbose(env, "BPF program is too complex\n");
 		goto err;
@@ -2698,6 +2701,102 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
 	return true;
 }
 
+static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
+{
+	return &env->insn_aux_data[env->insn_idx];
+}
+
+static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
+{
+	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+			    (opcode == BPF_SUB && !off_is_neg);
+	u32 off;
+
+	switch (ptr_reg->type) {
+	case PTR_TO_STACK:
+		off = ptr_reg->off + ptr_reg->var_off.value;
+		if (mask_to_left)
+			*ptr_limit = MAX_BPF_STACK + off;
+		else
+			*ptr_limit = -off;
+		return 0;
+	case PTR_TO_MAP_VALUE:
+		if (mask_to_left) {
+			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+		} else {
+			off = ptr_reg->smin_value + ptr_reg->off;
+			*ptr_limit = ptr_reg->map_ptr->value_size - off;
+		}
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn,
+			    const struct bpf_reg_state *ptr_reg,
+			    struct bpf_reg_state *dst_reg,
+			    bool off_is_neg)
+{
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+	bool ptr_is_dst_reg = ptr_reg == dst_reg;
+	u8 opcode = BPF_OP(insn->code);
+	u32 alu_state, alu_limit;
+	struct bpf_reg_state tmp;
+	bool ret;
+
+	if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
+		return 0;
+
+	/* We already marked aux for masking from non-speculative
+	 * paths, thus we got here in the first place. We only care
+	 * to explore bad access from here.
+	 */
+	if (vstate->speculative)
+		goto do_sim;
+
+	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+	alu_state |= ptr_is_dst_reg ?
+		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
+		return 0;
+
+	/* If we arrived here from different branches with different
+	 * limits to sanitize, then this won't work.
+	 */
+	if (aux->alu_state &&
+	    (aux->alu_state != alu_state ||
+	     aux->alu_limit != alu_limit))
+		return -EACCES;
+
+	/* Corresponding fixup done in fixup_bpf_calls(). */
+	aux->alu_state = alu_state;
+	aux->alu_limit = alu_limit;
+
+do_sim:
+	/* Simulate and find potential out-of-bounds access under
+	 * speculative execution from truncation as a result of
+	 * masking when off was not within expected range. If off
+	 * sits in dst, then we temporarily need to move ptr there
+	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
+	 * for cases where we use K-based arithmetic in one direction
+	 * and truncated reg-based in the other in order to explore
+	 * bad access.
+	 */
+	if (!ptr_is_dst_reg) {
+		tmp = *dst_reg;
+		*dst_reg = *ptr_reg;
+	}
+	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+	if (!ptr_is_dst_reg)
+		*dst_reg = tmp;
+	return !ret ? -EFAULT : 0;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -2718,6 +2817,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
 	u32 dst = insn->dst_reg, src = insn->src_reg;
 	u8 opcode = BPF_OP(insn->code);
+	int ret;
 
 	dst_reg = &regs[dst];
 
@@ -2772,6 +2872,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
 	switch (opcode) {
 	case BPF_ADD:
+		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different maps or paths\n", dst);
+			return ret;
+		}
 		/* We can take a fixed offset as long as it doesn't overflow
 		 * the s32 'off' field
 		 */
@@ -2822,6 +2927,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		}
 		break;
 	case BPF_SUB:
+		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different maps or paths\n", dst);
+			return ret;
+		}
 		if (dst_reg == off_reg) {
 			/* scalar -= pointer. Creates an unknown scalar */
 			verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -3997,7 +4107,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		}
 	}
 
-	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
+	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
+				  false);
 	if (!other_branch)
 		return -EFAULT;
 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
@@ -4712,6 +4823,12 @@ static bool states_equal(struct bpf_verifier_env *env,
 	if (old->curframe != cur->curframe)
 		return false;
 
+	/* Verification state from speculative execution simulation
+	 * must never prune a non-speculative execution one.
+	 */
+	if (old->speculative && !cur->speculative)
+		return false;
+
 	/* for states to be equal callsites have to be the same
 	 * and all frame states need to be equivalent
 	 */
@@ -4863,7 +4980,7 @@ static int do_check(struct bpf_verifier_env *env)
 	if (!state)
 		return -ENOMEM;
 	state->curframe = 0;
-	state->parent = NULL;
+	state->speculative = false;
 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
 	if (!state->frame[0]) {
 		kfree(state);
@@ -4903,8 +5020,10 @@ static int do_check(struct bpf_verifier_env *env)
 		/* found equivalent state, can prune the search */
 		if (env->log.level) {
 			if (do_print_state)
-				verbose(env, "\nfrom %d to %d: safe\n",
-					env->prev_insn_idx, env->insn_idx);
+				verbose(env, "\nfrom %d to %d%s: safe\n",
+					env->prev_insn_idx, env->insn_idx,
+					env->cur_state->speculative ?
+					" (speculative execution)" : "");
 			else
 				verbose(env, "%d: safe\n", env->insn_idx);
 		}
@@ -4921,8 +5040,10 @@ static int do_check(struct bpf_verifier_env *env)
 			if (env->log.level > 1)
 				verbose(env, "%d:", env->insn_idx);
 			else
-				verbose(env, "\nfrom %d to %d:",
-					env->prev_insn_idx, env->insn_idx);
+				verbose(env, "\nfrom %d to %d%s:",
+					env->prev_insn_idx, env->insn_idx,
+					env->cur_state->speculative ?
+					" (speculative execution)" : "");
 			print_verifier_state(env, state->frame[state->curframe]);
 			do_print_state = false;
 		}
@@ -5869,6 +5990,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			continue;
 		}
 
+		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
+		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
+			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
+			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
+			struct bpf_insn insn_buf[16];
+			struct bpf_insn *patch = &insn_buf[0];
+			bool issrc, isneg;
+			u32 off_reg;
+
+			aux = &env->insn_aux_data[i + delta];
+			if (!aux->alu_state)
+				continue;
+
+			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
+				BPF_ALU_SANITIZE_SRC;
+
+			off_reg = issrc ? insn->src_reg : insn->dst_reg;
+			if (isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
+			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+			if (issrc) {
+				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
+							 off_reg);
+				insn->src_reg = BPF_REG_AX;
+			} else {
+				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
+							 BPF_REG_AX);
+			}
+			if (isneg)
+				insn->code = insn->code == code_add ?
+					     code_sub : code_add;
+			*patch++ = *insn;
+			if (issrc && isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+			cnt = patch - insn_buf;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta    += cnt - 1;
+			env->prog = prog = new_prog;
+			insn      = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 		if (insn->code != (BPF_JMP | BPF_CALL))
 			continue;
 		if (insn->src_reg == BPF_PSEUDO_CALL)
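
For reference, the runtime effect of the masking sequence emitted above (MOV32 AX, alu_limit - 1; SUB; OR; NEG; ARSH 63; AND) can be sketched in plain C. This is a minimal userspace illustration, not kernel code: the helper name sanitize_off and the sample limits are made up, and it models only the non-negative-offset case (isneg == false); the ARSH step is written as an explicit sign test to keep the C well defined.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the emitted BPF sequence: the scalar offset survives
 * unchanged when 0 <= off <= alu_limit - 1 and collapses to 0
 * otherwise, so a mispredicted path cannot move the pointer out
 * of bounds.
 */
static uint64_t sanitize_off(uint64_t off, uint32_t alu_limit)
{
	uint64_t ax = (uint32_t)(alu_limit - 1);	/* BPF_MOV32_IMM(AX, limit - 1) */

	ax -= off;					/* BPF_ALU64_REG(BPF_SUB, AX, off): sign bit set if off too large */
	ax |= off;					/* BPF_ALU64_REG(BPF_OR, AX, off): sign bit set if off itself is "negative" */
	ax = -ax;					/* BPF_ALU64_IMM(BPF_NEG, AX, 0) */
	ax = ((int64_t)ax < 0) ? UINT64_MAX : 0;	/* models BPF_ALU64_IMM(BPF_ARSH, AX, 63) */
	return ax & off;				/* BPF_ALU64_REG(BPF_AND, AX, off) */
}

int main(void)
{
	/* e.g. an alu_limit of 16 as retrieve_ptr_limit() might return
	 * for a 16-byte map value (illustrative numbers only).
	 */
	printf("%llu\n", (unsigned long long)sanitize_off(8, 16));	/* in range: prints 8 */
	printf("%llu\n", (unsigned long long)sanitize_off(100, 16));	/* too large: prints 0 */
	printf("%llu\n", (unsigned long long)sanitize_off(-4ULL, 16));	/* "negative": prints 0 */
	return 0;
}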