|
@@ -481,6 +481,13 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
|
|
regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
|
|
regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs,
|
|
|
|
+ u32 regno)
|
|
|
|
+{
|
|
|
|
+ mark_reg_unknown_value(regs, regno);
|
|
|
|
+ reset_reg_range_values(regs, regno);
|
|
|
|
+}
|
|
|
|
+
|
|
enum reg_arg_type {
|
|
enum reg_arg_type {
|
|
SRC_OP, /* register is used as source operand */
|
|
SRC_OP, /* register is used as source operand */
|
|
DST_OP, /* register is used as destination operand */
|
|
DST_OP, /* register is used as destination operand */
|
|
@@ -532,6 +539,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
|
|
switch (type) {
|
|
switch (type) {
|
|
case PTR_TO_MAP_VALUE:
|
|
case PTR_TO_MAP_VALUE:
|
|
case PTR_TO_MAP_VALUE_OR_NULL:
|
|
case PTR_TO_MAP_VALUE_OR_NULL:
|
|
|
|
+ case PTR_TO_MAP_VALUE_ADJ:
|
|
case PTR_TO_STACK:
|
|
case PTR_TO_STACK:
|
|
case PTR_TO_CTX:
|
|
case PTR_TO_CTX:
|
|
case PTR_TO_PACKET:
|
|
case PTR_TO_PACKET:
|
|
@@ -616,7 +624,8 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
|
|
}
|
|
}
|
|
if (value_regno >= 0)
|
|
if (value_regno >= 0)
|
|
/* have read misc data from the stack */
|
|
/* have read misc data from the stack */
|
|
- mark_reg_unknown_value(state->regs, value_regno);
|
|
|
|
|
|
+ mark_reg_unknown_value_and_range(state->regs,
|
|
|
|
+ value_regno);
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
@@ -825,7 +834,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
else
|
|
else
|
|
err = check_map_access(env, regno, off, size);
|
|
err = check_map_access(env, regno, off, size);
|
|
if (!err && t == BPF_READ && value_regno >= 0)
|
|
if (!err && t == BPF_READ && value_regno >= 0)
|
|
- mark_reg_unknown_value(state->regs, value_regno);
|
|
|
|
|
|
+ mark_reg_unknown_value_and_range(state->regs,
|
|
|
|
+ value_regno);
|
|
|
|
|
|
} else if (reg->type == PTR_TO_CTX) {
|
|
} else if (reg->type == PTR_TO_CTX) {
|
|
enum bpf_reg_type reg_type = UNKNOWN_VALUE;
|
|
enum bpf_reg_type reg_type = UNKNOWN_VALUE;
|
|
@@ -837,7 +847,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
}
|
|
}
|
|
err = check_ctx_access(env, off, size, t, &reg_type);
|
|
err = check_ctx_access(env, off, size, t, &reg_type);
|
|
if (!err && t == BPF_READ && value_regno >= 0) {
|
|
if (!err && t == BPF_READ && value_regno >= 0) {
|
|
- mark_reg_unknown_value(state->regs, value_regno);
|
|
|
|
|
|
+ mark_reg_unknown_value_and_range(state->regs,
|
|
|
|
+ value_regno);
|
|
/* note that reg.[id|off|range] == 0 */
|
|
/* note that reg.[id|off|range] == 0 */
|
|
state->regs[value_regno].type = reg_type;
|
|
state->regs[value_regno].type = reg_type;
|
|
}
|
|
}
|
|
@@ -870,7 +881,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
}
|
|
}
|
|
err = check_packet_access(env, regno, off, size);
|
|
err = check_packet_access(env, regno, off, size);
|
|
if (!err && t == BPF_READ && value_regno >= 0)
|
|
if (!err && t == BPF_READ && value_regno >= 0)
|
|
- mark_reg_unknown_value(state->regs, value_regno);
|
|
|
|
|
|
+ mark_reg_unknown_value_and_range(state->regs,
|
|
|
|
+ value_regno);
|
|
} else {
|
|
} else {
|
|
verbose("R%d invalid mem access '%s'\n",
|
|
verbose("R%d invalid mem access '%s'\n",
|
|
regno, reg_type_str[reg->type]);
|
|
regno, reg_type_str[reg->type]);
|
|
@@ -2744,7 +2756,6 @@ static int do_check(struct bpf_verifier_env *env)
|
|
if (err)
|
|
if (err)
|
|
return err;
|
|
return err;
|
|
|
|
|
|
- reset_reg_range_values(regs, insn->dst_reg);
|
|
|
|
if (BPF_SIZE(insn->code) != BPF_W &&
|
|
if (BPF_SIZE(insn->code) != BPF_W &&
|
|
BPF_SIZE(insn->code) != BPF_DW) {
|
|
BPF_SIZE(insn->code) != BPF_DW) {
|
|
insn_idx++;
|
|
insn_idx++;
|