@@ -5341,10 +5341,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	int i, cnt, size, ctx_field_size, delta = 0;
 	const int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16], *insn;
+	u32 target_size, size_default, off;
 	struct bpf_prog *new_prog;
 	enum bpf_access_type type;
 	bool is_narrower_load;
-	u32 target_size;
 
 	if (ops->gen_prologue) {
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -5421,9 +5421,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		 * we will apply proper mask to the result.
 		 */
 		is_narrower_load = size < ctx_field_size;
+		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
+		off = insn->off;
 		if (is_narrower_load) {
-			u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
-			u32 off = insn->off;
 			u8 size_code;
 
 			if (type == BPF_WRITE) {
@@ -5451,12 +5451,23 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		}
 
 		if (is_narrower_load && size < target_size) {
-			if (ctx_field_size <= 4)
+			u8 shift = (off & (size_default - 1)) * 8;
+
+			if (ctx_field_size <= 4) {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
 							(1 << size * 8) - 1);
-			else
+			} else {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
 							(1 << size * 8) - 1);
+			}
 		}
 
 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
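
For reference, here is a minimal user-space sketch (not kernel code; the function name
narrow_load_value() and the example values are made up) of what the patched instruction
sequence computes for a narrow context read: the full field is loaded, then shifted right
by (off & (size_default - 1)) * 8 bits when the in-field offset is non-zero, and finally
masked with (1 << size * 8) - 1, exactly as in the BPF_RSH/BPF_AND instructions emitted
above.

#include <stdint.h>
#include <stdio.h>

/* Models the rewritten narrow load for size < 8 bytes (illustrative only). */
static uint64_t narrow_load_value(uint64_t full, uint32_t off,
				  uint32_t size, uint32_t size_default)
{
	/* shift right by the in-field byte offset, as the added BPF_RSH does */
	uint8_t shift = (off & (size_default - 1)) * 8;
	uint64_t v = shift ? full >> shift : full;

	/* mask down to the requested width, as the existing BPF_AND does */
	return v & ((1ULL << (size * 8)) - 1);
}

int main(void)
{
	/* e.g. a 1-byte load at offset 2 of a 4-byte field: shift = 16, mask = 0xff */
	printf("0x%llx\n",
	       (unsigned long long)narrow_load_value(0xaabbccddULL, 2, 1, 4));
	return 0;
}

With the hypothetical input above this prints 0xbb, i.e. the third byte of the field,
which is what the new shift-then-mask sequence makes possible for offsets greater than 0.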