@@ -364,7 +364,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const int i = insn - ctx->prog->insnsi;
 	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
-	u8 jmp_cond;
+	u8 jmp_cond, reg;
 	s32 jmp_offset;
 
 #define check_imm(bits, imm) do { \
@@ -730,18 +730,28 @@ emit_cond_jmp:
 			break;
 		}
 		break;
+
 	/* STX XADD: lock *(u32 *)(dst + off) += src */
 	case BPF_STX | BPF_XADD | BPF_W:
 	/* STX XADD: lock *(u64 *)(dst + off) += src */
 	case BPF_STX | BPF_XADD | BPF_DW:
-		emit_a64_mov_i(1, tmp, off, ctx);
-		emit(A64_ADD(1, tmp, tmp, dst), ctx);
-		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
-		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
-		jmp_offset = -3;
-		check_imm19(jmp_offset);
-		emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+		if (!off) {
+			reg = dst;
+		} else {
+			emit_a64_mov_i(1, tmp, off, ctx);
+			emit(A64_ADD(1, tmp, tmp, dst), ctx);
+			reg = tmp;
+		}
+		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
+			emit(A64_STADD(isdw, reg, src), ctx);
+		} else {
+			emit(A64_LDXR(isdw, tmp2, reg), ctx);
+			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+			jmp_offset = -3;
+			check_imm19(jmp_offset);
+			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+		}
 		break;
 
 	default:
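For context, the hunk above replaces the unconditional LDXR/ADD/STXR/CBNZ retry loop with a single STADD instruction whenever the CPU advertises LSE atomics (the ARM64_HAS_LSE_ATOMICS capability, ARMv8.1 and later); A64_STADD and cpus_have_cap() are defined outside this hunk, in the arm64 JIT and cpufeature headers. A minimal C sketch of the semantics the JITed sequence must preserve (the helper name bpf_xadd64 is hypothetical, not part of the patch):

#include <stdint.h>

/* Illustrative only: BPF_STX | BPF_XADD | BPF_DW performs an atomic
 * *(u64 *)(dst + off) += src with no value returned. A compiler
 * targeting ARMv8.1 LSE can lower this relaxed fetch-add to a single
 * STADD; without LSE it becomes the same exclusive-load/store retry
 * loop the fallback path emits.
 */
static inline void bpf_xadd64(uint64_t *addr, uint64_t src)
{
	__atomic_fetch_add(addr, src, __ATOMIC_RELAXED);
}

The new off == 0 fast path also skips materializing dst + off in a scratch register, and STADD avoids the retry loop entirely under contention, which is what makes it the more scalable choice on many-core systems.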