Diffstat (limited to 'arch/arm64/net/bpf_jit_comp.c')
-rw-r--r-- | arch/arm64/net/bpf_jit_comp.c | 138 |
1 file changed, 110 insertions, 28 deletions
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 66708b95493a..70d7c89d3ac9 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -267,6 +267,19 @@ static bool is_addsub_imm(u32 imm)
 	return !(imm & ~0xfff) || !(imm & ~0xfff000);
 }
 
+static inline void emit_a64_add_i(const bool is64, const int dst, const int src,
+				  const int tmp, const s32 imm, struct jit_ctx *ctx)
+{
+	if (is_addsub_imm(imm)) {
+		emit(A64_ADD_I(is64, dst, src, imm), ctx);
+	} else if (is_addsub_imm(-(u32)imm)) {
+		emit(A64_SUB_I(is64, dst, src, -imm), ctx);
+	} else {
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_ADD(is64, dst, src, tmp), ctx);
+	}
+}
+
 /*
  * There are 3 types of AArch64 LDR/STR (immediate) instruction:
  * Post-index, Pre-index, Unsigned offset.
@@ -634,6 +647,81 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	return 0;
 }
 
+static int emit_atomic_ld_st(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+	const s32 imm = insn->imm;
+	const s16 off = insn->off;
+	const u8 code = insn->code;
+	const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;
+	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 dst = bpf2a64[insn->dst_reg];
+	const u8 src = bpf2a64[insn->src_reg];
+	const u8 tmp = bpf2a64[TMP_REG_1];
+	u8 reg;
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		reg = src;
+		break;
+	case BPF_STORE_REL:
+		reg = dst;
+		break;
+	default:
+		pr_err_once("unknown atomic load/store op code %02x\n", imm);
+		return -EINVAL;
+	}
+
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
+	}
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_LDARB(dst, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_LDARH(dst, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_LDAR32(dst, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_LDAR64(dst, reg), ctx);
+			break;
+		}
+		break;
+	case BPF_STORE_REL:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_STLRB(src, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_STLRH(src, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_STLR32(src, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_STLR64(src, reg), ctx);
+			break;
+		}
+		break;
+	default:
+		pr_err_once("unexpected atomic load/store op code %02x\n",
+			    imm);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
@@ -648,16 +736,13 @@ static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const s16 off = insn->off;
 	u8 reg = dst;
 
-	if (off || arena) {
-		if (off) {
-			emit_a64_mov_i(1, tmp, off, ctx);
-			emit(A64_ADD(1, tmp, tmp, dst), ctx);
-			reg = tmp;
-		}
-		if (arena) {
-			emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
-			reg = tmp;
-		}
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
 	}
 
 	switch (insn->imm) {
@@ -723,7 +808,7 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const s32 imm = insn->imm;
 	const s16 off = insn->off;
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
-	u8 reg;
+	u8 reg = dst;
 	s32 jmp_offset;
 
 	if (BPF_MODE(code) == BPF_PROBE_ATOMIC) {
@@ -732,11 +817,8 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		return -EINVAL;
 	}
 
-	if (!off) {
-		reg = dst;
-	} else {
-		emit_a64_mov_i(1, tmp, off, ctx);
-		emit(A64_ADD(1, tmp, tmp, dst), ctx);
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
 		reg = tmp;
 	}
 
@@ -1146,20 +1228,13 @@ emit_bswap_uxt:
 	/* dst = dst OP imm */
 	case BPF_ALU | BPF_ADD | BPF_K:
 	case BPF_ALU64 | BPF_ADD | BPF_K:
-		if (is_addsub_imm(imm)) {
-			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
-		} else if (is_addsub_imm(-imm)) {
-			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
-		} else {
-			emit_a64_mov_i(is64, tmp, imm, ctx);
-			emit(A64_ADD(is64, dst, dst, tmp), ctx);
-		}
+		emit_a64_add_i(is64, dst, dst, tmp, imm, ctx);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K:
 	case BPF_ALU64 | BPF_SUB | BPF_K:
 		if (is_addsub_imm(imm)) {
 			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
-		} else if (is_addsub_imm(-imm)) {
+		} else if (is_addsub_imm(-(u32)imm)) {
 			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
 		} else {
 			emit_a64_mov_i(is64, tmp, imm, ctx);
@@ -1330,7 +1405,7 @@ emit_cond_jmp:
 	case BPF_JMP32 | BPF_JSLE | BPF_K:
 		if (is_addsub_imm(imm)) {
 			emit(A64_CMP_I(is64, dst, imm), ctx);
-		} else if (is_addsub_imm(-imm)) {
+		} else if (is_addsub_imm(-(u32)imm)) {
 			emit(A64_CMN_I(is64, dst, -imm), ctx);
 		} else {
 			emit_a64_mov_i(is64, tmp, imm, ctx);
@@ -1641,11 +1716,17 @@ emit_cond_jmp:
 			return ret;
 		break;
 
+	case BPF_STX | BPF_ATOMIC | BPF_B:
+	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
-		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (bpf_atomic_is_load_store(insn))
+			ret = emit_atomic_ld_st(insn, ctx);
+		else if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			ret = emit_lse_atomic(insn, ctx);
 		else
 			ret = emit_ll_sc_atomic(insn, ctx);
@@ -2669,7 +2750,8 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
 	switch (insn->code) {
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
-		if (!cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (!bpf_atomic_is_load_store(insn) &&
+		    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			return false;
 	}
 	return true;
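
The new emit_a64_add_i() helper folds the add-immediate selection that the ALU and atomics paths previously open-coded: emit one A64 ADD (immediate) when the value fits, one SUB (immediate) when its negation fits, and otherwise materialize the value in a scratch register. The -(u32)imm spelling (also applied to the BPF_SUB and conditional-jump cases) negates in unsigned arithmetic, which avoids signed-overflow undefined behaviour when imm is S32_MIN while still excluding that value from the single-instruction paths. A small user-space sketch, not part of the patch: only the is_addsub_imm() check mirrors the kernel code, the driver around it is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the is_addsub_imm() check in bpf_jit_comp.c: true when the value
 * fits the 12-bit immediate field of A64 ADD/SUB, optionally shifted left
 * by 12 bits.
 */
static int is_addsub_imm(uint32_t imm)
{
	return !(imm & ~0xfffu) || !(imm & ~0xfff000u);
}

int main(void)
{
	int32_t samples[] = { 5, -5, 0x1000, -0x123456, INT32_MIN };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int32_t imm = samples[i];

		if (is_addsub_imm(imm))
			printf("%11d: one ADD (immediate)\n", imm);
		else if (is_addsub_imm(-(uint32_t)imm))
			/* negating through u32 avoids UB for INT32_MIN */
			printf("%11d: one SUB (immediate)\n", imm);
		else
			printf("%11d: mov to tmp, then register ADD\n", imm);
	}
	return 0;
}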
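
For reference, the LDAR/STLR family used by emit_atomic_ld_st() belongs to the ARMv8.0 base instruction set, unlike the LSE read-modify-write atomics, which is why the new path runs regardless of ARM64_HAS_LSE_ATOMICS and why bpf_jit_supports_insn() now admits load/store atomics on non-LSE cores. A plain C11 analogue, not from the patch, showing that an acquire load and a release store compile to the same instructions with an arm64 host toolchain (for example aarch64-linux-gnu-gcc -O2 -S):

#include <stdatomic.h>
#include <stdint.h>

/* Host-side analogue (not kernel code): on AArch64 these compile to LDAR and
 * STLR, the instructions emit_atomic_ld_st() emits for BPF_LOAD_ACQ and
 * BPF_STORE_REL.
 */
uint64_t load_acquire(_Atomic uint64_t *p)
{
	return atomic_load_explicit(p, memory_order_acquire);	/* ldar x0, [x0] */
}

void store_release(_Atomic uint64_t *p, uint64_t v)
{
	atomic_store_explicit(p, v, memory_order_release);	/* stlr x1, [x0] */
}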
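
On the BPF side, these encodings are typically produced from acquire/release builtins. A hypothetical program sketch, assuming a Clang/LLVM recent enough to emit the BPF load-acquire/store-release instructions (and the matching -mcpu level); the section name, variable, and program name are illustrative, not taken from the patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u64 shared_counter;

SEC("socket")	/* illustrative program type */
int acq_rel_example(void *ctx)
{
	/* With a new-enough Clang this lowers to BPF_LOAD_ACQ, which the
	 * arm64 JIT turns into an LDAR64.
	 */
	__u64 v = __atomic_load_n(&shared_counter, __ATOMIC_ACQUIRE);

	/* Likewise lowers to BPF_STORE_REL, JITed to STLR64. */
	__atomic_store_n(&shared_counter, v + 1, __ATOMIC_RELEASE);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";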