author     David S. Miller <davem@davemloft.net>   2020-05-14 20:31:21 -0700
committer  David S. Miller <davem@davemloft.net>   2020-05-14 20:31:21 -0700
commit     d00f26b623333f2419f4c3b95ff11c8b1bb96f56
tree       fa1ae8e845b1b788168ecbba8bcec77633f4f683 /arch
parent     9b65d2ffe853e4cf81585eaf60ce00237b277dc0
parent     b92d44b5c2efe70dbe7fc44fdd2ad46f8612418a
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:
====================
pull-request: bpf-next 2020-05-14
The following pull-request contains BPF updates for your *net-next* tree.
The main changes are:
1) Merged tag 'perf-for-bpf-2020-05-06' from tip tree that includes CAP_PERFMON.
2) Support for narrow loads in bpf_sock_addr progs and additional
helpers in cg-skb progs, from Andrey.
3) BPF benchmark runner, from Andrii.
4) ARM and RISC-V JIT optimizations, from Luke.
5) BPF iterator infrastructure, from Yonghong.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
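As a side note on item 4 above: the arm hunks in the diff below add an ASR-immediate encoding (ARM_INST_ASR_I/ARM_INST_ASR_R) and route BPF_ALU | BPF_ARSH | BPF_K through the immediate-shift path, so a 32-bit arithmetic right shift by a constant becomes a single asr instruction. The following user-space C sketch is not part of the commit; it only models the difference between the logical and arithmetic 32-bit shifts involved, and assumes the common compiler behaviour of '>>' on negative signed values (arithmetic shift on gcc/clang).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Not kernel code: models the 32-bit shift semantics behind the arm
 * hunks below.
 *   BPF_ALU | BPF_RSH  | BPF_K  -> logical shift right    (LSR imm)
 *   BPF_ALU | BPF_ARSH | BPF_K  -> arithmetic shift right (ASR imm)
 * Assumes '>>' on a negative int32_t is an arithmetic shift, as on
 * gcc/clang.
 */
static uint32_t bpf_rsh32(uint32_t dst, unsigned int imm)
{
	return dst >> imm;                      /* top bits filled with zeros */
}

static uint32_t bpf_arsh32(uint32_t dst, unsigned int imm)
{
	return (uint32_t)((int32_t)dst >> imm); /* top bits copy the sign bit */
}

int main(void)
{
	uint32_t v = 0xfffffff0;                /* -16 as a signed 32-bit value */

	printf("rsh  4: 0x%08" PRIx32 "\n", bpf_rsh32(v, 4));  /* 0x0fffffff */
	printf("arsh 4: 0x%08" PRIx32 "\n", bpf_arsh32(v, 4)); /* 0xffffffff */
	return 0;
}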
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/net/bpf_jit_32.c        | 14
-rw-r--r--  arch/arm/net/bpf_jit_32.h        |  3
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  | 64
-rw-r--r--  arch/x86/net/bpf_jit_comp32.c    |  4
4 files changed, 58 insertions, 27 deletions
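One of the riscv hunks below reworks BPF_ALU | BPF_END | BPF_FROM_LE: riscv64 is little-endian, so the le16/le32/le64 forms only need to truncate or zero-extend the destination register (or do nothing for 64 bits). The sketch below is not part of the commit; bpf_from_le() is an illustrative user-space helper that models the value-level effect of the new switch (imm) cases.

#include <stdint.h>
#include <stdio.h>

/*
 * Not kernel code: value-level model of BPF_ALU | BPF_END | BPF_FROM_LE
 * on a little-endian host such as riscv64.  The byte order is already
 * correct, so the operation reduces to keeping the low 16/32 bits (or
 * doing nothing for 64), matching the new switch (imm) cases in the
 * riscv hunk below.
 */
static uint64_t bpf_from_le(uint64_t dst, int width)
{
	switch (width) {
	case 16: return dst & 0xffffULL;        /* like slli 48; srli 48 */
	case 32: return dst & 0xffffffffULL;    /* like emit_zext_32()   */
	case 64: return dst;                    /* nothing to do         */
	}
	return dst;
}

int main(void)
{
	uint64_t v = 0x1122334455667788ULL;

	printf("le16: %#llx\n", (unsigned long long)bpf_from_le(v, 16)); /* 0x7788 */
	printf("le32: %#llx\n", (unsigned long long)bpf_from_le(v, 32)); /* 0x55667788 */
	printf("le64: %#llx\n", (unsigned long long)bpf_from_le(v, 64)); /* unchanged */
	return 0;
}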
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index bf85d6db4931..0207b6ea6e8a 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -795,6 +795,9 @@ static inline void emit_a32_alu_i(const s8 dst, const u32 val,
 	case BPF_RSH:
 		emit(ARM_LSR_I(rd, rd, val), ctx);
 		break;
+	case BPF_ARSH:
+		emit(ARM_ASR_I(rd, rd, val), ctx);
+		break;
 	case BPF_NEG:
 		emit(ARM_RSB_I(rd, rd, val), ctx);
 		break;
@@ -860,8 +863,8 @@ static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
 	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
 	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
 	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
-	_emit(ARM_COND_MI, ARM_B(0), ctx);
-	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
+	_emit(ARM_COND_PL,
+	      ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
 	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
 
 	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
@@ -1408,7 +1411,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_ALU | BPF_MUL | BPF_X:
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU | BPF_RSH | BPF_X:
-	case BPF_ALU | BPF_ARSH | BPF_K:
 	case BPF_ALU | BPF_ARSH | BPF_X:
 	case BPF_ALU64 | BPF_ADD | BPF_K:
 	case BPF_ALU64 | BPF_ADD | BPF_X:
@@ -1465,10 +1467,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_ALU64 | BPF_MOD | BPF_K:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
 		goto notyet;
-	/* dst = dst >> imm */
 	/* dst = dst << imm */
-	case BPF_ALU | BPF_RSH | BPF_K:
+	/* dst = dst >> imm */
+	/* dst = dst >> imm (signed) */
 	case BPF_ALU | BPF_LSH | BPF_K:
+	case BPF_ALU | BPF_RSH | BPF_K:
+	case BPF_ALU | BPF_ARSH | BPF_K:
 		if (unlikely(imm > 31))
 			return -EINVAL;
 		if (imm)
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index fb67cbc589e0..e0b593a1498d 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -94,6 +94,9 @@
 #define ARM_INST_LSR_I		0x01a00020
 #define ARM_INST_LSR_R		0x01a00030
 
+#define ARM_INST_ASR_I		0x01a00040
+#define ARM_INST_ASR_R		0x01a00050
+
 #define ARM_INST_MOV_R		0x01a00000
 #define ARM_INST_MOVS_R		0x01b00000
 #define ARM_INST_MOV_I		0x03a00000
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index d208a9fd6c52..6cfd164cbe88 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -515,7 +515,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_X:
@@ -542,13 +542,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 
 	/* dst = BSWAP##imm(dst) */
 	case BPF_ALU | BPF_END | BPF_FROM_LE:
-	{
-		int shift = 64 - imm;
-
-		emit(rv_slli(rd, rd, shift), ctx);
-		emit(rv_srli(rd, rd, shift), ctx);
+		switch (imm) {
+		case 16:
+			emit(rv_slli(rd, rd, 48), ctx);
+			emit(rv_srli(rd, rd, 48), ctx);
+			break;
+		case 32:
+			if (!aux->verifier_zext)
+				emit_zext_32(rd, ctx);
+			break;
+		case 64:
+			/* Do nothing */
+			break;
+		}
 		break;
-	}
+
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
 		emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);
 
@@ -692,19 +700,19 @@ out_be:
 	case BPF_ALU | BPF_LSH | BPF_K:
 	case BPF_ALU64 | BPF_LSH | BPF_K:
 		emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_K:
 	case BPF_ALU64 | BPF_RSH | BPF_K:
 		emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_ARSH | BPF_K:
 	case BPF_ALU64 | BPF_ARSH | BPF_K:
 		emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 
@@ -784,11 +792,15 @@ out_be:
 	case BPF_JMP32 | BPF_JSGE | BPF_K:
 	case BPF_JMP | BPF_JSLE | BPF_K:
 	case BPF_JMP32 | BPF_JSLE | BPF_K:
-	case BPF_JMP | BPF_JSET | BPF_K:
-	case BPF_JMP32 | BPF_JSET | BPF_K:
 		rvoff = rv_offset(i, off, ctx);
 		s = ctx->ninsns;
-		emit_imm(RV_REG_T1, imm, ctx);
+		if (imm) {
+			emit_imm(RV_REG_T1, imm, ctx);
+			rs = RV_REG_T1;
+		} else {
+			/* If imm is 0, simply use zero register. */
+			rs = RV_REG_ZERO;
+		}
 		if (!is64) {
 			if (is_signed_bpf_cond(BPF_OP(code)))
 				emit_sext_32_rd(&rd, ctx);
@@ -799,16 +811,28 @@ out_be:
 
 		/* Adjust for extra insns */
 		rvoff -= (e - s) << 2;
+		emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
+		break;
 
-		if (BPF_OP(code) == BPF_JSET) {
-			/* Adjust for and */
-			rvoff -= 4;
-			emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
-			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
-				    ctx);
+	case BPF_JMP | BPF_JSET | BPF_K:
+	case BPF_JMP32 | BPF_JSET | BPF_K:
+		rvoff = rv_offset(i, off, ctx);
+		s = ctx->ninsns;
+		if (is_12b_int(imm)) {
+			emit(rv_andi(RV_REG_T1, rd, imm), ctx);
 		} else {
-			emit_branch(BPF_OP(code), rd, RV_REG_T1, rvoff, ctx);
+			emit_imm(RV_REG_T1, imm, ctx);
+			emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
 		}
+		/* For jset32, we should clear the upper 32 bits of t1, but
+		 * sign-extension is sufficient here and saves one instruction,
+		 * as t1 is used only in comparison against zero.
+		 */
+		if (!is64 && imm < 0)
+			emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx);
+		e = ctx->ninsns;
+		rvoff -= (e - s) << 2;
+		emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
 		break;
 
 	/* function call */
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 66cd150b7e54..96fde03aa987 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1475,8 +1475,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
 		const bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
-		const bool dstk = insn->dst_reg == BPF_REG_AX ? false : true;
-		const bool sstk = insn->src_reg == BPF_REG_AX ? false : true;
+		const bool dstk = insn->dst_reg != BPF_REG_AX;
+		const bool sstk = insn->src_reg != BPF_REG_AX;
 		const u8 code = insn->code;
 		const u8 *dst = bpf2ia32[insn->dst_reg];
 		const u8 *src = bpf2ia32[insn->src_reg];
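The comment in the riscv BPF_JSET | BPF_K hunk above notes that, for jset32 with a negative immediate, sign-extending the and-result (a single addiw) is enough because the temporary is only compared against zero. Below is a small user-space check of that reasoning; it is not part of the commit, and the jset32_* helpers are illustrative names only.

#include <stdint.h>
#include <stdio.h>

/*
 * Not kernel code: demonstrates the comment in the riscv
 * BPF_JSET | BPF_K hunk above.  For jset32 the JIT only needs to know
 * whether the low 32 bits of (dst & imm) are non-zero, and a 32-bit
 * value is non-zero after sign-extension exactly when it is non-zero
 * after zero-extension, so the cheaper addiw sign-extension suffices.
 */
static int jset32_zext(uint64_t dst, int32_t imm)
{
	uint64_t t = (uint32_t)(dst & (uint32_t)imm); /* upper 32 bits cleared */

	return t != 0;
}

static int jset32_sext(uint64_t dst, int32_t imm)
{
	int64_t t = (int32_t)(dst & (uint32_t)imm);   /* upper 32 bits sign-filled */

	return t != 0;
}

int main(void)
{
	const uint64_t vals[] = {
		0x0, 0x1, 0x80000000ULL, 0xffffffff00000000ULL, 0x1234deadbeefULL,
	};
	const int32_t imm = (int32_t)0x80000001;      /* negative immediate */
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("dst=%#018llx  zext=%d  sext=%d\n",
		       (unsigned long long)vals[i],
		       jset32_zext(vals[i], imm), jset32_sext(vals[i], imm));
	return 0;
}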