author		Yauheni Kaliuta <yauheni.kaliuta@redhat.com>	2019-08-28 21:28:46 +0300
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-08-31 00:35:50 +0200
commit		1c8f9b91c456f5b47a377a0c8c5d4130fc39433a (patch)
tree		a19372aee851b8c3d2190dc3ac65bbcd99996c4d /arch/s390/net
parent		47ee6e86e0a3e3a15fbdd6d94aab39e46013c961 (diff)
bpf: s390: add JIT support for multi-function programs
This adds support for bpf-to-bpf function calls in the s390 JIT
compiler. The JIT compiler converts the bpf call instructions to
native branch instructions. After a round of the usual passes, the
start addresses of the JITed images for the callee functions are
known. Finally, to fix up the branch target addresses, we need to
perform an extra pass.
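As a rough sketch of that flow (not the s390 code in the diff below;
'struct my_ctx' and emit_branch() are hypothetical stand-ins for the
per-arch JIT context and instruction emitter), a JIT's call handler
can defer target resolution to the extra pass like this:

  #include <linux/filter.h>	/* bpf_jit_get_func_addr(), struct bpf_insn */

  struct my_ctx;						/* hypothetical per-arch JIT context */
  static void emit_branch(struct my_ctx *ctx, u64 target);	/* hypothetical emitter */

  static int jit_bpf_call(struct my_ctx *ctx, const struct bpf_prog *fp,
  			const struct bpf_insn *insn, bool extra_pass)
  {
  	bool fixed_addr;
  	u64 target;
  	int err;

  	/*
  	 * For BPF_PSEUDO_CALL the callee image does not exist during
  	 * the early passes; bpf_jit_get_func_addr() hands back a
  	 * placeholder then, and the final address in the extra pass.
  	 */
  	err = bpf_jit_get_func_addr(fp, insn, extra_pass, &target, &fixed_addr);
  	if (err < 0)
  		return err;

  	emit_branch(ctx, target);	/* native branch/call to 'target' */
  	return 0;
  }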
Because of the address range in which JITed images are allocated on
s390, the offsets of the start addresses of these images from
__bpf_call_base are as large as 64 bits. So, for a function call,
the imm field of the instruction cannot be used to determine the
callee's address. Use bpf_jit_get_func_addr() helper instead.
The patch borrows a lot from:
commit 8c11ea5ce13d ("bpf, arm64: fix getting subprog addr from aux
for calls")
commit e2c95a61656d ("bpf, ppc64: generalize fetching subprog into
bpf_jit_get_func_addr")
commit 8484ce8306f9 ("bpf: powerpc64: add JIT support for
multi-function programs")
(including the commit message).
test_verifier (5.3-rc6 with CONFIG_BPF_JIT_ALWAYS_ON=y):
without patch:
Summary: 1501 PASSED, 0 SKIPPED, 47 FAILED
with patch:
Summary: 1540 PASSED, 0 SKIPPED, 8 FAILED
Signed-off-by: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'arch/s390/net')
-rw-r--r--	arch/s390/net/bpf_jit_comp.c	66
1 file changed, 55 insertions(+), 11 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e636728ab452..a76cbe4cc7cc 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -502,7 +502,8 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
  * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
  * stack space for the large switch statement.
  */
-static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
+static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+				 int i, bool extra_pass)
 {
 	struct bpf_insn *insn = &fp->insnsi[i];
 	int jmp_off, last, insn_count = 1;
@@ -1011,10 +1012,14 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 	 */
 	case BPF_JMP | BPF_CALL:
 	{
-		/*
-		 * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5)
-		 */
-		const u64 func = (u64)__bpf_call_base + imm;
+		u64 func;
+		bool func_addr_fixed;
+		int ret;
+
+		ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
+					    &func, &func_addr_fixed);
+		if (ret < 0)
+			return -1;
 
 		REG_SET_SEEN(BPF_REG_5);
 		jit->seen |= SEEN_FUNC;
@@ -1281,7 +1286,8 @@ branch_oc:
 /*
  * Compile eBPF program into s390x code
  */
-static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
+static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
+			bool extra_pass)
 {
 	int i, insn_count;
 
@@ -1290,7 +1296,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 
 	bpf_jit_prologue(jit, fp->aux->stack_depth);
 	for (i = 0; i < fp->len; i += insn_count) {
-		insn_count = bpf_jit_insn(jit, fp, i);
+		insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
 		if (insn_count < 0)
 			return -1;
 		/* Next instruction address */
@@ -1309,6 +1315,12 @@ bool bpf_jit_needs_zext(void)
 	return true;
 }
 
+struct s390_jit_data {
+	struct bpf_binary_header *header;
+	struct bpf_jit ctx;
+	int pass;
+};
+
 /*
  * Compile eBPF program "fp"
  */
@@ -1316,7 +1328,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
 	struct bpf_prog *tmp, *orig_fp = fp;
 	struct bpf_binary_header *header;
+	struct s390_jit_data *jit_data;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct bpf_jit jit;
 	int pass;
 
@@ -1335,6 +1349,23 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		fp = tmp;
 	}
 
+	jit_data = fp->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			fp = orig_fp;
+			goto out;
+		}
+		fp->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.addrs) {
+		jit = jit_data->ctx;
+		header = jit_data->header;
+		extra_pass = true;
+		pass = jit_data->pass + 1;
+		goto skip_init_ctx;
+	}
+
 	memset(&jit, 0, sizeof(jit));
 	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
 	if (jit.addrs == NULL) {
@@ -1347,7 +1378,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	 *   - 3:   Calculate program size and addrs arrray
 	 */
 	for (pass = 1; pass <= 3; pass++) {
-		if (bpf_jit_prog(&jit, fp)) {
+		if (bpf_jit_prog(&jit, fp, extra_pass)) {
 			fp = orig_fp;
 			goto free_addrs;
 		}
@@ -1359,12 +1390,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		fp = orig_fp;
 		goto free_addrs;
 	}
+
 	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
 	if (!header) {
 		fp = orig_fp;
 		goto free_addrs;
 	}
-	if (bpf_jit_prog(&jit, fp)) {
+skip_init_ctx:
+	if (bpf_jit_prog(&jit, fp, extra_pass)) {
 		bpf_jit_binary_free(header);
 		fp = orig_fp;
 		goto free_addrs;
@@ -1373,12 +1406,23 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
 		print_fn_code(jit.prg_buf, jit.size_prg);
 	}
-	bpf_jit_binary_lock_ro(header);
+	if (!fp->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->header = header;
+		jit_data->ctx = jit;
+		jit_data->pass = pass;
+	}
 	fp->bpf_func = (void *) jit.prg_buf;
 	fp->jited = 1;
 	fp->jited_len = jit.size;
+
+	if (!fp->is_func || extra_pass) {
 free_addrs:
-	kfree(jit.addrs);
+		kfree(jit.addrs);
+		kfree(jit_data);
+		fp->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(fp, fp == orig_fp ?