author	Hou Tao <houtao1@huawei.com>	2022-02-26 20:19:06 +0800
committer	Daniel Borkmann <daniel@iogearbox.net>	2022-02-28 13:50:28 +0100
commit	dda7596c109fc382876118627e29db7607cde35d (patch)
tree	e8a0a045a4ee83def9b91cf48f74d2d3a2e2f6b9 /arch/arm64/net
parent	68e4f238b0e9d3670a1612ad900a6e98b2b3f7dd (diff)
bpf, arm64: Feed byte-offset into bpf line info
The insn_to_jit_off array passed to bpf_prog_fill_jited_linfo() is calculated
in instruction granularity instead of byte granularity, but BPF line info
requires byte offsets. bpf_prog_fill_jited_linfo() will be the last user of
ctx.offset before it is freed, so convert the offsets into byte offsets before
calling bpf_prog_fill_jited_linfo() in order to fix the line info dump on
arm64.

Fixes: 37ab566c178d ("bpf: arm64: Enable arm64 jit to provide bpf_line_info")
Suggested-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220226121906.5709-3-houtao1@huawei.com
Diffstat (limited to 'arch/arm64/net')
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 7152ee28facc..4f137424935f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1133,6 +1133,11 @@ skip_init_ctx:
 	prog->jited_len = prog_size;
 
 	if (!prog->is_func || extra_pass) {
+		int i;
+
+		/* offset[prog->len] is the size of program */
+		for (i = 0; i <= prog->len; i++)
+			ctx.offset[i] *= AARCH64_INSN_SIZE;
 		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
 out_off:
 		kfree(ctx.offset);
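
For illustration only, here is a minimal, self-contained user-space sketch (not kernel code) of the conversion the patch performs: offsets recorded per BPF instruction in units of native AArch64 instructions are scaled to byte offsets, which is what the line info machinery expects. The offset values, the array length, and the main() harness below are made up; only the scaling by AARCH64_INSN_SIZE (4 bytes per AArch64 instruction) and the "offset[len] is the size of the program" convention come from the patch.

```c
/*
 * Hedged sketch of the instruction-to-byte offset conversion.
 * Not kernel code; values are hypothetical.
 */
#include <stdio.h>

#define AARCH64_INSN_SIZE 4	/* every AArch64 instruction is 4 bytes */

int main(void)
{
	/* Hypothetical per-BPF-insn offsets in native-instruction units;
	 * the last entry plays the role of "offset[prog->len] is the
	 * size of program" from the patch.
	 */
	unsigned int offset[] = { 0, 3, 5, 9 };
	unsigned int len = 3;	/* pretend the BPF program has 3 insns */
	unsigned int i;

	/* Same scaling as the patch: convert in place to byte offsets. */
	for (i = 0; i <= len; i++)
		offset[i] *= AARCH64_INSN_SIZE;

	for (i = 0; i <= len; i++)
		printf("offset[%u] = %u bytes\n", i, offset[i]);

	return 0;
}
```

With the made-up values above, the loop turns instruction counts {0, 3, 5, 9} into byte offsets {0, 12, 20, 36}, which is the granularity bpf_prog_fill_jited_linfo() needs for the line info dump.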