path: root/arch/arm64
author     Xu Kuohai <xukuohai@huawei.com>        2022-08-08 00:07:35 -0400
committer  Daniel Borkmann <daniel@iogearbox.net> 2022-08-10 16:50:57 +0200
commit     aada476655461a9ab491d8298a415430cdd10278 (patch)
tree       0dcd914c4efc99785d8471fbb189f5e639b1c970 /arch/arm64
parent     46c8229c4317ba8576a206d285a34783390ba6ab (diff)
bpf, arm64: Fix bpf trampoline instruction endianness
The sparse tool complains as follows:

  arch/arm64/net/bpf_jit_comp.c:1684:16: warning: incorrect type in assignment (different base types)
  arch/arm64/net/bpf_jit_comp.c:1684:16:    expected unsigned int [usertype] *branch
  arch/arm64/net/bpf_jit_comp.c:1684:16:    got restricted __le32 [usertype] *
  arch/arm64/net/bpf_jit_comp.c:1700:52: error: subtraction of different types can't work (different base types)
  arch/arm64/net/bpf_jit_comp.c:1734:29: warning: incorrect type in assignment (different base types)
  arch/arm64/net/bpf_jit_comp.c:1734:29:    expected unsigned int [usertype] *
  arch/arm64/net/bpf_jit_comp.c:1734:29:    got restricted __le32 [usertype] *
  arch/arm64/net/bpf_jit_comp.c:1918:52: error: subtraction of different types can't work (different base types)

This is because the variable branch in function invoke_bpf_prog and the
variable branches in function prepare_trampoline are defined as type
u32 *, which conflicts with ctx->image's type __le32 *, so sparse
complains when an assignment or arithmetic operation is performed on
these two variables and ctx->image.

Since arm64 instructions are always little-endian, change the type of
these two variables to __le32 * and call cpu_to_le32() to convert an
instruction to little-endian before writing it to memory. This is also
in line with emit(), which internally does cpu_to_le32(), too.

Fixes: efc9909fdce0 ("bpf, arm64: Add bpf trampoline for arm64")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/bpf/20220808040735.1232002-1-xukuohai@huawei.com
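For context, a minimal standalone sketch of the pattern is below. It is a
user-space illustration under assumed names (cpu_to_le32_sketch, the le32
typedef, and the NOP placeholder values are made up for the example, not the
kernel's helpers): instructions written into the little-endian image need a
byte-order conversion like the one emit() performs, and the saved slot pointer
must share the image's element type so that the pointer subtraction computing
the branch offset is well-typed.

/*
 * Standalone user-space sketch (not kernel code) of the pattern the
 * patch applies: the JIT image holds little-endian 32-bit instructions,
 * so a slot that is patched after the fact must be converted with a
 * cpu_to_le32()-style helper before the store, and the saved slot
 * pointer must have the image's element type so that pointer
 * subtraction counts whole instructions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t le32;                  /* stand-in for the kernel's __le32 */

static le32 cpu_to_le32_sketch(uint32_t insn)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(insn); /* byte-swap on a big-endian host */
#else
        return insn;                    /* no-op on a little-endian host */
#endif
}

int main(void)
{
        le32 image[4] = { 0 };          /* pretend JIT image */
        int idx = 0;

        /* reserve a slot for a branch and remember where it is */
        le32 *branch = &image[idx];
        image[idx++] = cpu_to_le32_sketch(0xd503201f);  /* AArch64 NOP placeholder */

        /* emit two more instructions */
        image[idx++] = cpu_to_le32_sketch(0xd503201f);
        image[idx++] = cpu_to_le32_sketch(0xd503201f);

        /*
         * Both operands have the image's element type, so the subtraction
         * is well-typed and yields an offset in 32-bit instructions; this
         * is what sparse rejected when the saved pointer was a plain u32 *.
         */
        int offset = &image[idx] - branch;

        /*
         * Patch the reserved slot, converting just like the initial
         * stores; the real code encodes a CBZ/CBNZ using offset here.
         */
        *branch = cpu_to_le32_sketch(0xd503201f);

        printf("patched slot is %d instructions before the current position\n",
               offset);
        return 0;
}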
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 40aa3e744beb..389623ae5a91 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1643,7 +1643,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
int args_off, int retval_off, int run_ctx_off,
bool save_ret)
{
- u32 *branch;
+ __le32 *branch;
u64 enter_prog;
u64 exit_prog;
struct bpf_prog *p = l->link.prog;
@@ -1698,7 +1698,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
if (ctx->image) {
int offset = &ctx->image[ctx->idx] - branch;
- *branch = A64_CBZ(1, A64_R(0), offset);
+ *branch = cpu_to_le32(A64_CBZ(1, A64_R(0), offset));
}
/* arg1: prog */
@@ -1713,7 +1713,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
int args_off, int retval_off, int run_ctx_off,
- u32 **branches)
+ __le32 **branches)
{
int i;
@@ -1784,7 +1784,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
bool save_ret;
- u32 **branches = NULL;
+ __le32 **branches = NULL;
/* trampoline stack layout:
* [ parent ip ]
@@ -1892,7 +1892,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
flags & BPF_TRAMP_F_RET_FENTRY_RET);
if (fmod_ret->nr_links) {
- branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *),
+ branches = kcalloc(fmod_ret->nr_links, sizeof(__le32 *),
GFP_KERNEL);
if (!branches)
return -ENOMEM;
@@ -1916,7 +1916,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* update the branches saved in invoke_bpf_mod_ret with cbnz */
for (i = 0; i < fmod_ret->nr_links && ctx->image != NULL; i++) {
int offset = &ctx->image[ctx->idx] - branches[i];
- *branches[i] = A64_CBNZ(1, A64_R(10), offset);
+ *branches[i] = cpu_to_le32(A64_CBNZ(1, A64_R(10), offset));
}
for (i = 0; i < fexit->nr_links; i++)