From 9cb61e50bf6bf54db712bba6cf20badca4383f96 Mon Sep 17 00:00:00 2001 From: Connor O'Brien Date: Sat, 7 Jan 2023 02:53:31 +0000 Subject: bpf: btf: limit logging of ignored BTF mismatches Enabling CONFIG_MODULE_ALLOW_BTF_MISMATCH is an indication that BTF mismatches are expected and module loading should proceed anyway. Logging with pr_warn() on every one of these "benign" mismatches creates unnecessary noise when many such modules are loaded. Instead, handle this case with a single log warning that BTF info may be unavailable. Mismatches also result in calls to __btf_verifier_log() via __btf_verifier_log_type() or btf_verifier_log_member(), adding several additional lines of logging per mismatched module. Add checks to these paths to skip logging for module BTF mismatches in the "allow mismatch" case. All existing logging behavior is preserved in the default CONFIG_MODULE_ALLOW_BTF_MISMATCH=n case. Signed-off-by: Connor O'Brien Acked-by: Yonghong Song Link: https://lore.kernel.org/r/20230107025331.3240536-1-connoro@google.com Signed-off-by: Martin KaFai Lau --- kernel/bpf/btf.c | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 578cee398550..4ba749fcce9d 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1397,12 +1397,18 @@ __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env, if (!bpf_verifier_log_needed(log)) return; - /* btf verifier prints all types it is processing via - * btf_verifier_log_type(..., fmt = NULL). - * Skip those prints for in-kernel BTF verification. - */ - if (log->level == BPF_LOG_KERNEL && !fmt) - return; + if (log->level == BPF_LOG_KERNEL) { + /* btf verifier prints all types it is processing via + * btf_verifier_log_type(..., fmt = NULL). + * Skip those prints for in-kernel BTF verification. + */ + if (!fmt) + return; + + /* Skip logging when loading module BTF with mismatches permitted */ + if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) + return; + } __btf_verifier_log(log, "[%u] %s %s%s", env->log_type_id, @@ -1441,8 +1447,15 @@ static void btf_verifier_log_member(struct btf_verifier_env *env, if (!bpf_verifier_log_needed(log)) return; - if (log->level == BPF_LOG_KERNEL && !fmt) - return; + if (log->level == BPF_LOG_KERNEL) { + if (!fmt) + return; + + /* Skip logging when loading module BTF with mismatches permitted */ + if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) + return; + } + /* The CHECK_META phase already did a btf dump. 
* * If member is logged again, it must hit an error in @@ -7261,11 +7274,14 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, } btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); if (IS_ERR(btf)) { - pr_warn("failed to validate module [%s] BTF: %ld\n", - mod->name, PTR_ERR(btf)); kfree(btf_mod); - if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) + if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) { + pr_warn("failed to validate module [%s] BTF: %ld\n", + mod->name, PTR_ERR(btf)); err = PTR_ERR(btf); + } else { + pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n"); + } goto out; } err = btf_alloc_id(btf); -- cgit v1.2.3-58-ga151 From 700e6f853eb3977cd1cf172b717baa9daa328ea4 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 17 Jan 2023 23:37:04 +0100 Subject: bpf: Do not allow to load sleepable BPF_TRACE_RAW_TP program Currently we allow to load any tracing program as sleepable, but BPF_TRACE_RAW_TP can't sleep. Making the check explicit for tracing programs attach types, so sleepable BPF_TRACE_RAW_TP will fail to load. Updating the verifier error to mention iter programs as well. Acked-by: Song Liu Acked-by: Yonghong Song Signed-off-by: Jiri Olsa Link: https://lore.kernel.org/r/20230117223705.440975-1-jolsa@kernel.org Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fa4c911603e9..ca7db2ce70b9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -16743,6 +16743,23 @@ BTF_ID(func, rcu_read_unlock_strict) #endif BTF_SET_END(btf_id_deny) +static bool can_be_sleepable(struct bpf_prog *prog) +{ + if (prog->type == BPF_PROG_TYPE_TRACING) { + switch (prog->expected_attach_type) { + case BPF_TRACE_FENTRY: + case BPF_TRACE_FEXIT: + case BPF_MODIFY_RETURN: + case BPF_TRACE_ITER: + return true; + default: + return false; + } + } + return prog->type == BPF_PROG_TYPE_LSM || + prog->type == BPF_PROG_TYPE_KPROBE; /* only for uprobes */ +} + static int check_attach_btf_id(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; @@ -16761,9 +16778,8 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) return -EINVAL; } - if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && - prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) { - verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n"); + if (prog->aux->sleepable && !can_be_sleepable(prog)) { + verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter and uprobe programs can be sleepable\n"); return -EINVAL; } -- cgit v1.2.3-58-ga151 From d6fefa1105dacc8a742cdcf2f4bfb501c9e61349 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 21 Jan 2023 05:52:30 +0530 Subject: bpf: Fix state pruning for STACK_DYNPTR stack slots The root of the problem is missing liveness marking for STACK_DYNPTR slots. This leads to all kinds of problems inside stacksafe. The verifier by default inside stacksafe ignores spilled_ptr in stack slots which do not have REG_LIVE_READ marks. Since this is being checked in the 'old' explored state, it must have already done clean_live_states for this old bpf_func_state. Hence, it won't be receiving any more liveness marks from to be explored insns (it has received REG_LIVE_DONE marking from liveness point of view). 
What this means is that verifier considers that it's safe to not compare the stack slot if was never read by children states. While liveness marks are usually propagated correctly following the parentage chain for spilled registers (SCALAR_VALUE and PTR_* types), the same is not the case for STACK_DYNPTR. clean_live_states hence simply rewrites these stack slots to the type STACK_INVALID since it sees no REG_LIVE_READ marks. The end result is that we will never see STACK_DYNPTR slots in explored state. Even if verifier was conservatively matching !REG_LIVE_READ slots, very next check continuing the stacksafe loop on seeing STACK_INVALID would again prevent further checks. Now as long as verifier stores an explored state which we can compare to when reaching a pruning point, we can abuse this bug to make verifier prune search for obviously unsafe paths using STACK_DYNPTR slots thinking they are never used hence safe. Doing this in unprivileged mode is a bit challenging. add_new_state is only set when seeing BPF_F_TEST_STATE_FREQ (which requires privileges) or when jmps_processed difference is >= 2 and insn_processed difference is >= 8. So coming up with the unprivileged case requires a little more work, but it is still totally possible. The test case being discussed below triggers the heuristic even in unprivileged mode. However, it no longer works since commit 8addbfc7b308 ("bpf: Gate dynptr API behind CAP_BPF"). Let's try to study the test step by step. Consider the following program (C style BPF ASM): 0 r0 = 0; 1 r6 = &ringbuf_map; 3 r1 = r6; 4 r2 = 8; 5 r3 = 0; 6 r4 = r10; 7 r4 -= -16; 8 call bpf_ringbuf_reserve_dynptr; 9 if r0 == 0 goto pc+1; 10 goto pc+1; 11 *(r10 - 16) = 0xeB9F; 12 r1 = r10; 13 r1 -= -16; 14 r2 = 0; 15 call bpf_ringbuf_discard_dynptr; 16 r0 = 0; 17 exit; We know that insn 12 will be a pruning point, hence if we force add_new_state for it, it will first verify the following path as safe in straight line exploration: 0 1 3 4 5 6 7 8 9 -> 10 -> (12) 13 14 15 16 17 Then, when we arrive at insn 12 from the following path: 0 1 3 4 5 6 7 8 9 -> 11 (12) We will find a state that has been verified as safe already at insn 12. Since register state is same at this point, regsafe will pass. Next, in stacksafe, for spi = 0 and spi = 1 (location of our dynptr) is skipped seeing !REG_LIVE_READ. The rest matches, so stacksafe returns true. Next, refsafe is also true as reference state is unchanged in both states. The states are considered equivalent and search is pruned. Hence, we are able to construct a dynptr with arbitrary contents and use the dynptr API to operate on this arbitrary pointer and arbitrary size + offset. To fix this, first define a mark_dynptr_read function that propagates liveness marks whenever a valid initialized dynptr is accessed by dynptr helpers. REG_LIVE_WRITTEN is marked whenever we initialize an uninitialized dynptr. This is done in mark_stack_slots_dynptr. It allows screening off mark_reg_read and not propagating marks upwards from that point. This ensures that we either set REG_LIVE_READ64 on both dynptr slots, or none, so clean_live_states either sets both slots to STACK_INVALID or none of them. This is the invariant the checks inside stacksafe rely on. Next, do a complete comparison of both stack slots whenever they have STACK_DYNPTR. Compare the dynptr type stored in the spilled_ptr, and also whether both form the same first_slot. Only then is the later path safe. 
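For readers who prefer C to BPF assembly, the listing above corresponds roughly to the restricted-C sketch below. This is purely illustrative and not part of the patch: the program and section names are invented, only the ringbuf map and the two dynptr helpers come from the listing, and a compiler will not necessarily emit this exact instruction sequence (the scribble at insn 11 in particular is hand-written).

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf_map SEC(".maps");

SEC("raw_tp")
int dynptr_prune_poc(void *ctx)
{
	struct bpf_dynptr ptr;	/* occupies the two stack slots at r10 - 16 */
	long ret;

	/* insns 1-8: set up the dynptr at r10 - 16 */
	ret = bpf_ringbuf_reserve_dynptr(&ringbuf_map, 8, 0, &ptr);

	/* insns 9-11: only one of the two paths that converge at the
	 * pruning point overwrites the dynptr's stack slots
	 */
	if (!ret)
		*(volatile __u64 *)&ptr = 0xeB9F;

	/* insns 12-15: pass the (possibly corrupted) dynptr to a helper */
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Without the liveness and stacksafe changes described above, the two incoming paths are wrongly treated as equivalent at the pruning point, so the corrupted-dynptr path is never fully explored.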
Fixes: 97e03f521050 ("bpf: Add verifier support for dynptrs") Acked-by: Eduard Zingerman Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230121002241.2113993-2-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 4 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ca7db2ce70b9..39d8ee38c338 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -781,6 +781,9 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_ state->stack[spi - 1].spilled_ptr.ref_obj_id = id; } + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; + return 0; } @@ -805,6 +808,31 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); + + /* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot? + * + * While we don't allow reading STACK_INVALID, it is still possible to + * do <8 byte writes marking some but not all slots as STACK_MISC. Then, + * helpers or insns can do partial read of that part without failing, + * but check_stack_range_initialized, check_stack_read_var_off, and + * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of + * the slot conservatively. Hence we need to prevent those liveness + * marking walks. + * + * This was not a problem before because STACK_INVALID is only set by + * default (where the default reg state has its reg->parent as NULL), or + * in clean_live_states after REG_LIVE_DONE (at which point + * mark_reg_read won't walk reg->parent chain), but not randomly during + * verifier state exploration (like we did above). Hence, for our case + * parentage chain will still be live (i.e. reg->parent may be + * non-NULL), while earlier reg->parent was NULL, so we need + * REG_LIVE_WRITTEN to screen off read marker propagation when it is + * done later on reads or by mark_dynptr_read as well to unnecessary + * mark registers in verifier state. + */ + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; + return 0; } @@ -2390,6 +2418,30 @@ static int mark_reg_read(struct bpf_verifier_env *env, return 0; } +static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +{ + struct bpf_func_state *state = func(env, reg); + int spi, ret; + + /* For CONST_PTR_TO_DYNPTR, it must have already been done by + * check_reg_arg in check_helper_call and mark_btf_func_reg_size in + * check_kfunc_call. + */ + if (reg->type == CONST_PTR_TO_DYNPTR) + return 0; + spi = get_spi(reg->off); + /* Caller ensures dynptr is valid and initialized, which means spi is in + * bounds and spi is the first dynptr slot. Simply mark stack slot as + * read. + */ + ret = mark_reg_read(env, &state->stack[spi].spilled_ptr, + state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64); + if (ret) + return ret; + return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr, + state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64); +} + /* This function is supposed to be used by the following 32-bit optimization * code only. It returns TRUE if the source or destination register operates * on 64-bit, otherwise return FALSE. 
@@ -5977,6 +6029,8 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, meta->uninit_dynptr_regno = regno; } else /* MEM_RDONLY and None case from above */ { + int err; + /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n"); @@ -6010,6 +6064,10 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, err_extra, regno); return -EINVAL; } + + err = mark_dynptr_read(env, reg); + if (err) + return err; } return 0; } @@ -13215,10 +13273,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, return false; if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) continue; - if (!is_spilled_reg(&old->stack[spi])) - continue; - if (!regsafe(env, &old->stack[spi].spilled_ptr, - &cur->stack[spi].spilled_ptr, idmap)) + /* Both old and cur are having same slot_type */ + switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { + case STACK_SPILL: /* when explored and current stack slot are both storing * spilled registers, check that stored pointers types * are the same as well. @@ -13229,7 +13286,30 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, * such verifier states are not equivalent. * return false to continue verification of this path */ + if (!regsafe(env, &old->stack[spi].spilled_ptr, + &cur->stack[spi].spilled_ptr, idmap)) + return false; + break; + case STACK_DYNPTR: + { + const struct bpf_reg_state *old_reg, *cur_reg; + + old_reg = &old->stack[spi].spilled_ptr; + cur_reg = &cur->stack[spi].spilled_ptr; + if (old_reg->dynptr.type != cur_reg->dynptr.type || + old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || + !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) + return false; + break; + } + case STACK_MISC: + case STACK_ZERO: + case STACK_INVALID: + continue; + /* Ensure that new unhandled slot types return false by default */ + default: return false; + } } return true; } -- cgit v1.2.3-58-ga151 From 79168a669d8125453c8a271115f1ffd4294e61f6 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 21 Jan 2023 05:52:31 +0530 Subject: bpf: Fix missing var_off check for ARG_PTR_TO_DYNPTR Currently, the dynptr function is not checking the variable offset part of PTR_TO_STACK that it needs to check. The fixed offset is considered when computing the stack pointer index, but if the variable offset was not a constant (such that it could not be accumulated in reg->off), we will end up a discrepency where runtime pointer does not point to the actual stack slot we mark as STACK_DYNPTR. It is impossible to precisely track dynptr state when variable offset is not constant, hence, just like bpf_timer, kptr, bpf_spin_lock, etc. simply reject the case where reg->var_off is not constant. Then, consider both reg->off and reg->var_off.value when computing the stack pointer index. A new helper dynptr_get_spi is introduced to hide over these details since the dynptr needs to be located in multiple places outside the process_dynptr_func checks, hence once we know it's a PTR_TO_STACK, we need to enforce these checks in all places. Note that it is disallowed for unprivileged users to have a non-constant var_off, so this problem should only be possible to trigger from programs having CAP_PERFMON. However, its effects can vary. 
Without the fix, it is possible to replace the contents of the dynptr arbitrarily by making verifier mark different stack slots than actual location and then doing writes to the actual stack address of dynptr at runtime. Fixes: 97e03f521050 ("bpf: Add verifier support for dynptrs") Acked-by: Joanne Koong Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230121002241.2113993-3-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 84 +++++++++++++++++----- .../selftests/bpf/prog_tests/kfunc_dynptr_param.c | 2 +- tools/testing/selftests/bpf/progs/dynptr_fail.c | 4 +- 3 files changed, 69 insertions(+), 21 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 39d8ee38c338..76afdbea425a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -638,11 +638,34 @@ static void print_liveness(struct bpf_verifier_env *env, verbose(env, "D"); } -static int get_spi(s32 off) +static int __get_spi(s32 off) { return (-off - 1) / BPF_REG_SIZE; } +static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +{ + int off, spi; + + if (!tnum_is_const(reg->var_off)) { + verbose(env, "dynptr has to be at a constant offset\n"); + return -EINVAL; + } + + off = reg->off + reg->var_off.value; + if (off % BPF_REG_SIZE) { + verbose(env, "cannot pass in dynptr at an offset=%d\n", off); + return -EINVAL; + } + + spi = __get_spi(off); + if (spi < 1) { + verbose(env, "cannot pass in dynptr at an offset=%d\n", off); + return -EINVAL; + } + return spi; +} + static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) { int allocated_slots = state->allocated_stack / BPF_REG_SIZE; @@ -754,7 +777,9 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_ enum bpf_dynptr_type type; int spi, i, id; - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return spi; if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) return -EINVAL; @@ -792,7 +817,9 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re struct bpf_func_state *state = func(env, reg); int spi, i; - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return spi; if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) return -EINVAL; @@ -844,7 +871,11 @@ static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_ if (reg->type == CONST_PTR_TO_DYNPTR) return false; - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return false; + + /* We will do check_mem_access to check and update stack bounds later */ if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) return true; @@ -860,14 +891,15 @@ static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_ static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { struct bpf_func_state *state = func(env, reg); - int spi; - int i; + int spi, i; /* This already represents first slot of initialized bpf_dynptr */ if (reg->type == CONST_PTR_TO_DYNPTR) return true; - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return false; if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || !state->stack[spi].spilled_ptr.dynptr.first_slot) return false; @@ -896,7 +928,9 @@ static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg if (reg->type == CONST_PTR_TO_DYNPTR) { return reg->dynptr.type == dynptr_type; } 
else { - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return false; return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; } } @@ -2429,7 +2463,9 @@ static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state * */ if (reg->type == CONST_PTR_TO_DYNPTR) return 0; - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return spi; /* Caller ensures dynptr is valid and initialized, which means spi is in * bounds and spi is the first dynptr slot. Simply mark stack slot as * read. @@ -5992,12 +6028,15 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, } /* CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to * check_func_arg_reg_off's logic. We only need to check offset - * alignment for PTR_TO_STACK. + * and its alignment for PTR_TO_STACK. */ - if (reg->type == PTR_TO_STACK && (reg->off % BPF_REG_SIZE)) { - verbose(env, "cannot pass in dynptr at an offset=%d\n", reg->off); - return -EINVAL; + if (reg->type == PTR_TO_STACK) { + int err = dynptr_get_spi(env, reg); + + if (err < 0) + return err; } + /* MEM_UNINIT - Points to memory that is an appropriate candidate for * constructing a mutable bpf_dynptr object. * @@ -6405,15 +6444,16 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, } } -static u32 dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { struct bpf_func_state *state = func(env, reg); int spi; if (reg->type == CONST_PTR_TO_DYNPTR) return reg->ref_obj_id; - - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return spi; return state->stack[spi].spilled_ptr.ref_obj_id; } @@ -6487,7 +6527,9 @@ skip_type_check: * PTR_TO_STACK. 
*/ if (reg->type == PTR_TO_STACK) { - spi = get_spi(reg->off); + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return spi; if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || !state->stack[spi].spilled_ptr.ref_obj_id) { verbose(env, "arg %d is an unacquired reference\n", regno); @@ -7977,13 +8019,19 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { if (arg_type_is_dynptr(fn->arg_type[i])) { struct bpf_reg_state *reg = ®s[BPF_REG_1 + i]; + int ref_obj_id; if (meta.ref_obj_id) { verbose(env, "verifier internal error: meta.ref_obj_id already set\n"); return -EFAULT; } - meta.ref_obj_id = dynptr_ref_obj_id(env, reg); + ref_obj_id = dynptr_ref_obj_id(env, reg); + if (ref_obj_id < 0) { + verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n"); + return ref_obj_id; + } + meta.ref_obj_id = ref_obj_id; break; } } diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c index a9229260a6ce..72800b1e8395 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c @@ -18,7 +18,7 @@ static struct { const char *expected_verifier_err_msg; int expected_runtime_err; } kfunc_dynptr_tests[] = { - {"not_valid_dynptr", "Expected an initialized dynptr as arg #1", 0}, + {"not_valid_dynptr", "cannot pass in dynptr at an offset=-8", 0}, {"not_ptr_to_stack", "arg#0 expected pointer to stack or dynptr_ptr", 0}, {"dynptr_data_null", NULL, -EBADMSG}, }; diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 78debc1b3820..02d57b95cf6e 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -382,7 +382,7 @@ int invalid_helper1(void *ctx) /* A dynptr can't be passed into a helper function at a non-zero offset */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("cannot pass in dynptr at an offset=-8") int invalid_helper2(void *ctx) { struct bpf_dynptr ptr; @@ -584,7 +584,7 @@ int invalid_read4(void *ctx) /* Initializing a dynptr on an offset should fail */ SEC("?raw_tp") -__failure __msg("invalid write to stack") +__failure __msg("cannot pass in dynptr at an offset=0") int invalid_offset(void *ctx) { struct bpf_dynptr ptr; -- cgit v1.2.3-58-ga151 From ef8fc7a07c0e161841779d6fe3f6acd5a05c547c Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 21 Jan 2023 05:52:32 +0530 Subject: bpf: Fix partial dynptr stack slot reads/writes Currently, while reads are disallowed for dynptr stack slots, writes are not. Reads don't work from both direct access and helpers, while writes do work in both cases, but have the effect of overwriting the slot_type. While this is fine, handling for a few edge cases is missing. Firstly, a user can overwrite the stack slots of dynptr partially. Consider the following layout: spi: [d][d][?] 2 1 0 First slot is at spi 2, second at spi 1. Now, do a write of 1 to 8 bytes for spi 1. This will essentially either write STACK_MISC for all slot_types or STACK_MISC and STACK_ZERO (in case of size < BPF_REG_SIZE partial write of zeroes). The end result is that slot is scrubbed. Now, the layout is: spi: [d][m][?] 2 1 0 Suppose if user initializes spi = 1 as dynptr. We get: spi: [d][d][d] 2 1 0 But this time, both spi 2 and spi 1 have first_slot = true. 
Now, when passing spi 2 to dynptr helper, it will consider it as initialized as it does not check whether second slot has first_slot == false. And spi 1 should already work as normal. This effectively replaced size + offset of first dynptr, hence allowing invalid OOB reads and writes. Make a few changes to protect against this: When writing to PTR_TO_STACK using BPF insns, when we touch spi of a STACK_DYNPTR type, mark both first and second slot (regardless of which slot we touch) as STACK_INVALID. Reads are already prevented. Second, prevent writing to stack memory from helpers if the range may contain any STACK_DYNPTR slots. Reads are already prevented. For helpers, we cannot allow it to destroy dynptrs from the writes as depending on arguments, helper may take uninit_mem and dynptr both at the same time. This would mean that helper may write to uninit_mem before it reads the dynptr, which would be bad. PTR_TO_MEM: [?????dd] Depending on the code inside the helper, it may end up overwriting the dynptr contents first and then read those as the dynptr argument. Verifier would only simulate destruction when it does byte by byte access simulation in check_helper_call for meta.access_size, and fail to catch this case, as it happens after argument checks. The same would need to be done for any other non-trivial objects created on the stack in the future, such as bpf_list_head on stack, or bpf_rb_root on stack. A common misunderstanding in the current code is that MEM_UNINIT means writes, but note that writes may also be performed even without MEM_UNINIT in case of helpers, in that case the code after handling meta && meta->raw_mode will complain when it sees STACK_DYNPTR. So that invalid read case also covers writes to potential STACK_DYNPTR slots. The only loophole was in case of meta->raw_mode which simulated writes through instructions which could overwrite them. A future series sequenced after this will focus on the clean up of helper access checks and bugs around that. Fixes: 97e03f521050 ("bpf: Add verifier support for dynptrs") Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230121002241.2113993-4-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 88 +++++++++++++++++++++++++ tools/testing/selftests/bpf/progs/dynptr_fail.c | 6 +- 2 files changed, 91 insertions(+), 3 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 76afdbea425a..5c7f29ca94ec 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -769,6 +769,8 @@ static void mark_dynptr_cb_reg(struct bpf_reg_state *reg, __mark_dynptr_reg(reg, type, true); } +static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, + struct bpf_func_state *state, int spi); static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg, enum bpf_arg_type arg_type, int insn_idx) @@ -863,6 +865,55 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re return 0; } +static void __mark_reg_unknown(const struct bpf_verifier_env *env, + struct bpf_reg_state *reg); + +static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, + struct bpf_func_state *state, int spi) +{ + int i; + + /* We always ensure that STACK_DYNPTR is never set partially, + * hence just checking for slot_type[0] is enough. This is + * different for STACK_SPILL, where it may be only set for + * 1 byte, so code has to use is_spilled_reg. 
+ */ + if (state->stack[spi].slot_type[0] != STACK_DYNPTR) + return 0; + + /* Reposition spi to first slot */ + if (!state->stack[spi].spilled_ptr.dynptr.first_slot) + spi = spi + 1; + + if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { + verbose(env, "cannot overwrite referenced dynptr\n"); + return -EINVAL; + } + + mark_stack_slot_scratched(env, spi); + mark_stack_slot_scratched(env, spi - 1); + + /* Writing partially to one dynptr stack slot destroys both. */ + for (i = 0; i < BPF_REG_SIZE; i++) { + state->stack[spi].slot_type[i] = STACK_INVALID; + state->stack[spi - 1].slot_type[i] = STACK_INVALID; + } + + /* TODO: Invalidate any slices associated with this dynptr */ + + /* Do not release reference state, we are destroying dynptr on stack, + * not using some helper to release it. Just reset register. + */ + __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); + __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); + + /* Same reason as unmark_stack_slots_dynptr above */ + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; + + return 0; +} + static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { struct bpf_func_state *state = func(env, reg); @@ -3391,6 +3442,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, env->insn_aux_data[insn_idx].sanitize_stack_spill = true; } + err = destroy_if_dynptr_stack_slot(env, state, spi); + if (err) + return err; + mark_stack_slot_scratched(env, spi); if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) { @@ -3504,6 +3559,14 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env, if (err) return err; + for (i = min_off; i < max_off; i++) { + int spi; + + spi = __get_spi(i); + err = destroy_if_dynptr_stack_slot(env, state, spi); + if (err) + return err; + } /* Variable offset writes destroy any spilled pointers in range. */ for (i = min_off; i < max_off; i++) { @@ -5531,6 +5594,31 @@ static int check_stack_range_initialized( } if (meta && meta->raw_mode) { + /* Ensure we won't be overwriting dynptrs when simulating byte + * by byte access in check_helper_call using meta.access_size. + * This would be a problem if we have a helper in the future + * which takes: + * + * helper(uninit_mem, len, dynptr) + * + * Now, uninint_mem may overlap with dynptr pointer. Hence, it + * may end up writing to dynptr itself when touching memory from + * arg 1. This can be relaxed on a case by case basis for known + * safe cases, but reject due to the possibilitiy of aliasing by + * default. 
+ */ + for (i = min_off; i < max_off + access_size; i++) { + int stack_off = -i - 1; + + spi = __get_spi(i); + /* raw_mode may write past allocated_stack */ + if (state->allocated_stack <= stack_off) + continue; + if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { + verbose(env, "potential write to dynptr at off=%d disallowed\n", i); + return -EACCES; + } + } meta->access_size = access_size; meta->regno = regno; return 0; diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 02d57b95cf6e..9dc3f23a8270 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -420,7 +420,7 @@ int invalid_write1(void *ctx) * offset */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("cannot overwrite referenced dynptr") int invalid_write2(void *ctx) { struct bpf_dynptr ptr; @@ -444,7 +444,7 @@ int invalid_write2(void *ctx) * non-const offset */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("cannot overwrite referenced dynptr") int invalid_write3(void *ctx) { struct bpf_dynptr ptr; @@ -476,7 +476,7 @@ static int invalid_write4_callback(__u32 index, void *data) * be invalidated as a dynptr */ SEC("?raw_tp") -__failure __msg("arg 1 is an unacquired reference") +__failure __msg("cannot overwrite referenced dynptr") int invalid_write4(void *ctx) { struct bpf_dynptr ptr; -- cgit v1.2.3-58-ga151 From f8064ab90d6644bc8338d2d7ff6a0d6e7a1b2ef3 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 21 Jan 2023 05:52:33 +0530 Subject: bpf: Invalidate slices on destruction of dynptrs on stack The previous commit implemented destroy_if_dynptr_stack_slot. It destroys the dynptr which given spi belongs to, but still doesn't invalidate the slices that belong to such a dynptr. While for the case of referenced dynptr, we don't allow their overwrite and return an error early, we still allow it and destroy the dynptr for unreferenced dynptr. To be able to enable precise and scoped invalidation of dynptr slices in this case, we must be able to associate the source dynptr of slices that have been obtained using bpf_dynptr_data. When doing destruction, only slices belonging to the dynptr being destructed should be invalidated, and nothing else. Currently, dynptr slices belonging to different dynptrs are indistinguishible. Hence, allocate a unique id to each dynptr (CONST_PTR_TO_DYNPTR and those on stack). This will be stored as part of reg->id. Whenever using bpf_dynptr_data, transfer this unique dynptr id to the returned PTR_TO_MEM_OR_NULL slice pointer, and store it in a new per-PTR_TO_MEM dynptr_id register state member. Finally, after establishing such a relationship between dynptrs and their slices, implement precise invalidation logic that only invalidates slices belong to the destroyed dynptr in destroy_if_dynptr_stack_slot. 
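To make the intended behaviour concrete, here is a rough restricted-C sketch. It is not taken from the patch or the selftests: the program name, the global buffer and the section name are invented, while bpf_dynptr_from_mem() and bpf_dynptr_data() are the existing dynptr helpers this series operates on.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char buf[16];	/* global, hence map-backed memory backing the dynptr */

SEC("?raw_tp")
int stale_slice_use(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 *slice;

	bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr);
	slice = bpf_dynptr_data(&ptr, 0, 8);
	if (!slice)
		return 0;

	/* Scribbling over the stack slots of this unreferenced dynptr
	 * destroys it; with this patch, destroy_if_dynptr_stack_slot()
	 * walks the verifier state and invalidates only the slices whose
	 * dynptr_id matches, i.e. 'slice' here but nothing else.
	 */
	*(volatile __u64 *)&ptr = 0;

	/* Use of the stale slice is therefore rejected, while slices of
	 * any other, untouched dynptr keep working.
	 */
	return *slice ? 1 : 0;
}

char LICENSE[] SEC("license") = "GPL";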
Acked-by: Joanne Koong Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230121002241.2113993-5-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 5 +- kernel/bpf/verifier.c | 74 +++++++++++++++++++++---- tools/testing/selftests/bpf/progs/dynptr_fail.c | 4 +- 3 files changed, 68 insertions(+), 15 deletions(-) (limited to 'kernel/bpf') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 127058cfec47..aa83de1fe755 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -70,7 +70,10 @@ struct bpf_reg_state { u32 btf_id; }; - u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ + struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ + u32 mem_size; + u32 dynptr_id; /* for dynptr slices */ + }; /* For dynptr stack slots */ struct { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5c7f29ca94ec..01cb802776fd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -255,6 +255,7 @@ struct bpf_call_arg_meta { int mem_size; u64 msize_max_value; int ref_obj_id; + int dynptr_id; int map_uid; int func_id; struct btf *btf; @@ -750,23 +751,27 @@ static bool dynptr_type_refcounted(enum bpf_dynptr_type type) static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type, - bool first_slot); + bool first_slot, int dynptr_id); static void __mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg); -static void mark_dynptr_stack_regs(struct bpf_reg_state *sreg1, +static void mark_dynptr_stack_regs(struct bpf_verifier_env *env, + struct bpf_reg_state *sreg1, struct bpf_reg_state *sreg2, enum bpf_dynptr_type type) { - __mark_dynptr_reg(sreg1, type, true); - __mark_dynptr_reg(sreg2, type, false); + int id = ++env->id_gen; + + __mark_dynptr_reg(sreg1, type, true, id); + __mark_dynptr_reg(sreg2, type, false, id); } -static void mark_dynptr_cb_reg(struct bpf_reg_state *reg, +static void mark_dynptr_cb_reg(struct bpf_verifier_env *env, + struct bpf_reg_state *reg, enum bpf_dynptr_type type) { - __mark_dynptr_reg(reg, type, true); + __mark_dynptr_reg(reg, type, true, ++env->id_gen); } static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, @@ -795,7 +800,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_ if (type == BPF_DYNPTR_TYPE_INVALID) return -EINVAL; - mark_dynptr_stack_regs(&state->stack[spi].spilled_ptr, + mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, &state->stack[spi - 1].spilled_ptr, type); if (dynptr_type_refcounted(type)) { @@ -871,7 +876,9 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env, static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi) { - int i; + struct bpf_func_state *fstate; + struct bpf_reg_state *dreg; + int i, dynptr_id; /* We always ensure that STACK_DYNPTR is never set partially, * hence just checking for slot_type[0] is enough. 
This is @@ -899,7 +906,19 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, state->stack[spi - 1].slot_type[i] = STACK_INVALID; } - /* TODO: Invalidate any slices associated with this dynptr */ + dynptr_id = state->stack[spi].spilled_ptr.id; + /* Invalidate any slices associated with this dynptr */ + bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ + /* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */ + if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) + continue; + if (dreg->dynptr_id == dynptr_id) { + if (!env->allow_ptr_leaks) + __mark_reg_not_init(env, dreg); + else + __mark_reg_unknown(env, dreg); + } + })); /* Do not release reference state, we are destroying dynptr on stack, * not using some helper to release it. Just reset register. @@ -1562,7 +1581,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env, } static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type, - bool first_slot) + bool first_slot, int dynptr_id) { /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply @@ -1570,6 +1589,8 @@ static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type ty */ __mark_reg_known_zero(reg); reg->type = CONST_PTR_TO_DYNPTR; + /* Give each dynptr a unique id to uniquely associate slices to it. */ + reg->id = dynptr_id; reg->dynptr.type = type; reg->dynptr.first_slot = first_slot; } @@ -6532,6 +6553,19 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, } } +static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +{ + struct bpf_func_state *state = func(env, reg); + int spi; + + if (reg->type == CONST_PTR_TO_DYNPTR) + return reg->id; + spi = dynptr_get_spi(env, reg); + if (spi < 0) + return spi; + return state->stack[spi].spilled_ptr.id; +} + static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { struct bpf_func_state *state = func(env, reg); @@ -7601,7 +7635,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx); */ __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); - mark_dynptr_cb_reg(&callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); + mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; /* unused */ @@ -8107,18 +8141,31 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { if (arg_type_is_dynptr(fn->arg_type[i])) { struct bpf_reg_state *reg = ®s[BPF_REG_1 + i]; - int ref_obj_id; + int id, ref_obj_id; + + if (meta.dynptr_id) { + verbose(env, "verifier internal error: meta.dynptr_id already set\n"); + return -EFAULT; + } if (meta.ref_obj_id) { verbose(env, "verifier internal error: meta.ref_obj_id already set\n"); return -EFAULT; } + id = dynptr_id(env, reg); + if (id < 0) { + verbose(env, "verifier internal error: failed to obtain dynptr id\n"); + return id; + } + ref_obj_id = dynptr_ref_obj_id(env, reg); if (ref_obj_id < 0) { verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n"); return ref_obj_id; } + + meta.dynptr_id = id; meta.ref_obj_id = ref_obj_id; break; } @@ -8275,6 +8322,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -EFAULT; } + if (is_dynptr_ref_function(func_id)) + regs[BPF_REG_0].dynptr_id = 
meta.dynptr_id; + if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) { /* For release_reference() */ regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 9dc3f23a8270..e43000c63c66 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -67,7 +67,7 @@ static int get_map_val_dynptr(struct bpf_dynptr *ptr) * bpf_ringbuf_submit/discard_dynptr call */ SEC("?raw_tp") -__failure __msg("Unreleased reference id=1") +__failure __msg("Unreleased reference id=2") int ringbuf_missing_release1(void *ctx) { struct bpf_dynptr ptr; @@ -80,7 +80,7 @@ int ringbuf_missing_release1(void *ctx) } SEC("?raw_tp") -__failure __msg("Unreleased reference id=2") +__failure __msg("Unreleased reference id=4") int ringbuf_missing_release2(void *ctx) { struct bpf_dynptr ptr1, ptr2; -- cgit v1.2.3-58-ga151 From 379d4ba831cfa895d0cc61d88cd0e1402f35818c Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 21 Jan 2023 05:52:34 +0530 Subject: bpf: Allow reinitializing unreferenced dynptr stack slots Consider a program like below: void prog(void) { { struct bpf_dynptr ptr; bpf_dynptr_from_mem(...); } ... { struct bpf_dynptr ptr; bpf_dynptr_from_mem(...); } } Here, the C compiler based on lifetime rules in the C standard would be well within in its rights to share stack storage for dynptr 'ptr' as their lifetimes do not overlap in the two distinct scopes. Currently, such an example would be rejected by the verifier, but this is too strict. Instead, we should allow reinitializing over dynptr stack slots and forget information about the old dynptr object. The destroy_if_dynptr_stack_slot function already makes necessary checks to avoid overwriting referenced dynptr slots. This is done to present a better error message instead of forgetting dynptr information on stack and preserving reference state, leading to an inevitable but undecipherable error at the end about an unreleased reference which has to be associated back to its allocating call instruction to make any sense to the user. Acked-by: Joanne Koong Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230121002241.2113993-6-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 01cb802776fd..e5745b696bfe 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -782,7 +782,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_ { struct bpf_func_state *state = func(env, reg); enum bpf_dynptr_type type; - int spi, i, id; + int spi, i, id, err; spi = dynptr_get_spi(env, reg); if (spi < 0) @@ -791,6 +791,22 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_ if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) return -EINVAL; + /* We cannot assume both spi and spi - 1 belong to the same dynptr, + * hence we need to call destroy_if_dynptr_stack_slot twice for both, + * to ensure that for the following example: + * [d1][d1][d2][d2] + * spi 3 2 1 0 + * So marking spi = 2 should lead to destruction of both d1 and d2. In + * case they do belong to same dynptr, second call won't see slot_type + * as STACK_DYNPTR and will simply skip destruction. 
+ */ + err = destroy_if_dynptr_stack_slot(env, state, spi); + if (err) + return err; + err = destroy_if_dynptr_stack_slot(env, state, spi - 1); + if (err) + return err; + for (i = 0; i < BPF_REG_SIZE; i++) { state->stack[spi].slot_type[i] = STACK_DYNPTR; state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; @@ -936,7 +952,7 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { struct bpf_func_state *state = func(env, reg); - int spi, i; + int spi; if (reg->type == CONST_PTR_TO_DYNPTR) return false; @@ -949,12 +965,14 @@ static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_ if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) return true; - for (i = 0; i < BPF_REG_SIZE; i++) { - if (state->stack[spi].slot_type[i] == STACK_DYNPTR || - state->stack[spi - 1].slot_type[i] == STACK_DYNPTR) - return false; - } - + /* We allow overwriting existing unreferenced STACK_DYNPTR slots, see + * mark_stack_slots_dynptr which calls destroy_if_dynptr_stack_slot to + * ensure dynptr objects at the slots we are touching are completely + * destructed before we reinitialize them for a new one. For referenced + * ones, destroy_if_dynptr_stack_slot returns an error early instead of + * delaying it until the end where the user will get "Unreleased + * reference" error. + */ return true; } -- cgit v1.2.3-58-ga151 From f5b625e5f8bbc6be8bb568a64d7906b091bc7cb0 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 21 Jan 2023 05:52:35 +0530 Subject: bpf: Combine dynptr_get_spi and is_spi_bounds_valid Currently, a check on spi resides in dynptr_get_spi, while others checking its validity for being within the allocated stack slots happens in is_spi_bounds_valid. Almost always barring a couple of cases (where being beyond allocated stack slots is not an error as stack slots need to be populated), both are used together to make checks. Hence, subsume the is_spi_bounds_valid check in dynptr_get_spi, and return -ERANGE to specially distinguish the case where spi is valid but not within allocated slots in the stack state. The is_spi_bounds_valid function is still kept around as it is a generic helper that will be useful for other objects on stack similar to dynptr in the future. Suggested-by: Joanne Koong Acked-by: Joanne Koong Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230121002241.2113993-7-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 75 +++++++++++++++++++++++---------------------------- 1 file changed, 33 insertions(+), 42 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e5745b696bfe..29cbb3ef35e2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -644,6 +644,28 @@ static int __get_spi(s32 off) return (-off - 1) / BPF_REG_SIZE; } +static struct bpf_func_state *func(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg) +{ + struct bpf_verifier_state *cur = env->cur_state; + + return cur->frame[reg->frameno]; +} + +static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) +{ + int allocated_slots = state->allocated_stack / BPF_REG_SIZE; + + /* We need to check that slots between [spi - nr_slots + 1, spi] are + * within [0, allocated_stack). + * + * Please note that the spi grows downwards. 
For example, a dynptr + * takes the size of two stack slots; the first slot will be at + * spi and the second slot will be at spi - 1. + */ + return spi - nr_slots + 1 >= 0 && spi < allocated_slots; +} + static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { int off, spi; @@ -664,29 +686,10 @@ static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *re verbose(env, "cannot pass in dynptr at an offset=%d\n", off); return -EINVAL; } - return spi; -} - -static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) -{ - int allocated_slots = state->allocated_stack / BPF_REG_SIZE; - /* We need to check that slots between [spi - nr_slots + 1, spi] are - * within [0, allocated_stack). - * - * Please note that the spi grows downwards. For example, a dynptr - * takes the size of two stack slots; the first slot will be at - * spi and the second slot will be at spi - 1. - */ - return spi - nr_slots + 1 >= 0 && spi < allocated_slots; -} - -static struct bpf_func_state *func(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg) -{ - struct bpf_verifier_state *cur = env->cur_state; - - return cur->frame[reg->frameno]; + if (!is_spi_bounds_valid(func(env, reg), spi, BPF_DYNPTR_NR_SLOTS)) + return -ERANGE; + return spi; } static const char *kernel_type_name(const struct btf* btf, u32 id) @@ -788,9 +791,6 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_ if (spi < 0) return spi; - if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) - return -EINVAL; - /* We cannot assume both spi and spi - 1 belong to the same dynptr, * hence we need to call destroy_if_dynptr_stack_slot twice for both, * to ensure that for the following example: @@ -844,9 +844,6 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re if (spi < 0) return spi; - if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) - return -EINVAL; - for (i = 0; i < BPF_REG_SIZE; i++) { state->stack[spi].slot_type[i] = STACK_INVALID; state->stack[spi - 1].slot_type[i] = STACK_INVALID; @@ -951,20 +948,18 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { - struct bpf_func_state *state = func(env, reg); int spi; if (reg->type == CONST_PTR_TO_DYNPTR) return false; spi = dynptr_get_spi(env, reg); + /* For -ERANGE (i.e. spi not falling into allocated stack slots), we + * will do check_mem_access to check and update stack bounds later, so + * return true for that case. 
+ */ if (spi < 0) - return false; - - /* We will do check_mem_access to check and update stack bounds later */ - if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) - return true; - + return spi == -ERANGE; /* We allow overwriting existing unreferenced STACK_DYNPTR slots, see * mark_stack_slots_dynptr which calls destroy_if_dynptr_stack_slot to * ensure dynptr objects at the slots we are touching are completely @@ -988,8 +983,7 @@ static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_re spi = dynptr_get_spi(env, reg); if (spi < 0) return false; - if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || - !state->stack[spi].spilled_ptr.dynptr.first_slot) + if (!state->stack[spi].spilled_ptr.dynptr.first_slot) return false; for (i = 0; i < BPF_REG_SIZE; i++) { @@ -6160,7 +6154,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, if (reg->type == PTR_TO_STACK) { int err = dynptr_get_spi(env, reg); - if (err < 0) + if (err < 0 && err != -ERANGE) return err; } @@ -6668,10 +6662,7 @@ skip_type_check: */ if (reg->type == PTR_TO_STACK) { spi = dynptr_get_spi(env, reg); - if (spi < 0) - return spi; - if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || - !state->stack[spi].spilled_ptr.ref_obj_id) { + if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { verbose(env, "arg %d is an unacquired reference\n", regno); return -EINVAL; } -- cgit v1.2.3-58-ga151 From 1ee72bcbe48de6dcfa44d6eba0aec6e42d04cd4d Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 21 Jan 2023 05:52:36 +0530 Subject: bpf: Avoid recomputing spi in process_dynptr_func Currently, process_dynptr_func first calls dynptr_get_spi and then is_dynptr_reg_valid_init and is_dynptr_reg_valid_uninit have to call it again to obtain the spi value. Instead of doing this twice, reuse the already obtained value (which is by default 0, and is only set for PTR_TO_STACK, and only used in that case in aforementioned functions). The input value for these two functions will either be -ERANGE or >= 1, and can either be permitted or rejected based on the respective check. Suggested-by: Joanne Koong Acked-by: Joanne Koong Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230121002241.2113993-8-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 29cbb3ef35e2..ecf7fed7881c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -946,14 +946,12 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, return 0; } -static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + int spi) { - int spi; - if (reg->type == CONST_PTR_TO_DYNPTR) return false; - spi = dynptr_get_spi(env, reg); /* For -ERANGE (i.e. spi not falling into allocated stack slots), we * will do check_mem_access to check and update stack bounds later, so * return true for that case. 
@@ -971,16 +969,16 @@ static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_ return true; } -static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) +static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + int spi) { struct bpf_func_state *state = func(env, reg); - int spi, i; + int i; /* This already represents first slot of initialized bpf_dynptr */ if (reg->type == CONST_PTR_TO_DYNPTR) return true; - spi = dynptr_get_spi(env, reg); if (spi < 0) return false; if (!state->stack[spi].spilled_ptr.dynptr.first_slot) @@ -6139,6 +6137,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + int spi = 0; /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*): @@ -6152,10 +6151,9 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, * and its alignment for PTR_TO_STACK. */ if (reg->type == PTR_TO_STACK) { - int err = dynptr_get_spi(env, reg); - - if (err < 0 && err != -ERANGE) - return err; + spi = dynptr_get_spi(env, reg); + if (spi < 0 && spi != -ERANGE) + return spi; } /* MEM_UNINIT - Points to memory that is an appropriate candidate for @@ -6174,7 +6172,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, * to. */ if (arg_type & MEM_UNINIT) { - if (!is_dynptr_reg_valid_uninit(env, reg)) { + if (!is_dynptr_reg_valid_uninit(env, reg, spi)) { verbose(env, "Dynptr has to be an uninitialized dynptr\n"); return -EINVAL; } @@ -6197,7 +6195,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno, return -EINVAL; } - if (!is_dynptr_reg_valid_init(env, reg)) { + if (!is_dynptr_reg_valid_init(env, reg, spi)) { verbose(env, "Expected an initialized dynptr as arg #%d\n", regno); -- cgit v1.2.3-58-ga151 From 9d03ebc71a027ca495c60f6e94d3cda81921791f Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 19 Jan 2023 14:15:21 -0800 Subject: bpf: Rename bpf_{prog,map}_is_dev_bound to is_offloaded BPF offloading infra will be reused to implement bound-but-not-offloaded bpf programs. Rename existing helpers for clarity. No functional changes. 
Cc: John Fastabend Cc: David Ahern Cc: Martin KaFai Lau Cc: Willem de Bruijn Cc: Jesper Dangaard Brouer Cc: Anatoly Burakov Cc: Alexander Lobakin Cc: Magnus Karlsson Cc: Maryam Tahhan Cc: xdp-hints@xdp-project.net Cc: netdev@vger.kernel.org Reviewed-by: Jakub Kicinski Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230119221536.3349901-3-sdf@google.com Signed-off-by: Martin KaFai Lau --- include/linux/bpf.h | 8 ++++---- kernel/bpf/core.c | 4 ++-- kernel/bpf/offload.c | 4 ++-- kernel/bpf/syscall.c | 22 +++++++++++----------- kernel/bpf/verifier.c | 18 +++++++++--------- net/core/dev.c | 4 ++-- net/core/filter.c | 2 +- 7 files changed, 31 insertions(+), 31 deletions(-) (limited to 'kernel/bpf') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ae7771c7d750..1bb525c0130e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2481,12 +2481,12 @@ void unpriv_ebpf_notify(int new_state); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); -static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) +static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux) { return aux->offload_requested; } -static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +static inline bool bpf_map_is_offloaded(struct bpf_map *map) { return unlikely(map->ops == &bpf_map_offload_ops); } @@ -2513,12 +2513,12 @@ static inline int bpf_prog_offload_init(struct bpf_prog *prog, return -EOPNOTSUPP; } -static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) +static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux) { return false; } -static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +static inline bool bpf_map_is_offloaded(struct bpf_map *map) { return false; } diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ba3fff17e2f9..515f4f08591c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2182,7 +2182,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) * valid program, which in this case would simply not * be JITed, but falls back to the interpreter. 
*/ - if (!bpf_prog_is_dev_bound(fp->aux)) { + if (!bpf_prog_is_offloaded(fp->aux)) { *err = bpf_prog_alloc_jited_linfo(fp); if (*err) return fp; @@ -2553,7 +2553,7 @@ static void bpf_prog_free_deferred(struct work_struct *work) #endif bpf_free_used_maps(aux); bpf_free_used_btfs(aux); - if (bpf_prog_is_dev_bound(aux)) + if (bpf_prog_is_offloaded(aux)) bpf_prog_offload_destroy(aux->prog); #ifdef CONFIG_PERF_EVENTS if (aux->prog->has_callchain_buf) diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 13e4efc971e6..f5769a8ecbee 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -549,7 +549,7 @@ static bool __bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_offload_netdev *ondev1, *ondev2; struct bpf_prog_offload *offload; - if (!bpf_prog_is_dev_bound(prog->aux)) + if (!bpf_prog_is_offloaded(prog->aux)) return false; offload = prog->aux->offload; @@ -581,7 +581,7 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) struct bpf_offloaded_map *offmap; bool ret; - if (!bpf_map_is_dev_bound(map)) + if (!bpf_map_is_offloaded(map)) return bpf_map_offload_neutral(map); offmap = map_to_offmap(map); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 35ffd808f281..5e90b697f908 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -181,7 +181,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, int err; /* Need to create a kthread, thus must support schedule */ - if (bpf_map_is_dev_bound(map)) { + if (bpf_map_is_offloaded(map)) { return bpf_map_offload_update_elem(map, key, value, flags); } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { @@ -238,7 +238,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, void *ptr; int err; - if (bpf_map_is_dev_bound(map)) + if (bpf_map_is_offloaded(map)) return bpf_map_offload_lookup_elem(map, key, value); bpf_disable_instrumentation(); @@ -1483,7 +1483,7 @@ static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr) goto err_put; } - if (bpf_map_is_dev_bound(map)) { + if (bpf_map_is_offloaded(map)) { err = bpf_map_offload_delete_elem(map, key); goto out; } else if (IS_FD_PROG_ARRAY(map) || @@ -1547,7 +1547,7 @@ static int map_get_next_key(union bpf_attr *attr) if (!next_key) goto free_key; - if (bpf_map_is_dev_bound(map)) { + if (bpf_map_is_offloaded(map)) { err = bpf_map_offload_get_next_key(map, key, next_key); goto out; } @@ -1605,7 +1605,7 @@ int generic_map_delete_batch(struct bpf_map *map, map->key_size)) break; - if (bpf_map_is_dev_bound(map)) { + if (bpf_map_is_offloaded(map)) { err = bpf_map_offload_delete_elem(map, key); break; } @@ -1851,7 +1851,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { - if (!bpf_map_is_dev_bound(map)) { + if (!bpf_map_is_offloaded(map)) { bpf_disable_instrumentation(); rcu_read_lock(); err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); @@ -1944,7 +1944,7 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) if (!ops) return -EINVAL; - if (!bpf_prog_is_dev_bound(prog->aux)) + if (!bpf_prog_is_offloaded(prog->aux)) prog->aux->ops = ops; else prog->aux->ops = &bpf_offload_prog_ops; @@ -2255,7 +2255,7 @@ bool bpf_prog_get_ok(struct bpf_prog *prog, if (prog->type != *attach_type) return false; - if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) + if 
(bpf_prog_is_offloaded(prog->aux) && !attach_drv) return false; return true; @@ -2598,7 +2598,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) atomic64_set(&prog->aux->refcnt, 1); prog->gpl_compatible = is_gpl ? 1 : 0; - if (bpf_prog_is_dev_bound(prog->aux)) { + if (bpf_prog_is_offloaded(prog->aux)) { err = bpf_prog_offload_init(prog, attr); if (err) goto free_prog_sec; @@ -3997,7 +3997,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, return -EFAULT; } - if (bpf_prog_is_dev_bound(prog->aux)) { + if (bpf_prog_is_offloaded(prog->aux)) { err = bpf_prog_offload_info_fill(&info, prog); if (err) return err; @@ -4225,7 +4225,7 @@ static int bpf_map_get_info_by_fd(struct file *file, } info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; - if (bpf_map_is_dev_bound(map)) { + if (bpf_map_is_offloaded(map)) { err = bpf_map_offload_info_fill(&info, map); if (err) return err; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ecf7fed7881c..bba68eefb4b2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -14099,7 +14099,7 @@ static int do_check(struct bpf_verifier_env *env) env->prev_log_len = env->log.len_used; } - if (bpf_prog_is_dev_bound(env->prog->aux)) { + if (bpf_prog_is_offloaded(env->prog->aux)) { err = bpf_prog_offload_verify_insn(env, env->insn_idx, env->prev_insn_idx); if (err) @@ -14579,7 +14579,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, } } - if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && + if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); return -EINVAL; @@ -15060,7 +15060,7 @@ static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) unsigned int orig_prog_len = env->prog->len; int err; - if (bpf_prog_is_dev_bound(env->prog->aux)) + if (bpf_prog_is_offloaded(env->prog->aux)) bpf_prog_offload_remove_insns(env, off, cnt); err = bpf_remove_insns(env->prog, off, cnt); @@ -15141,7 +15141,7 @@ static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) else continue; - if (bpf_prog_is_dev_bound(env->prog->aux)) + if (bpf_prog_is_offloaded(env->prog->aux)) bpf_prog_offload_replace_insn(env, i, &ja); memcpy(insn, &ja, sizeof(ja)); @@ -15328,7 +15328,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) } } - if (bpf_prog_is_dev_bound(env->prog->aux)) + if (bpf_prog_is_offloaded(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; @@ -15728,7 +15728,7 @@ static int fixup_call_args(struct bpf_verifier_env *env) int err = 0; if (env->prog->jit_requested && - !bpf_prog_is_dev_bound(env->prog->aux)) { + !bpf_prog_is_offloaded(env->prog->aux)) { err = jit_subprogs(env); if (err == 0) return 0; @@ -17231,7 +17231,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) if (ret < 0) goto skip_full_check; - if (bpf_prog_is_dev_bound(env->prog->aux)) { + if (bpf_prog_is_offloaded(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env->prog); if (ret) goto skip_full_check; @@ -17244,7 +17244,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) ret = do_check_subprogs(env); ret = ret ?: do_check_main(env); - if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) + if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) ret = bpf_prog_offload_finalize(env); skip_full_check: @@ -17279,7 +17279,7 @@ skip_full_check: /* do 32-bit optimization after 
insn patching has done so those patched * insns could be handled correctly. */ - if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { + if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret : false; diff --git a/net/core/dev.c b/net/core/dev.c index cf78f35bc0b9..a37829de6529 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9224,8 +9224,8 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); return -EEXIST; } - if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) { - NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); + if (!offload && bpf_prog_is_offloaded(new_prog->aux)) { + NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); return -EINVAL; } if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { diff --git a/net/core/filter.c b/net/core/filter.c index b4547a2c02f4..ed08dbf10338 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8760,7 +8760,7 @@ static bool xdp_is_valid_access(int off, int size, } if (type == BPF_WRITE) { - if (bpf_prog_is_dev_bound(prog->aux)) { + if (bpf_prog_is_offloaded(prog->aux)) { switch (off) { case offsetof(struct xdp_md, rx_queue_index): return __is_valid_xdp_access(off, size); -- cgit v1.2.3-58-ga151 From f1fc43d03946d6a2f3dcb05e8c0b874bdf1333c5 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 19 Jan 2023 14:15:22 -0800 Subject: bpf: Move offload initialization into late_initcall So we don't have to initialize it manually from several paths. Cc: John Fastabend Cc: David Ahern Cc: Martin KaFai Lau Cc: Jakub Kicinski Cc: Willem de Bruijn Cc: Jesper Dangaard Brouer Cc: Anatoly Burakov Cc: Alexander Lobakin Cc: Magnus Karlsson Cc: Maryam Tahhan Cc: xdp-hints@xdp-project.net Cc: netdev@vger.kernel.org Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230119221536.3349901-4-sdf@google.com Signed-off-by: Martin KaFai Lau --- kernel/bpf/offload.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index f5769a8ecbee..621e8738f304 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -56,7 +56,6 @@ static const struct rhashtable_params offdevs_params = { }; static struct rhashtable offdevs; -static bool offdevs_inited; static int bpf_dev_offload_check(struct net_device *netdev) { @@ -72,8 +71,6 @@ bpf_offload_find_netdev(struct net_device *netdev) { lockdep_assert_held(&bpf_devs_lock); - if (!offdevs_inited) - return NULL; return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); } @@ -673,18 +670,6 @@ struct bpf_offload_dev * bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv) { struct bpf_offload_dev *offdev; - int err; - - down_write(&bpf_devs_lock); - if (!offdevs_inited) { - err = rhashtable_init(&offdevs, &offdevs_params); - if (err) { - up_write(&bpf_devs_lock); - return ERR_PTR(err); - } - offdevs_inited = true; - } - up_write(&bpf_devs_lock); offdev = kzalloc(sizeof(*offdev), GFP_KERNEL); if (!offdev) @@ -710,3 +695,10 @@ void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev) return offdev->priv; } EXPORT_SYMBOL_GPL(bpf_offload_dev_priv); + +static int __init bpf_offload_init(void) +{ + return rhashtable_init(&offdevs, &offdevs_params); +} + 
+late_initcall(bpf_offload_init); -- cgit v1.2.3-58-ga151 From 89bbc53a4dbbbdd65962d4bbaeba6c7775ea0bf7 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 19 Jan 2023 14:15:23 -0800 Subject: bpf: Reshuffle some parts of bpf/offload.c To avoid adding forward declarations in the main patch, shuffle some code around. No functional changes. Cc: John Fastabend Cc: David Ahern Cc: Martin KaFai Lau Cc: Jakub Kicinski Cc: Willem de Bruijn Cc: Jesper Dangaard Brouer Cc: Anatoly Burakov Cc: Alexander Lobakin Cc: Magnus Karlsson Cc: Maryam Tahhan Cc: xdp-hints@xdp-project.net Cc: netdev@vger.kernel.org Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230119221536.3349901-5-sdf@google.com Signed-off-by: Martin KaFai Lau --- kernel/bpf/offload.c | 222 +++++++++++++++++++++++++++------------------------ 1 file changed, 117 insertions(+), 105 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 621e8738f304..deb06498da0b 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -74,6 +74,121 @@ bpf_offload_find_netdev(struct net_device *netdev) return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); } +static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, + struct net_device *netdev) +{ + struct bpf_offload_netdev *ondev; + int err; + + ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); + if (!ondev) + return -ENOMEM; + + ondev->netdev = netdev; + ondev->offdev = offdev; + INIT_LIST_HEAD(&ondev->progs); + INIT_LIST_HEAD(&ondev->maps); + + down_write(&bpf_devs_lock); + err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); + if (err) { + netdev_warn(netdev, "failed to register for BPF offload\n"); + goto err_unlock_free; + } + + list_add(&ondev->offdev_netdevs, &offdev->netdevs); + up_write(&bpf_devs_lock); + return 0; + +err_unlock_free: + up_write(&bpf_devs_lock); + kfree(ondev); + return err; +} + +static void __bpf_prog_offload_destroy(struct bpf_prog *prog) +{ + struct bpf_prog_offload *offload = prog->aux->offload; + + if (offload->dev_state) + offload->offdev->ops->destroy(prog); + + /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ + bpf_prog_free_id(prog, true); + + list_del_init(&offload->offloads); + kfree(offload); + prog->aux->offload = NULL; +} + +static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap, + enum bpf_netdev_command cmd) +{ + struct netdev_bpf data = {}; + struct net_device *netdev; + + ASSERT_RTNL(); + + data.command = cmd; + data.offmap = offmap; + /* Caller must make sure netdev is valid */ + netdev = offmap->netdev; + + return netdev->netdev_ops->ndo_bpf(netdev, &data); +} + +static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) +{ + WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE)); + /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */ + bpf_map_free_id(&offmap->map, true); + list_del_init(&offmap->offloads); + offmap->netdev = NULL; +} + +static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, + struct net_device *netdev) +{ + struct bpf_offload_netdev *ondev, *altdev; + struct bpf_offloaded_map *offmap, *mtmp; + struct bpf_prog_offload *offload, *ptmp; + + ASSERT_RTNL(); + + down_write(&bpf_devs_lock); + ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); + if (WARN_ON(!ondev)) + goto unlock; + + WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); + list_del(&ondev->offdev_netdevs); + + /* Try to move the objects to another netdev of 
the device */ + altdev = list_first_entry_or_null(&offdev->netdevs, + struct bpf_offload_netdev, + offdev_netdevs); + if (altdev) { + list_for_each_entry(offload, &ondev->progs, offloads) + offload->netdev = altdev->netdev; + list_splice_init(&ondev->progs, &altdev->progs); + + list_for_each_entry(offmap, &ondev->maps, offloads) + offmap->netdev = altdev->netdev; + list_splice_init(&ondev->maps, &altdev->maps); + } else { + list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) + __bpf_prog_offload_destroy(offload->prog); + list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) + __bpf_map_offload_destroy(offmap); + } + + WARN_ON(!list_empty(&ondev->progs)); + WARN_ON(!list_empty(&ondev->maps)); + kfree(ondev); +unlock: + up_write(&bpf_devs_lock); +} + int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) { struct bpf_offload_netdev *ondev; @@ -206,21 +321,6 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) up_read(&bpf_devs_lock); } -static void __bpf_prog_offload_destroy(struct bpf_prog *prog) -{ - struct bpf_prog_offload *offload = prog->aux->offload; - - if (offload->dev_state) - offload->offdev->ops->destroy(prog); - - /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ - bpf_prog_free_id(prog, true); - - list_del_init(&offload->offloads); - kfree(offload); - prog->aux->offload = NULL; -} - void bpf_prog_offload_destroy(struct bpf_prog *prog) { down_write(&bpf_devs_lock); @@ -340,22 +440,6 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info, const struct bpf_prog_ops bpf_offload_prog_ops = { }; -static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap, - enum bpf_netdev_command cmd) -{ - struct netdev_bpf data = {}; - struct net_device *netdev; - - ASSERT_RTNL(); - - data.command = cmd; - data.offmap = offmap; - /* Caller must make sure netdev is valid */ - netdev = offmap->netdev; - - return netdev->netdev_ops->ndo_bpf(netdev, &data); -} - struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) { struct net *net = current->nsproxy->net_ns; @@ -405,15 +489,6 @@ err_unlock: return ERR_PTR(err); } -static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) -{ - WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE)); - /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */ - bpf_map_free_id(&offmap->map, true); - list_del_init(&offmap->offloads); - offmap->netdev = NULL; -} - void bpf_map_offload_map_free(struct bpf_map *map) { struct bpf_offloaded_map *offmap = map_to_offmap(map); @@ -592,77 +667,14 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct net_device *netdev) { - struct bpf_offload_netdev *ondev; - int err; - - ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); - if (!ondev) - return -ENOMEM; - - ondev->netdev = netdev; - ondev->offdev = offdev; - INIT_LIST_HEAD(&ondev->progs); - INIT_LIST_HEAD(&ondev->maps); - - down_write(&bpf_devs_lock); - err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); - if (err) { - netdev_warn(netdev, "failed to register for BPF offload\n"); - goto err_unlock_free; - } - - list_add(&ondev->offdev_netdevs, &offdev->netdevs); - up_write(&bpf_devs_lock); - return 0; - -err_unlock_free: - up_write(&bpf_devs_lock); - kfree(ondev); - return err; + return __bpf_offload_dev_netdev_register(offdev, netdev); } EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register); void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev 
*offdev, struct net_device *netdev) { - struct bpf_offload_netdev *ondev, *altdev; - struct bpf_offloaded_map *offmap, *mtmp; - struct bpf_prog_offload *offload, *ptmp; - - ASSERT_RTNL(); - - down_write(&bpf_devs_lock); - ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); - if (WARN_ON(!ondev)) - goto unlock; - - WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); - list_del(&ondev->offdev_netdevs); - - /* Try to move the objects to another netdev of the device */ - altdev = list_first_entry_or_null(&offdev->netdevs, - struct bpf_offload_netdev, - offdev_netdevs); - if (altdev) { - list_for_each_entry(offload, &ondev->progs, offloads) - offload->netdev = altdev->netdev; - list_splice_init(&ondev->progs, &altdev->progs); - - list_for_each_entry(offmap, &ondev->maps, offloads) - offmap->netdev = altdev->netdev; - list_splice_init(&ondev->maps, &altdev->maps); - } else { - list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) - __bpf_prog_offload_destroy(offload->prog); - list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) - __bpf_map_offload_destroy(offmap); - } - - WARN_ON(!list_empty(&ondev->progs)); - WARN_ON(!list_empty(&ondev->maps)); - kfree(ondev); -unlock: - up_write(&bpf_devs_lock); + __bpf_offload_dev_netdev_unregister(offdev, netdev); } EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); -- cgit v1.2.3-58-ga151 From 2b3486bc2d237ec345b3942b7be5deabf8c8fed1 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 19 Jan 2023 14:15:24 -0800 Subject: bpf: Introduce device-bound XDP programs New flag BPF_F_XDP_DEV_BOUND_ONLY plus all the infra to have a way to associate a netdev with a BPF program at load time. netdevsim checks are dropped in favor of generic check in dev_xdp_attach. Cc: John Fastabend Cc: David Ahern Cc: Martin KaFai Lau Cc: Jakub Kicinski Cc: Willem de Bruijn Cc: Jesper Dangaard Brouer Cc: Anatoly Burakov Cc: Alexander Lobakin Cc: Magnus Karlsson Cc: Maryam Tahhan Cc: xdp-hints@xdp-project.net Cc: netdev@vger.kernel.org Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230119221536.3349901-6-sdf@google.com Signed-off-by: Martin KaFai Lau --- drivers/net/netdevsim/bpf.c | 4 -- include/linux/bpf.h | 24 +++++++++-- include/uapi/linux/bpf.h | 5 +++ kernel/bpf/core.c | 4 +- kernel/bpf/offload.c | 95 +++++++++++++++++++++++++++++++----------- kernel/bpf/syscall.c | 9 ++-- net/core/dev.c | 5 +++ tools/include/uapi/linux/bpf.h | 5 +++ 8 files changed, 113 insertions(+), 38 deletions(-) (limited to 'kernel/bpf') diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 50854265864d..f60eb97e3a62 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -315,10 +315,6 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf) NSIM_EA(bpf->extack, "xdpoffload of non-bound program"); return -EINVAL; } - if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) { - NSIM_EA(bpf->extack, "program bound to different dev"); - return -EINVAL; - } state = bpf->prog->aux->offload->dev_priv; if (WARN_ON(strcmp(state->state, "xlated"))) { diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1bb525c0130e..b97a05bb47be 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1261,7 +1261,8 @@ struct bpf_prog_aux { enum bpf_prog_type saved_dst_prog_type; enum bpf_attach_type saved_dst_attach_type; bool verifier_zext; /* Zero extensions has been inserted by verifier. */ - bool offload_requested; + bool dev_bound; /* Program is bound to the netdev. 
*/ + bool offload_requested; /* Program is bound and offloaded to the netdev. */ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ bool func_proto_unreliable; bool sleepable; @@ -2451,7 +2452,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux, bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); int bpf_prog_offload_compile(struct bpf_prog *prog); -void bpf_prog_offload_destroy(struct bpf_prog *prog); +void bpf_prog_dev_bound_destroy(struct bpf_prog *prog); int bpf_prog_offload_info_fill(struct bpf_prog_info *info, struct bpf_prog *prog); @@ -2479,7 +2480,13 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); void unpriv_ebpf_notify(int new_state); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) -int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); +int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr); +void bpf_dev_bound_netdev_unregister(struct net_device *dev); + +static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) +{ + return aux->dev_bound; +} static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux) { @@ -2507,12 +2514,21 @@ void sock_map_unhash(struct sock *sk); void sock_map_destroy(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); #else -static inline int bpf_prog_offload_init(struct bpf_prog *prog, +static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr) { return -EOPNOTSUPP; } +static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev) +{ +} + +static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) +{ + return false; +} + static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux) { return false; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index adae5b168f9d..ba0f0cfb5e42 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1156,6 +1156,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded + * program becomes device-bound but can access XDP metadata. + */ +#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6) + /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. 
*/ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 515f4f08591c..1cf19da3c128 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2553,8 +2553,8 @@ static void bpf_prog_free_deferred(struct work_struct *work) #endif bpf_free_used_maps(aux); bpf_free_used_btfs(aux); - if (bpf_prog_is_offloaded(aux)) - bpf_prog_offload_destroy(aux->prog); + if (bpf_prog_is_dev_bound(aux)) + bpf_prog_dev_bound_destroy(aux->prog); #ifdef CONFIG_PERF_EVENTS if (aux->prog->has_callchain_buf) put_callchain_buffers(); diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index deb06498da0b..f767455ed732 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -41,7 +41,7 @@ struct bpf_offload_dev { struct bpf_offload_netdev { struct rhash_head l; struct net_device *netdev; - struct bpf_offload_dev *offdev; + struct bpf_offload_dev *offdev; /* NULL when bound-only */ struct list_head progs; struct list_head maps; struct list_head offdev_netdevs; @@ -89,19 +89,17 @@ static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, INIT_LIST_HEAD(&ondev->progs); INIT_LIST_HEAD(&ondev->maps); - down_write(&bpf_devs_lock); err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); if (err) { netdev_warn(netdev, "failed to register for BPF offload\n"); - goto err_unlock_free; + goto err_free; } - list_add(&ondev->offdev_netdevs, &offdev->netdevs); - up_write(&bpf_devs_lock); + if (offdev) + list_add(&ondev->offdev_netdevs, &offdev->netdevs); return 0; -err_unlock_free: - up_write(&bpf_devs_lock); +err_free: kfree(ondev); return err; } @@ -149,24 +147,26 @@ static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, struct net_device *netdev) { - struct bpf_offload_netdev *ondev, *altdev; + struct bpf_offload_netdev *ondev, *altdev = NULL; struct bpf_offloaded_map *offmap, *mtmp; struct bpf_prog_offload *offload, *ptmp; ASSERT_RTNL(); - down_write(&bpf_devs_lock); ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); if (WARN_ON(!ondev)) - goto unlock; + return; WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); - list_del(&ondev->offdev_netdevs); /* Try to move the objects to another netdev of the device */ - altdev = list_first_entry_or_null(&offdev->netdevs, - struct bpf_offload_netdev, - offdev_netdevs); + if (offdev) { + list_del(&ondev->offdev_netdevs); + altdev = list_first_entry_or_null(&offdev->netdevs, + struct bpf_offload_netdev, + offdev_netdevs); + } + if (altdev) { list_for_each_entry(offload, &ondev->progs, offloads) offload->netdev = altdev->netdev; @@ -185,11 +185,9 @@ static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, WARN_ON(!list_empty(&ondev->progs)); WARN_ON(!list_empty(&ondev->maps)); kfree(ondev); -unlock: - up_write(&bpf_devs_lock); } -int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) +int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr) { struct bpf_offload_netdev *ondev; struct bpf_prog_offload *offload; @@ -199,7 +197,11 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) attr->prog_type != BPF_PROG_TYPE_XDP) return -EINVAL; - if (attr->prog_flags) + if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY) + return -EINVAL; + + if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS && + attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY) return -EINVAL; offload = kzalloc(sizeof(*offload), GFP_USER); @@ -214,11 +216,23 @@ int bpf_prog_offload_init(struct 
bpf_prog *prog, union bpf_attr *attr) if (err) goto err_maybe_put; + prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY); + down_write(&bpf_devs_lock); ondev = bpf_offload_find_netdev(offload->netdev); if (!ondev) { - err = -EINVAL; - goto err_unlock; + if (bpf_prog_is_offloaded(prog->aux)) { + err = -EINVAL; + goto err_unlock; + } + + /* When only binding to the device, explicitly + * create an entry in the hashtable. + */ + err = __bpf_offload_dev_netdev_register(NULL, offload->netdev); + if (err) + goto err_unlock; + ondev = bpf_offload_find_netdev(offload->netdev); } offload->offdev = ondev->offdev; prog->aux->offload = offload; @@ -321,12 +335,25 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) up_read(&bpf_devs_lock); } -void bpf_prog_offload_destroy(struct bpf_prog *prog) +void bpf_prog_dev_bound_destroy(struct bpf_prog *prog) { + struct bpf_offload_netdev *ondev; + struct net_device *netdev; + + rtnl_lock(); down_write(&bpf_devs_lock); - if (prog->aux->offload) + if (prog->aux->offload) { + list_del_init(&prog->aux->offload->offloads); + + netdev = prog->aux->offload->netdev; __bpf_prog_offload_destroy(prog); + + ondev = bpf_offload_find_netdev(netdev); + if (!ondev->offdev && list_empty(&ondev->progs)) + __bpf_offload_dev_netdev_unregister(NULL, netdev); + } up_write(&bpf_devs_lock); + rtnl_unlock(); } static int bpf_prog_offload_translate(struct bpf_prog *prog) @@ -621,7 +648,7 @@ static bool __bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_offload_netdev *ondev1, *ondev2; struct bpf_prog_offload *offload; - if (!bpf_prog_is_offloaded(prog->aux)) + if (!bpf_prog_is_dev_bound(prog->aux)) return false; offload = prog->aux->offload; @@ -667,14 +694,21 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct net_device *netdev) { - return __bpf_offload_dev_netdev_register(offdev, netdev); + int err; + + down_write(&bpf_devs_lock); + err = __bpf_offload_dev_netdev_register(offdev, netdev); + up_write(&bpf_devs_lock); + return err; } EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register); void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, struct net_device *netdev) { + down_write(&bpf_devs_lock); __bpf_offload_dev_netdev_unregister(offdev, netdev); + up_write(&bpf_devs_lock); } EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); @@ -708,6 +742,19 @@ void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev) } EXPORT_SYMBOL_GPL(bpf_offload_dev_priv); +void bpf_dev_bound_netdev_unregister(struct net_device *dev) +{ + struct bpf_offload_netdev *ondev; + + ASSERT_RTNL(); + + down_write(&bpf_devs_lock); + ondev = bpf_offload_find_netdev(dev); + if (ondev && !ondev->offdev) + __bpf_offload_dev_netdev_unregister(NULL, ondev->netdev); + up_write(&bpf_devs_lock); +} + static int __init bpf_offload_init(void) { return rhashtable_init(&offdevs, &offdevs_params); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 5e90b697f908..fdf4ff3d5a7f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2491,7 +2491,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) BPF_F_TEST_STATE_FREQ | BPF_F_SLEEPABLE | BPF_F_TEST_RND_HI32 | - BPF_F_XDP_HAS_FRAGS)) + BPF_F_XDP_HAS_FRAGS | + BPF_F_XDP_DEV_BOUND_ONLY)) return -EINVAL; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && @@ -2575,7 +2576,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) prog->aux->attach_btf = 
attach_btf; prog->aux->attach_btf_id = attr->attach_btf_id; prog->aux->dst_prog = dst_prog; - prog->aux->offload_requested = !!attr->prog_ifindex; + prog->aux->dev_bound = !!attr->prog_ifindex; prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; @@ -2598,8 +2599,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) atomic64_set(&prog->aux->refcnt, 1); prog->gpl_compatible = is_gpl ? 1 : 0; - if (bpf_prog_is_offloaded(prog->aux)) { - err = bpf_prog_offload_init(prog, attr); + if (bpf_prog_is_dev_bound(prog->aux)) { + err = bpf_prog_dev_bound_init(prog, attr); if (err) goto free_prog_sec; } diff --git a/net/core/dev.c b/net/core/dev.c index a37829de6529..e66da626df84 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9228,6 +9228,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); return -EINVAL; } + if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { + NL_SET_ERR_MSG(extack, "Program bound to different device"); + return -EINVAL; + } if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); return -EINVAL; @@ -10830,6 +10834,7 @@ void unregister_netdevice_many_notify(struct list_head *head, dev_shutdown(dev); dev_xdp_uninstall(dev); + bpf_dev_bound_netdev_unregister(dev); netdev_offload_xstats_disable_all(dev); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 142b81bcbb2e..7f024ac22edd 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1156,6 +1156,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded + * program becomes device-bound but can access XDP metadata. + */ +#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6) + /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ -- cgit v1.2.3-58-ga151 From 3d76a4d3d4e591af3e789698affaad88a5a8e8ab Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 19 Jan 2023 14:15:26 -0800 Subject: bpf: XDP metadata RX kfuncs Define a new kfunc set (xdp_metadata_kfunc_ids) which implements all possible XDP metatada kfuncs. Not all devices have to implement them. If kfunc is not supported by the target device, the default implementation is called instead. The verifier, at load time, replaces a call to the generic kfunc with a call to the per-device one. Per-device kfunc pointers are stored in separate struct xdp_metadata_ops. 
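To make the calling convention concrete, here is a hedged sketch of the BPF-program side, not part of the patch: the kfunc prototypes mirror the default implementations added in net/core/xdp.c below, the program has to be loaded device-bound (prog_ifindex set, typically together with BPF_F_XDP_DEV_BOUND_ONLY) for the verifier to accept these calls, and the section and function names are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
				    __u32 *hash) __ksym;

SEC("xdp")
int rx_meta(struct xdp_md *ctx)
{
	__u64 ts;
	__u32 hash;

	/* Each kfunc returns 0 on success or -errno; when the device does not
	 * implement the matching xmo_* hook, the default implementation runs
	 * and returns -EOPNOTSUPP.
	 */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx timestamp: %llu", ts);
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash))
		bpf_printk("rx hash: 0x%x", hash);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";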
Cc: John Fastabend Cc: David Ahern Cc: Martin KaFai Lau Cc: Jakub Kicinski Cc: Willem de Bruijn Cc: Jesper Dangaard Brouer Cc: Anatoly Burakov Cc: Alexander Lobakin Cc: Magnus Karlsson Cc: Maryam Tahhan Cc: xdp-hints@xdp-project.net Cc: netdev@vger.kernel.org Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230119221536.3349901-8-sdf@google.com Signed-off-by: Martin KaFai Lau --- include/linux/bpf.h | 17 ++++++++++++- include/linux/netdevice.h | 8 ++++++ include/net/xdp.h | 21 ++++++++++++++++ kernel/bpf/core.c | 8 ++++++ kernel/bpf/offload.c | 44 ++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 25 +++++++++++++++++- net/bpf/test_run.c | 3 +++ net/core/xdp.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 188 insertions(+), 2 deletions(-) (limited to 'kernel/bpf') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b97a05bb47be..bb26c2e18092 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2480,6 +2480,9 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); void unpriv_ebpf_notify(int new_state); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) +int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, + struct bpf_prog_aux *prog_aux); +void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id); int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr); void bpf_dev_bound_netdev_unregister(struct net_device *dev); @@ -2514,8 +2517,20 @@ void sock_map_unhash(struct sock *sk); void sock_map_destroy(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); #else +static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, + struct bpf_prog_aux *prog_aux) +{ + return -EOPNOTSUPP; +} + +static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, + u32 func_id) +{ + return NULL; +} + static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog, - union bpf_attr *attr) + union bpf_attr *attr) { return -EOPNOTSUPP; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index aad12a179e54..90f2be194bc5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -74,6 +74,7 @@ struct udp_tunnel_nic_info; struct udp_tunnel_nic; struct bpf_prog; struct xdp_buff; +struct xdp_md; void synchronize_net(void); void netdev_set_default_ethtool_ops(struct net_device *dev, @@ -1618,6 +1619,11 @@ struct net_device_ops { bool cycles); }; +struct xdp_metadata_ops { + int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp); + int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash); +}; + /** * enum netdev_priv_flags - &struct net_device priv_flags * @@ -1801,6 +1807,7 @@ enum netdev_ml_priv_type { * * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions + * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. 
* @ethtool_ops: Management operations * @l3mdev_ops: Layer 3 master device operations * @ndisc_ops: Includes callbacks for different IPv6 neighbour @@ -2050,6 +2057,7 @@ struct net_device { unsigned int flags; unsigned long long priv_flags; const struct net_device_ops *netdev_ops; + const struct xdp_metadata_ops *xdp_metadata_ops; int ifindex; unsigned short gflags; unsigned short hard_header_len; diff --git a/include/net/xdp.h b/include/net/xdp.h index 55dbc68bfffc..91292aa13bc0 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -409,4 +409,25 @@ void xdp_attachment_setup(struct xdp_attachment_info *info, #define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE +#define XDP_METADATA_KFUNC_xxx \ + XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \ + bpf_xdp_metadata_rx_timestamp) \ + XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \ + bpf_xdp_metadata_rx_hash) \ + +enum { +#define XDP_METADATA_KFUNC(name, _) name, +XDP_METADATA_KFUNC_xxx +#undef XDP_METADATA_KFUNC +MAX_XDP_METADATA_KFUNC, +}; + +#ifdef CONFIG_NET +u32 bpf_xdp_metadata_kfunc_id(int id); +bool bpf_dev_bound_kfunc_id(u32 btf_id); +#else +static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; } +static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; } +#endif + #endif /* __LINUX_NET_XDP_H__ */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 1cf19da3c128..16da51093aff 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2096,6 +2096,14 @@ bool bpf_prog_map_compatible(struct bpf_map *map, if (fp->kprobe_override) return false; + /* XDP programs inserted into maps are not guaranteed to run on + * a particular netdev (and can run outside driver context entirely + * in the case of devmap and cpumap). Until device checks + * are implemented, prohibit adding dev-bound programs to program maps. + */ + if (bpf_prog_is_dev_bound(fp->aux)) + return false; + spin_lock(&map->owner.lock); if (!map->owner.type) { /* There's no owner yet where we could check for diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index f767455ed732..3e173c694bbb 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -755,6 +755,50 @@ void bpf_dev_bound_netdev_unregister(struct net_device *dev) up_write(&bpf_devs_lock); } +int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, + struct bpf_prog_aux *prog_aux) +{ + if (!bpf_prog_is_dev_bound(prog_aux)) { + bpf_log(log, "metadata kfuncs require device-bound program\n"); + return -EINVAL; + } + + if (bpf_prog_is_offloaded(prog_aux)) { + bpf_log(log, "metadata kfuncs can't be offloaded\n"); + return -EINVAL; + } + + return 0; +} + +void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id) +{ + const struct xdp_metadata_ops *ops; + void *p = NULL; + + /* We don't hold bpf_devs_lock while resolving several + * kfuncs and can race with the unregister_netdevice(). + * We rely on bpf_dev_bound_match() check at attach + * to render this program unusable. 
+ */ + down_read(&bpf_devs_lock); + if (!prog->aux->offload) + goto out; + + ops = prog->aux->offload->netdev->xdp_metadata_ops; + if (!ops) + goto out; + + if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP)) + p = ops->xmo_rx_timestamp; + else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH)) + p = ops->xmo_rx_hash; +out: + up_read(&bpf_devs_lock); + + return p; +} + static int __init bpf_offload_init(void) { return rhashtable_init(&offdevs, &offdevs_params); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bba68eefb4b2..9009395206f8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2333,6 +2333,12 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) return -EINVAL; } + if (bpf_dev_bound_kfunc_id(func_id)) { + err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); + if (err) + return err; + } + desc = &tab->descs[tab->nr_descs++]; desc->func_id = func_id; desc->imm = call_imm; @@ -15772,12 +15778,25 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_insn *insn_buf, int insn_idx, int *cnt) { const struct bpf_kfunc_desc *desc; + void *xdp_kfunc; if (!insn->imm) { verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); return -EINVAL; } + *cnt = 0; + + if (bpf_dev_bound_kfunc_id(insn->imm)) { + xdp_kfunc = bpf_dev_bound_resolve_kfunc(env->prog, insn->imm); + if (xdp_kfunc) { + insn->imm = BPF_CALL_IMM(xdp_kfunc); + return 0; + } + + /* fallback to default kfunc when not supported by netdev */ + } + /* insn->imm has the btf func_id. Replace it with * an address (relative to __bpf_call_base). */ @@ -15788,7 +15807,6 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EFAULT; } - *cnt = 0; insn->imm = desc->imm; if (insn->off) return 0; @@ -16795,6 +16813,11 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, if (tgt_prog) { struct bpf_prog_aux *aux = tgt_prog->aux; + if (bpf_prog_is_dev_bound(tgt_prog->aux)) { + bpf_log(log, "Replacing device-bound programs not supported\n"); + return -EINVAL; + } + for (i = 0; i < aux->func_info_cnt; i++) if (aux->func_info[i].type_id == btf_id) { subprog = i; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 2723623429ac..8da0d73b368e 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -1300,6 +1300,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES) return -EINVAL; + if (bpf_prog_is_dev_bound(prog->aux)) + return -EINVAL; + if (do_live) { if (!batch_size) batch_size = NAPI_POLL_WEIGHT; diff --git a/net/core/xdp.c b/net/core/xdp.c index 844c9d99dc0e..a5a7ecf6391c 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -4,6 +4,7 @@ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. */ #include +#include #include #include #include @@ -709,3 +710,66 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf) return nxdpf; } + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); + +/** + * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp. + * @ctx: XDP context pointer. + * @timestamp: Return value pointer. + * + * Returns 0 on success or ``-errno`` on error. + */ +int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) +{ + return -EOPNOTSUPP; +} + +/** + * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash. + * @ctx: XDP context pointer. 
+ * @hash: Return value pointer. + * + * Returns 0 on success or ``-errno`` on error. + */ +int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash) +{ + return -EOPNOTSUPP; +} + +__diag_pop(); + +BTF_SET8_START(xdp_metadata_kfunc_ids) +#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0) +XDP_METADATA_KFUNC_xxx +#undef XDP_METADATA_KFUNC +BTF_SET8_END(xdp_metadata_kfunc_ids) + +static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = { + .owner = THIS_MODULE, + .set = &xdp_metadata_kfunc_ids, +}; + +BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted) +#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str) +XDP_METADATA_KFUNC_xxx +#undef XDP_METADATA_KFUNC + +u32 bpf_xdp_metadata_kfunc_id(int id) +{ + /* xdp_metadata_kfunc_ids is sorted and can't be used */ + return xdp_metadata_kfunc_ids_unsorted[id]; +} + +bool bpf_dev_bound_kfunc_id(u32 btf_id) +{ + return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id); +} + +static int __init xdp_metadata_init(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set); +} +late_initcall(xdp_metadata_init); -- cgit v1.2.3-58-ga151 From fd7c211d6875013f81acc09868effe199b5d2c0c Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Thu, 19 Jan 2023 14:15:27 -0800 Subject: bpf: Support consuming XDP HW metadata from fext programs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of rejecting the attaching of PROG_TYPE_EXT programs to XDP programs that consume HW metadata, implement support for propagating the offload information. The extension program doesn't need to set a flag or ifindex, these will just be propagated from the target by the verifier. We need to create a separate offload object for the extension program, though, since it can be reattached to a different program later (which means we can't just inherit the offload information from the target). An additional check is added on attach that the new target is compatible with the offload information in the extension prog. 
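As a rough userspace sketch of how this is exercised (assuming libbpf's freplace helpers; object, program, and target function names are made up, and error cleanup is trimmed): the extension program sets neither ifindex nor load flags, inherits the dev-bound state from its target at load time, and the attach is rejected if the target is bound to a different netdev.

#include <bpf/libbpf.h>

/* All names below are illustrative; target_prog_fd refers to an already
 * loaded, device-bound XDP program.
 */
static int attach_ext_to_dev_bound_xdp(int target_prog_fd)
{
	struct bpf_object *obj;
	struct bpf_program *ext;
	struct bpf_link *link;

	obj = bpf_object__open_file("freplace_rx_meta.bpf.o", NULL);
	if (!obj)
		return -1;

	ext = bpf_object__find_program_by_name(obj, "rx_meta_ext");
	if (!ext)
		return -1;

	/* "rx_meta" is the global function in the target being replaced;
	 * no ifindex or prog_flags are set on the extension itself.
	 */
	if (bpf_program__set_attach_target(ext, target_prog_fd, "rx_meta"))
		return -1;

	if (bpf_object__load(obj))
		return -1;

	link = bpf_program__attach_freplace(ext, target_prog_fd, "rx_meta");
	return link ? 0 : -1;
}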
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230119221536.3349901-9-sdf@google.com Signed-off-by: Martin KaFai Lau --- include/linux/bpf.h | 14 +++++++ kernel/bpf/offload.c | 112 ++++++++++++++++++++++++++++++++++++-------------- kernel/bpf/syscall.c | 7 ++++ kernel/bpf/verifier.c | 5 ++- 4 files changed, 106 insertions(+), 32 deletions(-) (limited to 'kernel/bpf') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bb26c2e18092..ad4bb36d4c10 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2484,6 +2484,7 @@ int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, struct bpf_prog_aux *prog_aux); void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id); int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr); +int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog); void bpf_dev_bound_netdev_unregister(struct net_device *dev); static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) @@ -2496,6 +2497,8 @@ static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux) return aux->offload_requested; } +bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs); + static inline bool bpf_map_is_offloaded(struct bpf_map *map) { return unlikely(map->ops == &bpf_map_offload_ops); @@ -2535,6 +2538,12 @@ static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog, return -EOPNOTSUPP; } +static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + return -EOPNOTSUPP; +} + static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev) { } @@ -2549,6 +2558,11 @@ static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux) return false; } +static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) +{ + return false; +} + static inline bool bpf_map_is_offloaded(struct bpf_map *map) { return false; diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 3e173c694bbb..e87cab2ed710 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -187,43 +187,24 @@ static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, kfree(ondev); } -int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr) +static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev) { struct bpf_offload_netdev *ondev; struct bpf_prog_offload *offload; int err; - if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && - attr->prog_type != BPF_PROG_TYPE_XDP) - return -EINVAL; - - if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY) - return -EINVAL; - - if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS && - attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY) - return -EINVAL; - offload = kzalloc(sizeof(*offload), GFP_USER); if (!offload) return -ENOMEM; offload->prog = prog; + offload->netdev = netdev; - offload->netdev = dev_get_by_index(current->nsproxy->net_ns, - attr->prog_ifindex); - err = bpf_dev_offload_check(offload->netdev); - if (err) - goto err_maybe_put; - - prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY); - - down_write(&bpf_devs_lock); ondev = bpf_offload_find_netdev(offload->netdev); if (!ondev) { if (bpf_prog_is_offloaded(prog->aux)) { err = -EINVAL; - goto err_unlock; + goto err_free; } /* When only binding to the device, explicitly @@ -231,25 +212,80 @@ int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr) */ err = 
__bpf_offload_dev_netdev_register(NULL, offload->netdev); if (err) - goto err_unlock; + goto err_free; ondev = bpf_offload_find_netdev(offload->netdev); } offload->offdev = ondev->offdev; prog->aux->offload = offload; list_add_tail(&offload->offloads, &ondev->progs); - dev_put(offload->netdev); - up_write(&bpf_devs_lock); return 0; -err_unlock: - up_write(&bpf_devs_lock); -err_maybe_put: - if (offload->netdev) - dev_put(offload->netdev); +err_free: kfree(offload); return err; } +int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr) +{ + struct net_device *netdev; + int err; + + if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && + attr->prog_type != BPF_PROG_TYPE_XDP) + return -EINVAL; + + if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY) + return -EINVAL; + + if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS && + attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY) + return -EINVAL; + + netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex); + if (!netdev) + return -EINVAL; + + err = bpf_dev_offload_check(netdev); + if (err) + goto out; + + prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY); + + down_write(&bpf_devs_lock); + err = __bpf_prog_dev_bound_init(prog, netdev); + up_write(&bpf_devs_lock); + +out: + dev_put(netdev); + return err; +} + +int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog) +{ + int err; + + if (!bpf_prog_is_dev_bound(old_prog->aux)) + return 0; + + if (bpf_prog_is_offloaded(old_prog->aux)) + return -EINVAL; + + new_prog->aux->dev_bound = old_prog->aux->dev_bound; + new_prog->aux->offload_requested = old_prog->aux->offload_requested; + + down_write(&bpf_devs_lock); + if (!old_prog->aux->offload) { + err = -EINVAL; + goto out; + } + + err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev); + +out: + up_write(&bpf_devs_lock); + return err; +} + int bpf_prog_offload_verifier_prep(struct bpf_prog *prog) { struct bpf_prog_offload *offload; @@ -675,6 +711,22 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev) } EXPORT_SYMBOL_GPL(bpf_offload_dev_match); +bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) +{ + bool ret; + + if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux)) + return false; + + down_read(&bpf_devs_lock); + ret = lhs->aux->offload && rhs->aux->offload && + lhs->aux->offload->netdev && + lhs->aux->offload->netdev == rhs->aux->offload->netdev; + up_read(&bpf_devs_lock); + + return ret; +} + bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) { struct bpf_offloaded_map *offmap; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index fdf4ff3d5a7f..d5ffa7a01dfb 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2605,6 +2605,13 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) goto free_prog_sec; } + if (type == BPF_PROG_TYPE_EXT && dst_prog && + bpf_prog_is_dev_bound(dst_prog->aux)) { + err = bpf_prog_dev_bound_inherit(prog, dst_prog); + if (err) + goto free_prog_sec; + } + /* find program type: socket_filter vs tracing_filter */ err = find_prog_type(type, prog); if (err < 0) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9009395206f8..800488289297 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -16813,8 +16813,9 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, if (tgt_prog) { struct bpf_prog_aux *aux = tgt_prog->aux; - if (bpf_prog_is_dev_bound(tgt_prog->aux)) { - 
bpf_log(log, "Replacing device-bound programs not supported\n"); + if (bpf_prog_is_dev_bound(prog->aux) && + !bpf_prog_dev_bound_match(prog, tgt_prog)) { + bpf_log(log, "Target program bound device mismatch"); return -EINVAL; } -- cgit v1.2.3-58-ga151 From 57539b1c0ac2dcccbe64a7675ff466be009c040f Mon Sep 17 00:00:00 2001 From: David Vernet Date: Fri, 20 Jan 2023 13:25:15 -0600 Subject: bpf: Enable annotating trusted nested pointers In kfuncs, a "trusted" pointer is a pointer that the kfunc can assume is safe, and which the verifier will allow to be passed to a KF_TRUSTED_ARGS kfunc. Currently, a KF_TRUSTED_ARGS kfunc disallows any pointer to be passed at a nonzero offset, but sometimes this is in fact safe if the "nested" pointer's lifetime is inherited from its parent. For example, the const cpumask_t *cpus_ptr field in a struct task_struct will remain valid until the task itself is destroyed, and thus would also be safe to pass to a KF_TRUSTED_ARGS kfunc. While it would be conceptually simple to enable this by using BTF tags, gcc unfortunately does not yet support this. In the interim, this patch enables support for this by using a type-naming convention. A new BTF_TYPE_SAFE_NESTED macro is defined in verifier.c which allows a developer to specify the nested fields of a type which are considered trusted if its parent is also trusted. The verifier is also updated to account for this. A patch with selftests will be added in a follow-on change, along with documentation for this feature. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230120192523.3650503-2-void@manifault.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 4 ++++ kernel/bpf/btf.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 32 ++++++++++++++++++++++++--- 3 files changed, 94 insertions(+), 3 deletions(-) (limited to 'kernel/bpf') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ad4bb36d4c10..982213d97668 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2187,6 +2187,10 @@ struct bpf_core_ctx { const struct btf *btf; }; +bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, + const struct bpf_reg_state *reg, + int off); + int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, int relo_idx, void *insn); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 4ba749fcce9d..dd05b5f2c1d8 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8227,3 +8227,64 @@ out: } return err; } + +bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, + const struct bpf_reg_state *reg, + int off) +{ + struct btf *btf = reg->btf; + const struct btf_type *walk_type, *safe_type; + const char *tname; + char safe_tname[64]; + long ret, safe_id; + const struct btf_member *member, *m_walk = NULL; + u32 i; + const char *walk_name; + + walk_type = btf_type_by_id(btf, reg->btf_id); + if (!walk_type) + return false; + + tname = btf_name_by_offset(btf, walk_type->name_off); + + ret = snprintf(safe_tname, sizeof(safe_tname), "%s__safe_fields", tname); + if (ret < 0) + return false; + + safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info)); + if (safe_id < 0) + return false; + + safe_type = btf_type_by_id(btf, safe_id); + if (!safe_type) + return false; + + for_each_member(i, walk_type, member) { + u32 moff; + + /* We're looking for the PTR_TO_BTF_ID member in the struct + * type we're walking which matches the specified offset. 
+ * Below, we'll iterate over the fields in the safe variant of + * the struct and see if any of them has a matching type / + * name. + */ + moff = __btf_member_bit_offset(walk_type, member) / 8; + if (off == moff) { + m_walk = member; + break; + } + } + if (m_walk == NULL) + return false; + + walk_name = __btf_name_by_offset(btf, m_walk->name_off); + for_each_member(i, safe_type, member) { + const char *m_name = __btf_name_by_offset(btf, member->name_off); + + /* If we match on both type and name, the field is considered trusted. */ + if (m_walk->type == member->type && !strcmp(walk_name, m_name)) + return true; + } + + return false; +} diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 800488289297..bc24bdadc193 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4943,6 +4943,25 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) return 0; } +#define BTF_TYPE_SAFE_NESTED(__type) __PASTE(__type, __safe_fields) + +BTF_TYPE_SAFE_NESTED(struct task_struct) { + const cpumask_t *cpus_ptr; +}; + +static bool nested_ptr_is_trusted(struct bpf_verifier_env *env, + struct bpf_reg_state *reg, + int off) +{ + /* If its parent is not trusted, it can't regain its trusted status. */ + if (!is_trusted_reg(reg)) + return false; + + BTF_TYPE_EMIT(BTF_TYPE_SAFE_NESTED(struct task_struct)); + + return btf_nested_type_is_trusted(&env->log, reg, off); +} + static int check_ptr_to_btf_access(struct bpf_verifier_env *env, struct bpf_reg_state *regs, int regno, int off, int size, @@ -5031,10 +5050,17 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, if (type_flag(reg->type) & PTR_UNTRUSTED) flag |= PTR_UNTRUSTED; - /* By default any pointer obtained from walking a trusted pointer is - * no longer trusted except the rcu case below. + /* By default any pointer obtained from walking a trusted pointer is no + * longer trusted, unless the field being accessed has explicitly been + * marked as inheriting its parent's state of trust. + * + * An RCU-protected pointer can also be deemed trusted if we are in an + * RCU read region. This case is handled below. */ - flag &= ~PTR_TRUSTED; + if (nested_ptr_is_trusted(env, reg, off)) + flag |= PTR_TRUSTED; + else + flag &= ~PTR_TRUSTED; if (flag & MEM_RCU) { /* Mark value register as MEM_RCU only if it is protected by -- cgit v1.2.3-58-ga151 From b613d335a743cf0e0ef0ccba9ad129904e2a26fb Mon Sep 17 00:00:00 2001 From: David Vernet Date: Fri, 20 Jan 2023 13:25:16 -0600 Subject: bpf: Allow trusted args to walk struct when checking BTF IDs When validating BTF types for KF_TRUSTED_ARGS kfuncs, the verifier currently enforces that the top-level type must match when calling the kfunc. In other words, the verifier does not allow the BPF program to pass a bitwise equivalent struct, despite it being allowed according to the C standard. For example, if you have the following type: struct nf_conn___init { struct nf_conn ct; }; The C standard stipulates that it would be safe to pass a struct nf_conn___init to a kfunc expecting a struct nf_conn. The verifier currently disallows this, however, as semantically kfuncs may want to enforce that structs that have equivalent types according to the C standard, but have different BTF IDs, are not able to be passed to kfuncs expecting one or the other. For example, struct nf_conn___init may not be queried / looked up, as it is allocated but may not yet be fully initialized. 
On the other hand, being able to pass types that are equivalent according to the C standard will be useful for other types of kfunc / kptrs enabled by BPF. For example, in a follow-on patch, a series of kfuncs will be added which allow programs to do bitwise queries on cpumasks that are either allocated by the program (in which case they'll be a 'struct bpf_cpumask' type that wraps a cpumask_t as its first element), or a cpumask that was allocated by the main kernel (in which case it will just be a straight cpumask_t, as in task->cpus_ptr). Having the two types of cpumasks allows us to distinguish between the two for when a cpumask is read-only vs. mutatable. A struct bpf_cpumask can be mutated by e.g. bpf_cpumask_clear(), whereas a regular cpumask_t cannot be. On the other hand, a struct bpf_cpumask can of course be queried in the exact same manner as a cpumask_t, with e.g. bpf_cpumask_test_cpu(). If we were to enforce that top level types match, then a user that's passing a struct bpf_cpumask to a read-only cpumask_t argument would have to cast with something like bpf_cast_to_kern_ctx() (which itself would need to be updated to expect the alias, and currently it only accommodates a single alias per prog type). Additionally, not specifying KF_TRUSTED_ARGS is not an option, as some kfuncs take one argument as a struct bpf_cpumask *, and another as a struct cpumask * (i.e. cpumask_t). In order to enable this, this patch relaxes the constraint that a KF_TRUSTED_ARGS kfunc must have strict type matching, and instead only enforces strict type matching if a type is observed to be a "no-cast alias" (i.e., that the type names are equivalent, but one is suffixed with ___init). Additionally, in order to try and be conservative and match existing behavior / expectations, this patch also enforces strict type checking for acquire kfuncs. We were already enforcing it for release kfuncs, so this should also improve the consistency of the semantics for kfuncs. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230120192523.3650503-3-void@manifault.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 4 ++++ kernel/bpf/btf.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 30 ++++++++++++++++++++++++- 3 files changed, 94 insertions(+), 1 deletion(-) (limited to 'kernel/bpf') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 982213d97668..1bec48d9e5d9 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2191,6 +2191,10 @@ bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, const struct bpf_reg_state *reg, int off); +bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, + const struct btf *reg_btf, u32 reg_id, + const struct btf *arg_btf, u32 arg_id); + int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, int relo_idx, void *insn); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index dd05b5f2c1d8..47b8cb96f2c2 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -336,6 +336,12 @@ const char *btf_type_str(const struct btf_type *t) /* Type name size */ #define BTF_SHOW_NAME_SIZE 80 +/* + * The suffix of a type that indicates it cannot alias another type when + * comparing BTF IDs for kfunc invocations. + */ +#define NOCAST_ALIAS_SUFFIX "___init" + /* * Common data to all BTF show operations. 
Private show functions can add * their own data to a structure containing a struct btf_show and consult it @@ -8288,3 +8294,58 @@ bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, return false; } + +bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, + const struct btf *reg_btf, u32 reg_id, + const struct btf *arg_btf, u32 arg_id) +{ + const char *reg_name, *arg_name, *search_needle; + const struct btf_type *reg_type, *arg_type; + int reg_len, arg_len, cmp_len; + size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char); + + reg_type = btf_type_by_id(reg_btf, reg_id); + if (!reg_type) + return false; + + arg_type = btf_type_by_id(arg_btf, arg_id); + if (!arg_type) + return false; + + reg_name = btf_name_by_offset(reg_btf, reg_type->name_off); + arg_name = btf_name_by_offset(arg_btf, arg_type->name_off); + + reg_len = strlen(reg_name); + arg_len = strlen(arg_name); + + /* Exactly one of the two type names may be suffixed with ___init, so + * if the strings are the same size, they can't possibly be no-cast + * aliases of one another. If you have two of the same type names, e.g. + * they're both nf_conn___init, it would be improper to return true + * because they are _not_ no-cast aliases, they are the same type. + */ + if (reg_len == arg_len) + return false; + + /* Either of the two names must be the other name, suffixed with ___init. */ + if ((reg_len != arg_len + pattern_len) && + (arg_len != reg_len + pattern_len)) + return false; + + if (reg_len < arg_len) { + search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX); + cmp_len = reg_len; + } else { + search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX); + cmp_len = arg_len; + } + + if (!search_needle) + return false; + + /* ___init suffix must come at the end of the name */ + if (*(search_needle + pattern_len) != '\0') + return false; + + return !strncmp(reg_name, arg_name, cmp_len); +} diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bc24bdadc193..66ec577fcb8b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -8820,9 +8820,37 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, reg_ref_id = *reg2btf_ids[base_type(reg->type)]; } - if (is_kfunc_trusted_args(meta) || (is_kfunc_release(meta) && reg->ref_obj_id)) + /* Enforce strict type matching for calls to kfuncs that are acquiring + * or releasing a reference, or are no-cast aliases. We do _not_ + * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default, + * as we want to enable BPF programs to pass types that are bitwise + * equivalent without forcing them to explicitly cast with something + * like bpf_cast_to_kern_ctx(). + * + * For example, say we had a type like the following: + * + * struct bpf_cpumask { + * cpumask_t cpumask; + * refcount_t usage; + * }; + * + * Note that as specified in , cpumask_t is typedef'ed + * to a struct cpumask, so it would be safe to pass a struct + * bpf_cpumask * to a kfunc expecting a struct cpumask *. + * + * The philosophy here is similar to how we allow scalars of different + * types to be passed to kfuncs as long as the size is the same. The + * only difference here is that we're simply allowing + * btf_struct_ids_match() to walk the struct at the 0th offset, and + * resolve types. 
+ */ + if (is_kfunc_acquire(meta) || + (is_kfunc_release(meta) && reg->ref_obj_id) || + btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) strict_type_match = true; + WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off); + reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, ®_ref_id); reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) { -- cgit v1.2.3-58-ga151 From caf713c338bd95bf9ac003d8985d2c4e46d452dd Mon Sep 17 00:00:00 2001 From: David Vernet Date: Wed, 25 Jan 2023 08:38:10 -0600 Subject: bpf: Disallow NULLable pointers for trusted kfuncs KF_TRUSTED_ARGS kfuncs currently have a subtle and insidious bug in validating pointers to scalars. Say that you have a kfunc like the following, which takes an array as the first argument: bool bpf_cpumask_empty(const struct cpumask *cpumask) { return cpumask_empty(cpumask); } ... BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_TRUSTED_ARGS) ... If a BPF program were to invoke the kfunc with a NULL argument, it would crash the kernel. The reason is that struct cpumask is defined as a bitmap, which is itself defined as an array, and is accessed as a memory address by bitmap operations. So when the verifier analyzes the register, it interprets it as a pointer to a scalar struct, which is an array of size 8. check_mem_reg() then sees that the register is NULL and returns 0, and the kfunc crashes when it passes it down to the cpumask wrappers. To fix this, this patch adds a check for KF_ARG_PTR_TO_MEM which verifies that the register doesn't contain a possibly-NULL pointer if the kfunc is KF_TRUSTED_ARGS. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230125143816.721952-2-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 6 ++++++ tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c | 4 ++-- tools/testing/selftests/bpf/progs/task_kfunc_failure.c | 4 ++-- 3 files changed, 10 insertions(+), 4 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 66ec577fcb8b..bb38b01b738f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9194,6 +9194,12 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return -EINVAL; } + if (is_kfunc_trusted_args(meta) && + (register_is_null(reg) || type_may_be_null(reg->type))) { + verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i); + return -EACCES; + } + if (reg->ref_obj_id) { if (is_kfunc_release(meta) && meta->ref_obj_id) { verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c index 973f0c5af965..f3bb0e16e088 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c @@ -93,11 +93,11 @@ static struct { const char *prog_name; const char *expected_err_msg; } failure_tests[] = { - {"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"}, + {"cgrp_kfunc_acquire_untrusted", "Possibly NULL pointer passed to trusted arg0"}, {"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"}, {"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"}, {"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"}, - {"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT 
cgroup must point"}, + {"cgrp_kfunc_acquire_null", "Possibly NULL pointer passed to trusted arg0"}, {"cgrp_kfunc_acquire_unreleased", "Unreleased reference"}, {"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"}, {"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"}, diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index e6950d6a9cf0..f19d54eda4f1 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -28,7 +28,7 @@ static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *ta } SEC("tp_btf/task_newtask") -__failure __msg("R1 must be referenced or trusted") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -86,7 +86,7 @@ int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 cl SEC("tp_btf/task_newtask") -__failure __msg("arg#0 pointer type STRUCT task_struct must point") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; -- cgit v1.2.3-58-ga151 From 516f4d3397c9e90f4da04f59986c856016269aa1 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Wed, 25 Jan 2023 08:38:11 -0600 Subject: bpf: Enable cpumasks to be queried and used as kptrs Certain programs may wish to be able to query cpumasks. For example, if a program that is tracing percpu operations wishes to track which tasks end up running on which CPUs, it could be useful to associate that with the tasks' cpumasks. Similarly, programs tracking NUMA allocations, CPU scheduling domains, etc, could potentially benefit from being able to see which CPUs a task could be migrated to. This patch enables these types of use cases by introducing a series of bpf_cpumask_* kfuncs. Amongst these kfuncs, there are two separate "classes" of operations: 1. kfuncs which allow the caller to allocate and mutate their own cpumask kptrs in the form of a struct bpf_cpumask * object. Such kfuncs include e.g. bpf_cpumask_create() to allocate the cpumask, and bpf_cpumask_or() to mutate it. "Regular" cpumasks such as p->cpus_ptr may not be passed to these kfuncs, and the verifier will ensure this is the case by comparing BTF IDs. 2. Read-only operations which operate on const struct cpumask * arguments. For example, bpf_cpumask_test_cpu(), which tests whether a CPU is set in the cpumask. Any trusted struct cpumask * or struct bpf_cpumask * may be passed to these kfuncs. The verifier allows struct bpf_cpumask * even though the kfunc is defined with struct cpumask * because the first element of a struct bpf_cpumask is a cpumask_t, so it is safe to cast. A follow-on patch will add selftests which validate these kfuncs, and another will document them. 
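A minimal sketch of how the two classes fit together from a BPF program's point of view (kfunc prototypes and includes are elided, as in the documentation added later in this series; the tracepoint and printed message are illustrative only):

    SEC("tp_btf/task_newtask")
    int BPF_PROG(cpumask_example, struct task_struct *task, u64 clone_flags)
    {
            struct bpf_cpumask *mask;

            /* Class 1: allocate and mutate a BPF-owned cpumask. */
            mask = bpf_cpumask_create();
            if (!mask)
                    return -ENOMEM;
            bpf_cpumask_set_cpu(0, mask);

            /* Class 2: read-only kfuncs take const struct cpumask *, so both
             * the allocated mask (via a cast) and task->cpus_ptr are accepted.
             */
            if (bpf_cpumask_test_cpu(0, (const struct cpumask *)mask) &&
                bpf_cpumask_test_cpu(0, task->cpus_ptr))
                    bpf_printk("task %s may run on CPU 0", task->comm);

            bpf_cpumask_release(mask);
            return 0;
    }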
Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230125143816.721952-3-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/Makefile | 1 + kernel/bpf/cpumask.c | 268 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 269 insertions(+) create mode 100644 kernel/bpf/cpumask.c (limited to 'kernel/bpf') diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 3a12e6b400a2..02242614dcc7 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o endif ifeq ($(CONFIG_BPF_JIT),y) obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o +obj-$(CONFIG_BPF_SYSCALL) += cpumask.o obj-${CONFIG_BPF_LSM} += bpf_lsm.o endif obj-$(CONFIG_BPF_PRELOAD) += preload/ diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c new file mode 100644 index 000000000000..409ae2447a9b --- /dev/null +++ b/kernel/bpf/cpumask.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Meta, Inc */ +#include +#include +#include +#include +#include + +/** + * struct bpf_cpumask - refcounted BPF cpumask wrapper structure + * @cpumask: The actual cpumask embedded in the struct. + * @usage: Object reference counter. When the refcount goes to 0, the + * memory is released back to the BPF allocator, which provides + * RCU safety. + * + * Note that we explicitly embed a cpumask_t rather than a cpumask_var_t. This + * is done to avoid confusing the verifier due to the typedef of cpumask_var_t + * changing depending on whether CONFIG_CPUMASK_OFFSTACK is defined or not. See + * the details in . The consequence is that this structure is + * likely a bit larger than it needs to be when CONFIG_CPUMASK_OFFSTACK is + * defined due to embedding the whole NR_CPUS-size bitmap, but the extra memory + * overhead is minimal. For the more typical case of CONFIG_CPUMASK_OFFSTACK + * not being defined, the structure is the same size regardless. + */ +struct bpf_cpumask { + cpumask_t cpumask; + refcount_t usage; +}; + +static struct bpf_mem_alloc bpf_cpumask_ma; + +static bool cpu_valid(u32 cpu) +{ + return cpu < nr_cpu_ids; +} + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global kfuncs as their definitions will be in BTF"); + +struct bpf_cpumask *bpf_cpumask_create(void) +{ + struct bpf_cpumask *cpumask; + + cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask)); + if (!cpumask) + return NULL; + + memset(cpumask, 0, sizeof(*cpumask)); + refcount_set(&cpumask->usage, 1); + + return cpumask; +} + +struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) +{ + refcount_inc(&cpumask->usage); + return cpumask; +} + +struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp) +{ + struct bpf_cpumask *cpumask; + + /* The BPF memory allocator frees memory backing its caches in an RCU + * callback. Thus, we can safely use RCU to ensure that the cpumask is + * safe to read. 
+ */ + rcu_read_lock(); + + cpumask = READ_ONCE(*cpumaskp); + if (cpumask && !refcount_inc_not_zero(&cpumask->usage)) + cpumask = NULL; + + rcu_read_unlock(); + return cpumask; +} + +void bpf_cpumask_release(struct bpf_cpumask *cpumask) +{ + if (!cpumask) + return; + + if (refcount_dec_and_test(&cpumask->usage)) { + migrate_disable(); + bpf_mem_free(&bpf_cpumask_ma, cpumask); + migrate_enable(); + } +} + +u32 bpf_cpumask_first(const struct cpumask *cpumask) +{ + return cpumask_first(cpumask); +} + +u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) +{ + return cpumask_first_zero(cpumask); +} + +void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) +{ + if (!cpu_valid(cpu)) + return; + + cpumask_set_cpu(cpu, (struct cpumask *)cpumask); +} + +void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) +{ + if (!cpu_valid(cpu)) + return; + + cpumask_clear_cpu(cpu, (struct cpumask *)cpumask); +} + +bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) +{ + if (!cpu_valid(cpu)) + return false; + + return cpumask_test_cpu(cpu, (struct cpumask *)cpumask); +} + +bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) +{ + if (!cpu_valid(cpu)) + return false; + + return cpumask_test_and_set_cpu(cpu, (struct cpumask *)cpumask); +} + +bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) +{ + if (!cpu_valid(cpu)) + return false; + + return cpumask_test_and_clear_cpu(cpu, (struct cpumask *)cpumask); +} + +void bpf_cpumask_setall(struct bpf_cpumask *cpumask) +{ + cpumask_setall((struct cpumask *)cpumask); +} + +void bpf_cpumask_clear(struct bpf_cpumask *cpumask) +{ + cpumask_clear((struct cpumask *)cpumask); +} + +bool bpf_cpumask_and(struct bpf_cpumask *dst, + const struct cpumask *src1, + const struct cpumask *src2) +{ + return cpumask_and((struct cpumask *)dst, src1, src2); +} + +void bpf_cpumask_or(struct bpf_cpumask *dst, + const struct cpumask *src1, + const struct cpumask *src2) +{ + cpumask_or((struct cpumask *)dst, src1, src2); +} + +void bpf_cpumask_xor(struct bpf_cpumask *dst, + const struct cpumask *src1, + const struct cpumask *src2) +{ + cpumask_xor((struct cpumask *)dst, src1, src2); +} + +bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) +{ + return cpumask_equal(src1, src2); +} + +bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) +{ + return cpumask_intersects(src1, src2); +} + +bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) +{ + return cpumask_subset(src1, src2); +} + +bool bpf_cpumask_empty(const struct cpumask *cpumask) +{ + return cpumask_empty(cpumask); +} + +bool bpf_cpumask_full(const struct cpumask *cpumask) +{ + return cpumask_full(cpumask); +} + +void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) +{ + cpumask_copy((struct cpumask *)dst, src); +} + +u32 bpf_cpumask_any(const struct cpumask *cpumask) +{ + return cpumask_any(cpumask); +} + +u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) +{ + return cpumask_any_and(src1, src2); +} + +__diag_pop(); + +BTF_SET8_START(cpumask_kfunc_btf_ids) +BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_cpumask_first, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, 
bpf_cpumask_first_zero, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_and, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_or, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_full, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_any, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_any_and, KF_TRUSTED_ARGS) +BTF_SET8_END(cpumask_kfunc_btf_ids) + +static const struct btf_kfunc_id_set cpumask_kfunc_set = { + .owner = THIS_MODULE, + .set = &cpumask_kfunc_btf_ids, +}; + +BTF_ID_LIST(cpumask_dtor_ids) +BTF_ID(struct, bpf_cpumask) +BTF_ID(func, bpf_cpumask_release) + +static int __init cpumask_kfunc_init(void) +{ + int ret; + const struct btf_id_dtor_kfunc cpumask_dtors[] = { + { + .btf_id = cpumask_dtor_ids[0], + .kfunc_btf_id = cpumask_dtor_ids[1] + }, + }; + + ret = bpf_mem_alloc_init(&bpf_cpumask_ma, 0, false); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set); + return ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors, + ARRAY_SIZE(cpumask_dtors), + THIS_MODULE); +} + +late_initcall(cpumask_kfunc_init); -- cgit v1.2.3-58-ga151 From bdbda395845e7579c0ce76280104627510af929b Mon Sep 17 00:00:00 2001 From: David Vernet Date: Wed, 25 Jan 2023 08:38:14 -0600 Subject: bpf/docs: Document cpumask kfuncs in a new file Now that we've added a series of new cpumask kfuncs, we should document them so users can easily use them. This patch adds a new cpumasks.rst file to document them. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230125143816.721952-6-void@manifault.com Signed-off-by: Alexei Starovoitov --- Documentation/bpf/cpumasks.rst | 393 +++++++++++++++++++++++++++++++++++++++++ Documentation/bpf/index.rst | 1 + Documentation/bpf/kfuncs.rst | 11 ++ kernel/bpf/cpumask.c | 208 ++++++++++++++++++++++ 4 files changed, 613 insertions(+) create mode 100644 Documentation/bpf/cpumasks.rst (limited to 'kernel/bpf') diff --git a/Documentation/bpf/cpumasks.rst b/Documentation/bpf/cpumasks.rst new file mode 100644 index 000000000000..24bef9cbbeee --- /dev/null +++ b/Documentation/bpf/cpumasks.rst @@ -0,0 +1,393 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. _cpumasks-header-label: + +================== +BPF cpumask kfuncs +================== + +1. Introduction +=============== + +``struct cpumask`` is a bitmap data structure in the kernel whose indices +reflect the CPUs on the system. Commonly, cpumasks are used to track which CPUs +a task is affinitized to, but they can also be used to e.g. track which cores +are associated with a scheduling domain, which cores on a machine are idle, +etc. + +BPF provides programs with a set of :ref:`kfuncs-header-label` that can be +used to allocate, mutate, query, and free cpumasks. 
+ +2. BPF cpumask objects +====================== + +There are two different types of cpumasks that can be used by BPF programs. + +2.1 ``struct bpf_cpumask *`` +---------------------------- + +``struct bpf_cpumask *`` is a cpumask that is allocated by BPF, on behalf of a +BPF program, and whose lifecycle is entirely controlled by BPF. These cpumasks +are RCU-protected, can be mutated, can be used as kptrs, and can be safely cast +to a ``struct cpumask *``. + +2.1.1 ``struct bpf_cpumask *`` lifecycle +---------------------------------------- + +A ``struct bpf_cpumask *`` is allocated, acquired, and released, using the +following functions: + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_create + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_acquire + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_release + +For example: + +.. code-block:: c + + struct cpumask_map_value { + struct bpf_cpumask __kptr_ref * cpumask; + }; + + struct array_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct cpumask_map_value); + __uint(max_entries, 65536); + } cpumask_map SEC(".maps"); + + static int cpumask_map_insert(struct bpf_cpumask *mask, u32 pid) + { + struct cpumask_map_value local, *v; + long status; + struct bpf_cpumask *old; + u32 key = pid; + + local.cpumask = NULL; + status = bpf_map_update_elem(&cpumask_map, &key, &local, 0); + if (status) { + bpf_cpumask_release(mask); + return status; + } + + v = bpf_map_lookup_elem(&cpumask_map, &key); + if (!v) { + bpf_cpumask_release(mask); + return -ENOENT; + } + + old = bpf_kptr_xchg(&v->cpumask, mask); + if (old) + bpf_cpumask_release(old); + + return 0; + } + + /** + * A sample tracepoint showing how a task's cpumask can be queried and + * recorded as a kptr. + */ + SEC("tp_btf/task_newtask") + int BPF_PROG(record_task_cpumask, struct task_struct *task, u64 clone_flags) + { + struct bpf_cpumask *cpumask; + int ret; + + cpumask = bpf_cpumask_create(); + if (!cpumask) + return -ENOMEM; + + if (!bpf_cpumask_full(task->cpus_ptr)) + bpf_printk("task %s has CPU affinity", task->comm); + + bpf_cpumask_copy(cpumask, task->cpus_ptr); + return cpumask_map_insert(cpumask, task->pid); + } + +---- + +2.1.1 ``struct bpf_cpumask *`` as kptrs +--------------------------------------- + +As mentioned and illustrated above, these ``struct bpf_cpumask *`` objects can +also be stored in a map and used as kptrs. If a ``struct bpf_cpumask *`` is in +a map, the reference can be removed from the map with bpf_kptr_xchg(), or +opportunistically acquired with bpf_cpumask_kptr_get(): + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_kptr_get + +Here is an example of a ``struct bpf_cpumask *`` being retrieved from a map: + +.. code-block:: c + + /* struct containing the struct bpf_cpumask kptr which is stored in the map. */ + struct cpumasks_kfunc_map_value { + struct bpf_cpumask __kptr_ref * bpf_cpumask; + }; + + /* The map containing struct cpumasks_kfunc_map_value entries. */ + struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct cpumasks_kfunc_map_value); + __uint(max_entries, 1); + } cpumasks_kfunc_map SEC(".maps"); + + /* ... */ + + /** + * A simple example tracepoint program showing how a + * struct bpf_cpumask * kptr that is stored in a map can + * be acquired using the bpf_cpumask_kptr_get() kfunc. 
+ */ + SEC("tp_btf/cgroup_mkdir") + int BPF_PROG(cgrp_ancestor_example, struct cgroup *cgrp, const char *path) + { + struct bpf_cpumask *kptr; + struct cpumasks_kfunc_map_value *v; + u32 key = 0; + + /* Assume a bpf_cpumask * kptr was previously stored in the map. */ + v = bpf_map_lookup_elem(&cpumasks_kfunc_map, &key); + if (!v) + return -ENOENT; + + /* Acquire a reference to the bpf_cpumask * kptr that's already stored in the map. */ + kptr = bpf_cpumask_kptr_get(&v->cpumask); + if (!kptr) + /* If no bpf_cpumask was present in the map, it's because + * we're racing with another CPU that removed it with + * bpf_kptr_xchg() between the bpf_map_lookup_elem() + * above, and our call to bpf_cpumask_kptr_get(). + * bpf_cpumask_kptr_get() internally safely handles this + * race, and will return NULL if the cpumask is no longer + * present in the map by the time we invoke the kfunc. + */ + return -EBUSY; + + /* Free the reference we just took above. Note that the + * original struct bpf_cpumask * kptr is still in the map. It will + * be freed either at a later time if another context deletes + * it from the map, or automatically by the BPF subsystem if + * it's still present when the map is destroyed. + */ + bpf_cpumask_release(kptr); + + return 0; + } + +---- + +2.2 ``struct cpumask`` +---------------------- + +``struct cpumask`` is the object that actually contains the cpumask bitmap +being queried, mutated, etc. A ``struct bpf_cpumask`` wraps a ``struct +cpumask``, which is why it's safe to cast it as such (note however that it is +**not** safe to cast a ``struct cpumask *`` to a ``struct bpf_cpumask *``, and +the verifier will reject any program that tries to do so). + +As we'll see below, any kfunc that mutates its cpumask argument will take a +``struct bpf_cpumask *`` as that argument. Any argument that simply queries the +cpumask will instead take a ``struct cpumask *``. + +3. cpumask kfuncs +================= + +Above, we described the kfuncs that can be used to allocate, acquire, release, +etc a ``struct bpf_cpumask *``. This section of the document will describe the +kfuncs for mutating and querying cpumasks. + +3.1 Mutating cpumasks +--------------------- + +Some cpumask kfuncs are "read-only" in that they don't mutate any of their +arguments, whereas others mutate at least one argument (which means that the +argument must be a ``struct bpf_cpumask *``, as described above). + +This section will describe all of the cpumask kfuncs which mutate at least one +argument. :ref:`cpumasks-querying-label` below describes the read-only kfuncs. + +3.1.1 Setting and clearing CPUs +------------------------------- + +bpf_cpumask_set_cpu() and bpf_cpumask_clear_cpu() can be used to set and clear +a CPU in a ``struct bpf_cpumask`` respectively: + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_set_cpu bpf_cpumask_clear_cpu + +These kfuncs are pretty straightforward, and can be used, for example, as +follows: + +.. code-block:: c + + /** + * A sample tracepoint showing how a cpumask can be queried. + */ + SEC("tp_btf/task_newtask") + int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags) + { + struct bpf_cpumask *cpumask; + + cpumask = bpf_cpumask_create(); + if (!cpumask) + return -ENOMEM; + + bpf_cpumask_set_cpu(0, cpumask); + if (!bpf_cpumask_test_cpu(0, cast(cpumask))) + /* Should never happen. */ + goto release_exit; + + bpf_cpumask_clear_cpu(0, cpumask); + if (bpf_cpumask_test_cpu(0, cast(cpumask))) + /* Should never happen. 
*/ + goto release_exit; + + /* struct cpumask * pointers such as task->cpus_ptr can also be queried. */ + if (bpf_cpumask_test_cpu(0, task->cpus_ptr)) + bpf_printk("task %s can use CPU %d", task->comm, 0); + + release_exit: + bpf_cpumask_release(cpumask); + return 0; + } + +---- + +bpf_cpumask_test_and_set_cpu() and bpf_cpumask_test_and_clear_cpu() are +complementary kfuncs that allow callers to atomically test and set (or clear) +CPUs: + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_test_and_set_cpu bpf_cpumask_test_and_clear_cpu + +---- + +We can also set and clear entire ``struct bpf_cpumask *`` objects in one +operation using bpf_cpumask_setall() and bpf_cpumask_clear(): + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_setall bpf_cpumask_clear + +3.1.2 Operations between cpumasks +--------------------------------- + +In addition to setting and clearing individual CPUs in a single cpumask, +callers can also perform bitwise operations between multiple cpumasks using +bpf_cpumask_and(), bpf_cpumask_or(), and bpf_cpumask_xor(): + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_and bpf_cpumask_or bpf_cpumask_xor + +The following is an example of how they may be used. Note that some of the +kfuncs shown in this example will be covered in more detail below. + +.. code-block:: c + + /** + * A sample tracepoint showing how a cpumask can be mutated using + bitwise operators (and queried). + */ + SEC("tp_btf/task_newtask") + int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags) + { + struct bpf_cpumask *mask1, *mask2, *dst1, *dst2; + + mask1 = bpf_cpumask_create(); + if (!mask1) + return -ENOMEM; + + mask2 = bpf_cpumask_create(); + if (!mask2) { + bpf_cpumask_release(mask1); + return -ENOMEM; + } + + // ...Safely create the other two masks... */ + + bpf_cpumask_set_cpu(0, mask1); + bpf_cpumask_set_cpu(1, mask2); + bpf_cpumask_and(dst1, (const struct cpumask *)mask1, (const struct cpumask *)mask2); + if (!bpf_cpumask_empty((const struct cpumask *)dst1)) + /* Should never happen. */ + goto release_exit; + + bpf_cpumask_or(dst1, (const struct cpumask *)mask1, (const struct cpumask *)mask2); + if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)dst1)) + /* Should never happen. */ + goto release_exit; + + if (!bpf_cpumask_test_cpu(1, (const struct cpumask *)dst1)) + /* Should never happen. */ + goto release_exit; + + bpf_cpumask_xor(dst2, (const struct cpumask *)mask1, (const struct cpumask *)mask2); + if (!bpf_cpumask_equal((const struct cpumask *)dst1, + (const struct cpumask *)dst2)) + /* Should never happen. */ + goto release_exit; + + release_exit: + bpf_cpumask_release(mask1); + bpf_cpumask_release(mask2); + bpf_cpumask_release(dst1); + bpf_cpumask_release(dst2); + return 0; + } + +---- + +The contents of an entire cpumask may be copied to another using +bpf_cpumask_copy(): + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_copy + +---- + +.. _cpumasks-querying-label: + +3.2 Querying cpumasks +--------------------- + +In addition to the above kfuncs, there is also a set of read-only kfuncs that +can be used to query the contents of cpumasks. + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_first bpf_cpumask_first_zero bpf_cpumask_test_cpu + +.. kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_equal bpf_cpumask_intersects bpf_cpumask_subset + bpf_cpumask_empty bpf_cpumask_full + +.. 
kernel-doc:: kernel/bpf/cpumask.c + :identifiers: bpf_cpumask_any bpf_cpumask_any_and + +---- + +Some example usages of these querying kfuncs were shown above. We will not +replicate those exmaples here. Note, however, that all of the aforementioned +kfuncs are tested in `tools/testing/selftests/bpf/progs/cpumask_success.c`_, so +please take a look there if you're looking for more examples of how they can be +used. + +.. _tools/testing/selftests/bpf/progs/cpumask_success.c: + https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/tools/testing/selftests/bpf/progs/cpumask_success.c + + +4. Adding BPF cpumask kfuncs +============================ + +The set of supported BPF cpumask kfuncs are not (yet) a 1-1 match with the +cpumask operations in include/linux/cpumask.h. Any of those cpumask operations +could easily be encapsulated in a new kfunc if and when required. If you'd like +to support a new cpumask operation, please feel free to submit a patch. If you +do add a new cpumask kfunc, please document it here, and add any relevant +selftest testcases to the cpumask selftest suite. diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index b81533d8b061..dbb39e8f9889 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -20,6 +20,7 @@ that goes into great technical depth about the BPF Architecture. syscall_api helpers kfuncs + cpumasks programs maps bpf_prog_run diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index 9fd7fb539f85..a74f9e74087b 100644 --- a/Documentation/bpf/kfuncs.rst +++ b/Documentation/bpf/kfuncs.rst @@ -1,3 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. _kfuncs-header-label: + ============================= BPF Kernel Functions (kfuncs) ============================= @@ -420,3 +424,10 @@ the verifier. bpf_cgroup_ancestor() can be used as follows: bpf_cgroup_release(parent); return 0; } + +3.3 struct cpumask * kfuncs +--------------------------- + +BPF provides a set of kfuncs that can be used to query, allocate, mutate, and +destroy struct cpumask * objects. Please refer to :ref:`cpumasks-header-label` +for more details. diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 409ae2447a9b..25355a0a367a 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -38,6 +38,16 @@ __diag_push(); __diag_ignore_all("-Wmissing-prototypes", "Global kfuncs as their definitions will be in BTF"); +/** + * bpf_cpumask_create() - Create a mutable BPF cpumask. + * + * Allocates a cpumask that can be queried, mutated, acquired, and released by + * a BPF program. The cpumask returned by this function must either be embedded + * in a map as a kptr, or freed with bpf_cpumask_release(). + * + * bpf_cpumask_create() allocates memory using the BPF memory allocator, and + * will not block. It may return NULL if no memory is available. + */ struct bpf_cpumask *bpf_cpumask_create(void) { struct bpf_cpumask *cpumask; @@ -52,12 +62,31 @@ struct bpf_cpumask *bpf_cpumask_create(void) return cpumask; } +/** + * bpf_cpumask_acquire() - Acquire a reference to a BPF cpumask. + * @cpumask: The BPF cpumask being acquired. The cpumask must be a trusted + * pointer. + * + * Acquires a reference to a BPF cpumask. The cpumask returned by this function + * must either be embedded in a map as a kptr, or freed with + * bpf_cpumask_release(). 
+ */ struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) { refcount_inc(&cpumask->usage); return cpumask; } +/** + * bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask + * stored in a map. + * @cpumaskp: A pointer to a BPF cpumask map value. + * + * Attempts to acquire a reference to a BPF cpumask stored in a map value. The + * cpumask returned by this function must either be embedded in a map as a + * kptr, or freed with bpf_cpumask_release(). This function may return NULL if + * no BPF cpumask was found in the specified map value. + */ struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp) { struct bpf_cpumask *cpumask; @@ -76,6 +105,14 @@ struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp) return cpumask; } +/** + * bpf_cpumask_release() - Release a previously acquired BPF cpumask. + * @cpumask: The cpumask being released. + * + * Releases a previously acquired reference to a BPF cpumask. When the final + * reference of the BPF cpumask has been released, it is subsequently freed in + * an RCU callback in the BPF memory allocator. + */ void bpf_cpumask_release(struct bpf_cpumask *cpumask) { if (!cpumask) @@ -88,16 +125,36 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask) } } +/** + * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask. + * @cpumask: The cpumask being queried. + * + * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask + * pointer may be safely passed to this function. + */ u32 bpf_cpumask_first(const struct cpumask *cpumask) { return cpumask_first(cpumask); } +/** + * bpf_cpumask_first_zero() - Get the index of the first unset bit in the + * cpumask. + * @cpumask: The cpumask being queried. + * + * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask + * pointer may be safely passed to this function. + */ u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) { return cpumask_first_zero(cpumask); } +/** + * bpf_cpumask_set_cpu() - Set a bit for a CPU in a BPF cpumask. + * @cpu: The CPU to be set in the cpumask. + * @cpumask: The BPF cpumask in which a bit is being set. + */ void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) { if (!cpu_valid(cpu)) @@ -106,6 +163,11 @@ void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) cpumask_set_cpu(cpu, (struct cpumask *)cpumask); } +/** + * bpf_cpumask_clear_cpu() - Clear a bit for a CPU in a BPF cpumask. + * @cpu: The CPU to be cleared from the cpumask. + * @cpumask: The BPF cpumask in which a bit is being cleared. + */ void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) { if (!cpu_valid(cpu)) @@ -114,6 +176,15 @@ void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) cpumask_clear_cpu(cpu, (struct cpumask *)cpumask); } +/** + * bpf_cpumask_test_cpu() - Test whether a CPU is set in a cpumask. + * @cpu: The CPU being queried for. + * @cpumask: The cpumask being queried for containing a CPU. + * + * Return: + * * true - @cpu is set in the cpumask + * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu. + */ bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) { if (!cpu_valid(cpu)) @@ -122,6 +193,15 @@ bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) return cpumask_test_cpu(cpu, (struct cpumask *)cpumask); } +/** + * bpf_cpumask_test_and_set_cpu() - Atomically test and set a CPU in a BPF cpumask. + * @cpu: The CPU being set and queried for. 
+ * @cpumask: The BPF cpumask being set and queried for containing a CPU. + * + * Return: + * * true - @cpu is set in the cpumask + * * false - @cpu was not set in the cpumask, or @cpu is invalid. + */ bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) { if (!cpu_valid(cpu)) @@ -130,6 +210,16 @@ bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) return cpumask_test_and_set_cpu(cpu, (struct cpumask *)cpumask); } +/** + * bpf_cpumask_test_and_clear_cpu() - Atomically test and clear a CPU in a BPF + * cpumask. + * @cpu: The CPU being cleared and queried for. + * @cpumask: The BPF cpumask being cleared and queried for containing a CPU. + * + * Return: + * * true - @cpu is set in the cpumask + * * false - @cpu was not set in the cpumask, or @cpu is invalid. + */ bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) { if (!cpu_valid(cpu)) @@ -138,16 +228,36 @@ bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) return cpumask_test_and_clear_cpu(cpu, (struct cpumask *)cpumask); } +/** + * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask. + * @cpumask: The BPF cpumask having all of its bits set. + */ void bpf_cpumask_setall(struct bpf_cpumask *cpumask) { cpumask_setall((struct cpumask *)cpumask); } +/** + * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask. + * @cpumask: The BPF cpumask being cleared. + */ void bpf_cpumask_clear(struct bpf_cpumask *cpumask) { cpumask_clear((struct cpumask *)cpumask); } +/** + * bpf_cpumask_and() - AND two cpumasks and store the result. + * @dst: The BPF cpumask where the result is being stored. + * @src1: The first input. + * @src2: The second input. + * + * Return: + * * true - @dst has at least one bit set following the operation + * * false - @dst is empty following the operation + * + * struct bpf_cpumask pointers may be safely passed to @src1 and @src2. + */ bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1, const struct cpumask *src2) @@ -155,6 +265,14 @@ bool bpf_cpumask_and(struct bpf_cpumask *dst, return cpumask_and((struct cpumask *)dst, src1, src2); } +/** + * bpf_cpumask_or() - OR two cpumasks and store the result. + * @dst: The BPF cpumask where the result is being stored. + * @src1: The first input. + * @src2: The second input. + * + * struct bpf_cpumask pointers may be safely passed to @src1 and @src2. + */ void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1, const struct cpumask *src2) @@ -162,6 +280,14 @@ void bpf_cpumask_or(struct bpf_cpumask *dst, cpumask_or((struct cpumask *)dst, src1, src2); } +/** + * bpf_cpumask_xor() - XOR two cpumasks and store the result. + * @dst: The BPF cpumask where the result is being stored. + * @src1: The first input. + * @src2: The second input. + * + * struct bpf_cpumask pointers may be safely passed to @src1 and @src2. + */ void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1, const struct cpumask *src2) @@ -169,41 +295,123 @@ void bpf_cpumask_xor(struct bpf_cpumask *dst, cpumask_xor((struct cpumask *)dst, src1, src2); } +/** + * bpf_cpumask_equal() - Check two cpumasks for equality. + * @src1: The first input. + * @src2: The second input. + * + * Return: + * * true - @src1 and @src2 have the same bits set. + * * false - @src1 and @src2 differ in at least one bit. + * + * struct bpf_cpumask pointers may be safely passed to @src1 and @src2. 
+ */ bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) { return cpumask_equal(src1, src2); } +/** + * bpf_cpumask_intersects() - Check two cpumasks for overlap. + * @src1: The first input. + * @src2: The second input. + * + * Return: + * * true - @src1 and @src2 have at least one of the same bits set. + * * false - @src1 and @src2 don't have any of the same bits set. + * + * struct bpf_cpumask pointers may be safely passed to @src1 and @src2. + */ bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) { return cpumask_intersects(src1, src2); } +/** + * bpf_cpumask_subset() - Check if a cpumask is a subset of another. + * @src1: The first cpumask being checked as a subset. + * @src2: The second cpumask being checked as a superset. + * + * Return: + * * true - All of the bits of @src1 are set in @src2. + * * false - At least one bit in @src1 is not set in @src2. + * + * struct bpf_cpumask pointers may be safely passed to @src1 and @src2. + */ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) { return cpumask_subset(src1, src2); } +/** + * bpf_cpumask_empty() - Check if a cpumask is empty. + * @cpumask: The cpumask being checked. + * + * Return: + * * true - None of the bits in @cpumask are set. + * * false - At least one bit in @cpumask is set. + * + * A struct bpf_cpumask pointer may be safely passed to @cpumask. + */ bool bpf_cpumask_empty(const struct cpumask *cpumask) { return cpumask_empty(cpumask); } +/** + * bpf_cpumask_full() - Check if a cpumask has all bits set. + * @cpumask: The cpumask being checked. + * + * Return: + * * true - All of the bits in @cpumask are set. + * * false - At least one bit in @cpumask is cleared. + * + * A struct bpf_cpumask pointer may be safely passed to @cpumask. + */ bool bpf_cpumask_full(const struct cpumask *cpumask) { return cpumask_full(cpumask); } +/** + * bpf_cpumask_copy() - Copy the contents of a cpumask into a BPF cpumask. + * @dst: The BPF cpumask being copied into. + * @src: The cpumask being copied. + * + * A struct bpf_cpumask pointer may be safely passed to @src. + */ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) { cpumask_copy((struct cpumask *)dst, src); } +/** + * bpf_cpumask_any() - Return a random set CPU from a cpumask. + * @cpumask: The cpumask being queried. + * + * Return: + * * A random set bit within [0, num_cpus) if at least one bit is set. + * * >= num_cpus if no bit is set. + * + * A struct bpf_cpumask pointer may be safely passed to @src. + */ u32 bpf_cpumask_any(const struct cpumask *cpumask) { return cpumask_any(cpumask); } +/** + * bpf_cpumask_any_and() - Return a random set CPU from the AND of two + * cpumasks. + * @src1: The first cpumask. + * @src2: The second cpumask. + * + * Return: + * * A random set bit within [0, num_cpus) if at least one bit is set. + * * >= num_cpus if no bit is set. + * + * struct bpf_cpumask pointers may be safely passed to @src1 and @src2. + */ u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) { return cpumask_any_and(src1, src2); -- cgit v1.2.3-58-ga151 From 1e12d3ef47d228e4e7d30f9bc5e6744ede90319c Mon Sep 17 00:00:00 2001 From: David Vernet Date: Wed, 25 Jan 2023 10:47:32 -0600 Subject: bpf: Allow BPF_PROG_TYPE_STRUCT_OPS programs to be sleepable BPF struct_ops programs currently cannot be marked as sleepable. This need not be the case -- struct_ops programs can be sleepable, and e.g. invoke kfuncs that export the KF_SLEEPABLE flag. 
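As a purely hypothetical sketch of what this enables (the ops member and argument type below are invented for illustration, and the "struct_ops.s" section name relies on the libbpf support added later in this series), a struct_ops member that needs to sleep could be implemented as a sleepable program:

    SEC("struct_ops.s/slow_init")   /* the ".s" suffix marks the program sleepable */
    int BPF_PROG(slow_init, struct my_ops_args *args)
    {
            /* May call KF_SLEEPABLE kfuncs here, e.g. ones that allocate
             * with GFP_KERNEL or otherwise might block.
             */
            return 0;
    }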
So as to allow future struct_ops programs to invoke such kfuncs, this patch updates the verifier to allow struct_ops programs to be sleepable. A follow-on patch will add support to libbpf for specifying struct_ops.s as a sleepable struct_ops program, and then another patch will add testcases to the dummy_st_ops selftest suite which test sleepable struct_ops behavior. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230125164735.785732-2-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 5 +++-- tools/testing/selftests/bpf/verifier/sleepable.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel/bpf') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bb38b01b738f..c8907df49f81 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -17114,7 +17114,8 @@ static bool can_be_sleepable(struct bpf_prog *prog) } } return prog->type == BPF_PROG_TYPE_LSM || - prog->type == BPF_PROG_TYPE_KPROBE; /* only for uprobes */ + prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || + prog->type == BPF_PROG_TYPE_STRUCT_OPS; } static int check_attach_btf_id(struct bpf_verifier_env *env) @@ -17136,7 +17137,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) } if (prog->aux->sleepable && !can_be_sleepable(prog)) { - verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter and uprobe programs can be sleepable\n"); + verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); return -EINVAL; } diff --git a/tools/testing/selftests/bpf/verifier/sleepable.c b/tools/testing/selftests/bpf/verifier/sleepable.c index bea0daef908a..1f0d2bdc673f 100644 --- a/tools/testing/selftests/bpf/verifier/sleepable.c +++ b/tools/testing/selftests/bpf/verifier/sleepable.c @@ -85,7 +85,7 @@ .expected_attach_type = BPF_TRACE_RAW_TP, .kfunc = "sched_switch", .result = REJECT, - .errstr = "Only fentry/fexit/fmod_ret, lsm, iter and uprobe programs can be sleepable", + .errstr = "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable", .flags = BPF_F_SLEEPABLE, .runs = -1, }, -- cgit v1.2.3-58-ga151 From 51a52a29ebaa8395de090fa415c6e1b2899a50f1 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Wed, 25 Jan 2023 10:47:34 -0600 Subject: bpf: Pass const struct bpf_prog * to .check_member The .check_member field of struct bpf_struct_ops is currently passed the member's btf_type via const struct btf_type *t, and a const struct btf_member *member. This allows the struct_ops implementation to check whether e.g. an ops is supported, but it would be useful to also enforce that the struct_ops prog being loaded for that member has other qualities, like being sleepable (or not). This patch therefore updates the .check_member() callback to also take a const struct bpf_prog *prog argument. 
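As a hypothetical sketch of the kind of policy this enables (the callback below is illustrative only and not part of this patch): with the prog now visible to .check_member(), a struct_ops implementation could, for example, refuse sleepable programs for members that may be invoked from atomic context.

    static int example_check_member(const struct btf_type *t,
                                    const struct btf_member *member,
                                    const struct bpf_prog *prog)
    {
            /* Illustrative policy: none of this struct_ops' members
             * tolerate a sleepable implementation.
             */
            if (prog->aux->sleepable)
                    return -EINVAL;

            return 0;
    }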
Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230125164735.785732-4-void@manifault.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 3 ++- kernel/bpf/verifier.c | 2 +- net/ipv4/bpf_tcp_ca.c | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'kernel/bpf') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1bec48d9e5d9..0d868ef1b973 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1422,7 +1422,8 @@ struct bpf_struct_ops { const struct bpf_verifier_ops *verifier_ops; int (*init)(struct btf *btf); int (*check_member)(const struct btf_type *t, - const struct btf_member *member); + const struct btf_member *member, + const struct bpf_prog *prog); int (*init_member)(const struct btf_type *t, const struct btf_member *member, void *kdata, const void *udata); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c8907df49f81..6bd097e0d45f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -16792,7 +16792,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) } if (st_ops->check_member) { - int err = st_ops->check_member(t, member); + int err = st_ops->check_member(t, member, prog); if (err) { verbose(env, "attach to unsupported member %s of struct %s\n", diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 4517d2bd186a..13fc0c185cd9 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -248,7 +248,8 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t, } static int bpf_tcp_ca_check_member(const struct btf_type *t, - const struct btf_member *member) + const struct btf_member *member, + const struct bpf_prog *prog) { if (is_unsupported(__btf_member_bit_offset(t, member) / 8)) return -ENOTSUPP; -- cgit v1.2.3-58-ga151