diff options
| author | Alexei Starovoitov <ast@kernel.org> | 2026-04-12 08:29:32 -0700 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2026-04-12 12:36:45 -0700 |
| commit | f8a8faceab9953ed074cd4125b31cc6a562237d8 (patch) | |
| tree | d33d7e95e82c4fdab42e6aff2fc38d43006815ce /include | |
| parent | fc150cddeea77561fbc94ac8f02cc75b016b09dd (diff) | |
| download | lwn-f8a8faceab9953ed074cd4125b31cc6a562237d8.tar.gz lwn-f8a8faceab9953ed074cd4125b31cc6a562237d8.zip | |
bpf: Move check_cfg() into cfg.c
verifier.c is huge. Move check_cfg(), compute_postorder(),
compute_scc() into cfg.c
Mechanical move. No functional changes.
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/r/20260412152936.54262-4-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/bpf_verifier.h | 115 |
1 file changed, 114 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e3f18667e030..aa92a597bc5c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -983,6 +983,41 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env, bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \ }) +static inline void mark_prune_point(struct bpf_verifier_env *env, int idx) +{ + env->insn_aux_data[idx].prune_point = true; +} + +static inline bool bpf_is_prune_point(struct bpf_verifier_env *env, int insn_idx) +{ + return env->insn_aux_data[insn_idx].prune_point; +} + +static inline void mark_force_checkpoint(struct bpf_verifier_env *env, int idx) +{ + env->insn_aux_data[idx].force_checkpoint = true; +} + +static inline bool bpf_is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) +{ + return env->insn_aux_data[insn_idx].force_checkpoint; +} + +static inline void mark_calls_callback(struct bpf_verifier_env *env, int idx) +{ + env->insn_aux_data[idx].calls_callback = true; +} + +static inline bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx) +{ + return env->insn_aux_data[insn_idx].calls_callback; +} + +static inline void mark_jmp_point(struct bpf_verifier_env *env, int idx) +{ + env->insn_aux_data[idx].jmp_point = true; +} + static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; @@ -1179,13 +1214,91 @@ struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *en int bpf_jmp_offset(struct bpf_insn *insn); struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx); void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask); -bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx); bool bpf_subprog_is_global(const struct bpf_verifier_env *env, int subprog); int bpf_find_subprog(struct bpf_verifier_env *env, int off); int bpf_compute_const_regs(struct bpf_verifier_env *env); int 
bpf_prune_dead_branches(struct bpf_verifier_env *env); +int bpf_check_cfg(struct bpf_verifier_env *env); int bpf_compute_postorder(struct bpf_verifier_env *env); +int bpf_compute_scc(struct bpf_verifier_env *env); + +struct bpf_map_desc { + struct bpf_map *ptr; + int uid; +}; + +struct bpf_kfunc_call_arg_meta { + /* In parameters */ + struct btf *btf; + u32 func_id; + u32 kfunc_flags; + const struct btf_type *func_proto; + const char *func_name; + /* Out parameters */ + u32 ref_obj_id; + u8 release_regno; + bool r0_rdonly; + u32 ret_btf_id; + u64 r0_size; + u32 subprogno; + struct { + u64 value; + bool found; + } arg_constant; + + /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling, + * generally to pass info about user-defined local kptr types to later + * verification logic + * bpf_obj_drop/bpf_percpu_obj_drop + * Record the local kptr type to be drop'd + * bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type) + * Record the local kptr type to be refcount_incr'd and use + * arg_owning_ref to determine whether refcount_acquire should be + * fallible + */ + struct btf *arg_btf; + u32 arg_btf_id; + bool arg_owning_ref; + bool arg_prog; + + struct { + struct btf_field *field; + } arg_list_head; + struct { + struct btf_field *field; + } arg_rbtree_root; + struct { + enum bpf_dynptr_type type; + u32 id; + u32 ref_obj_id; + } initialized_dynptr; + struct { + u8 spi; + u8 frameno; + } iter; + struct bpf_map_desc map; + u64 mem_size; +}; + +int bpf_get_helper_proto(struct bpf_verifier_env *env, int func_id, + const struct bpf_func_proto **ptr); +int bpf_fetch_kfunc_arg_meta(struct bpf_verifier_env *env, s32 func_id, + s16 offset, struct bpf_kfunc_call_arg_meta *meta); +bool bpf_is_async_callback_calling_insn(struct bpf_insn *insn); +bool bpf_is_sync_callback_calling_insn(struct bpf_insn *insn); +static inline bool bpf_is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta) +{ + return meta->kfunc_flags & KF_ITER_NEXT; +} + +static inline bool 
bpf_is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta) +{ + return meta->kfunc_flags & KF_SLEEPABLE; +} +bool bpf_is_kfunc_pkt_changing(struct bpf_kfunc_call_arg_meta *meta); +struct bpf_iarray *bpf_iarray_realloc(struct bpf_iarray *old, size_t n_elem); +int bpf_copy_insn_array_uniq(struct bpf_map *map, u32 start, u32 end, u32 *off); bool bpf_insn_is_cond_jump(u8 code); bool bpf_is_may_goto_insn(struct bpf_insn *insn); |
