Diffstat (limited to 'include/linux/bpf_verifier.h')
-rw-r--r--  include/linux/bpf_verifier.h  51
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 32c23f2a3086..9734544b6957 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -115,6 +115,14 @@ struct bpf_reg_state {
int depth:30;
} iter;
+ /* For irq stack slots */
+ struct {
+ enum {
+ IRQ_NATIVE_KFUNC,
+ IRQ_LOCK_KFUNC,
+ } kfunc_class;
+ } irq;
+
/* Max size from any of the above. */
struct {
unsigned long raw1;
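
Illustration (not part of the patch): the new irq.kfunc_class field lets the verifier remember which kfunc family saved the IRQ flags into a stack slot, so a restore is presumably only accepted from the matching family. A minimal userspace sketch of that pairing check; all names below are hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum irq_kfunc_class { IRQ_NATIVE_KFUNC, IRQ_LOCK_KFUNC };

struct irq_slot {
	bool active;				/* IRQ flags currently saved here */
	enum irq_kfunc_class kfunc_class;	/* which kfunc family saved them */
};

static void irq_save(struct irq_slot *slot, enum irq_kfunc_class cls)
{
	slot->active = true;
	slot->kfunc_class = cls;
}

static int irq_restore(struct irq_slot *slot, enum irq_kfunc_class cls)
{
	if (!slot->active || slot->kfunc_class != cls)
		return -1;	/* mismatched save/restore pair is rejected */
	slot->active = false;
	return 0;
}

int main(void)
{
	struct irq_slot slot = { 0 };

	irq_save(&slot, IRQ_LOCK_KFUNC);
	printf("%d\n", irq_restore(&slot, IRQ_NATIVE_KFUNC));	/* -1 */
	printf("%d\n", irq_restore(&slot, IRQ_LOCK_KFUNC));	/* 0 */
	return 0;
}
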
@@ -255,9 +263,12 @@ struct bpf_reference_state {
* default to pointer reference on zero initialization of a state.
*/
enum ref_state_type {
- REF_TYPE_PTR = 1,
- REF_TYPE_IRQ = 2,
- REF_TYPE_LOCK = 3,
+ REF_TYPE_PTR = (1 << 1),
+ REF_TYPE_IRQ = (1 << 2),
+ REF_TYPE_LOCK = (1 << 3),
+ REF_TYPE_RES_LOCK = (1 << 4),
+ REF_TYPE_RES_LOCK_IRQ = (1 << 5),
+ REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
} type;
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
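
Illustration (not part of the patch): moving ref_state_type to power-of-two values means a single bitwise test can match several lock flavours at once via REF_TYPE_LOCK_MASK. A standalone sketch of that idiom:

#include <stdbool.h>

enum ref_state_type {
	REF_TYPE_PTR		= (1 << 1),
	REF_TYPE_IRQ		= (1 << 2),
	REF_TYPE_LOCK		= (1 << 3),
	REF_TYPE_RES_LOCK	= (1 << 4),
	REF_TYPE_RES_LOCK_IRQ	= (1 << 5),
	REF_TYPE_LOCK_MASK	= REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
};

/* true for plain, resilient and resilient-irqsave locks alike */
static bool ref_is_lock(enum ref_state_type type)
{
	return (type & REF_TYPE_LOCK_MASK) != 0;
}
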
@@ -424,14 +435,11 @@ struct bpf_verifier_state {
u32 active_locks;
u32 active_preempt_locks;
u32 active_irq_id;
+ u32 active_lock_id;
+ void *active_lock_ptr;
bool active_rcu_lock;
bool speculative;
- /* If this state was ever pointed-to by other state's loop_entry field
- * this flag would be set to true. Used to avoid freeing such states
- * while they are still in use.
- */
- bool used_as_loop_entry;
bool in_sleepable;
/* first and last insn idx of this verifier state */
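
Illustration (not part of the patch): active_lock_id and active_lock_ptr appear to record the most recently acquired lock, which is enough to enforce strictly nested (LIFO) release ordering. A simplified userspace model of such a check, not the verifier's actual logic:

#include <stddef.h>

#define MAX_HELD 8	/* bounds checks omitted for brevity */

struct lock_ref { unsigned int id; void *ptr; };

struct state {
	struct lock_ref held[MAX_HELD];
	int nr_held;
	unsigned int active_lock_id;	/* id of the most recent acquire, 0 if none */
	void *active_lock_ptr;		/* pointer of the most recent acquire */
};

static void do_lock(struct state *s, unsigned int id, void *ptr)
{
	s->held[s->nr_held++] = (struct lock_ref){ .id = id, .ptr = ptr };
	s->active_lock_id = id;
	s->active_lock_ptr = ptr;
}

static int do_unlock(struct state *s, unsigned int id, void *ptr)
{
	if (id != s->active_lock_id || ptr != s->active_lock_ptr)
		return -1;	/* out-of-order release is rejected */
	s->nr_held--;
	if (s->nr_held) {	/* fall back to the previous outstanding lock */
		s->active_lock_id = s->held[s->nr_held - 1].id;
		s->active_lock_ptr = s->held[s->nr_held - 1].ptr;
	} else {
		s->active_lock_id = 0;
		s->active_lock_ptr = NULL;
	}
	return 0;
}
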
@@ -458,6 +466,11 @@ struct bpf_verifier_state {
u32 dfs_depth;
u32 callback_unroll_depth;
u32 may_goto_depth;
+ /* If this state was ever pointed-to by other state's loop_entry field
+ * this flag would be set to true. Used to avoid freeing such states
+ * while they are still in use.
+ */
+ u32 used_as_loop_entry;
};
#define bpf_get_spilled_reg(slot, frame, mask) \
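
Illustration (not part of the patch): used_as_loop_entry widens from bool to u32, which suggests a use count rather than a flag, so a state can be released once the last state that used it as a loop entry lets go. A hypothetical refcount-style sketch:

#include <stdlib.h>

struct vstate {
	unsigned int used_as_loop_entry;	/* how many states point at us as loop_entry */
	/* ... the rest of the verifier state ... */
};

static void loop_entry_get(struct vstate *st)
{
	st->used_as_loop_entry++;
}

static void loop_entry_put(struct vstate *st)
{
	if (--st->used_as_loop_entry == 0)
		free(st);	/* last user gone, the state can be released */
}
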
@@ -498,8 +511,10 @@ struct bpf_verifier_state {
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
- struct bpf_verifier_state_list *next;
- int miss_cnt, hit_cnt;
+ struct list_head node;
+ u32 miss_cnt;
+ u32 hit_cnt:31;
+ u32 in_free_list:1;
};
struct bpf_loop_inline_state {
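
Illustration (not part of the patch): the list entry now carries its own bookkeeping, with hit_cnt packed into 31 bits next to a one-bit in_free_list marker, so tracking free-list membership costs no extra space. A small standalone model of that layout:

#include <stdint.h>
#include <stdio.h>

struct state_list_entry {
	uint32_t miss_cnt;		/* times this entry failed to prune a state */
	uint32_t hit_cnt:31;		/* times it did */
	uint32_t in_free_list:1;	/* entry is currently parked on the free list */
};

int main(void)
{
	struct state_list_entry e = { 0 };

	e.hit_cnt++;
	e.in_free_list = 1;		/* flag shares a 32-bit word with hit_cnt */
	printf("%zu\n", sizeof(e));	/* 8 bytes with common compilers */
	return 0;
}
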
@@ -589,6 +604,8 @@ struct bpf_insn_aux_data {
* accepts callback function as a parameter.
*/
bool calls_callback;
+ /* registers alive before this instruction. */
+ u16 live_regs_before;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
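
Illustration (not part of the patch): BPF has eleven registers (R0-R10), so a u16 bitmask per instruction is enough for live_regs_before. A sketch of how such a mask would be queried, assuming bit r corresponds to register r:

#include <stdbool.h>
#include <stdint.h>

static bool reg_live_before(uint16_t live_regs_before, int regno)
{
	return (live_regs_before >> regno) & 1;	/* regno in 0..10 for R0..R10 */
}
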
@@ -665,6 +682,7 @@ struct bpf_subprog_info {
/* true if bpf_fastcall stack region is used by functions that can't be inlined */
bool keep_fastcall_stack: 1;
bool changes_pkt_data: 1;
+ bool might_sleep: 1;
enum priv_stack_mode priv_stack_mode;
u8 arg_cnt;
@@ -710,8 +728,11 @@ struct bpf_verifier_env {
bool test_state_freq; /* test verifier with different pruning frequency */
bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
- struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- struct bpf_verifier_state_list *free_list;
+ /* Search pruning optimization, array of list_heads for
+ * lists of struct bpf_verifier_state_list.
+ */
+ struct list_head *explored_states;
+ struct list_head free_list; /* list of struct bpf_verifier_state_list */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
u32 used_map_cnt; /* number of used maps */
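
Illustration (not part of the patch): replacing the hand-rolled *next chains with list_head buckets gives O(1) unlinking when a state is evicted or moved to the free list. A simplified userspace stand-in; the bucket-index function is a placeholder, not the verifier's hashing:

#include <stddef.h>

struct node { struct node *prev, *next; };	/* stand-in for list_head */

static void unlink_node(struct node *n)
{
	n->prev->next = n->next;	/* O(1): no walk from the bucket head */
	n->next->prev = n->prev;
}

static struct node *bucket_for(struct node *buckets, size_t nbuckets, int insn_idx)
{
	return &buckets[(size_t)insn_idx % nbuckets];	/* placeholder index function */
}
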
@@ -742,7 +763,11 @@ struct bpf_verifier_env {
struct {
int *insn_state;
int *insn_stack;
+ /* vector of instruction indexes sorted in post-order */
+ int *insn_postorder;
int cur_stack;
+ /* current position in the insn_postorder vector */
+ int cur_postorder;
} cfg;
struct backtrack_state bt;
struct bpf_insn_hist_entry *insn_hist;
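
Illustration (not part of the patch): insn_postorder is a post-order over the control-flow graph, which is the natural iteration order for backward dataflow passes such as liveness. A toy recursive sketch of how such an array gets filled; the verifier presumably does this during its existing iterative CFG walk:

#define MAX_INSNS 16

static int succ[MAX_INSNS][2];		/* up to two successors per instruction */
static int nsucc[MAX_INSNS];
static int visited[MAX_INSNS];
static int insn_postorder[MAX_INSNS];
static int cur_postorder;

static void dfs(int insn)
{
	visited[insn] = 1;
	for (int i = 0; i < nsucc[insn]; i++)
		if (!visited[succ[insn][i]])
			dfs(succ[insn][i]);
	/* emitted only after every successor, hence post-order */
	insn_postorder[cur_postorder++] = insn;
}
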
@@ -767,6 +792,8 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
+ u32 free_list_size;
+ u32 explored_states_size;
bpfptr_t fd_array;
/* bit mask to keep track of whether a register has been accessed
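
Illustration (not part of the patch): free_list_size and explored_states_size are plain counters; what they enable is bounding memory, e.g. trimming the free list once it grows past some cap. The cap and eviction policy below are invented for illustration; only the counters themselves come from the patch:

#include <stdlib.h>

#define FREE_LIST_MAX 64	/* invented cap, purely for illustration */

struct freeable { struct freeable *next; };

struct env_counters {
	struct freeable *free_list;
	unsigned int free_list_size;
};

static void maybe_trim_free_list(struct env_counters *env)
{
	while (env->free_list_size > FREE_LIST_MAX && env->free_list) {
		struct freeable *victim = env->free_list;

		env->free_list = victim->next;
		env->free_list_size--;
		free(victim);
	}
}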