author    John Fastabend <john.fastabend@gmail.com>  2020-09-24 11:45:06 -0700
committer Alexei Starovoitov <ast@kernel.org>        2020-09-25 16:47:21 -0700
commit    4fbb38a3b2cc7bf4340fe9bd9c73622760adc19d
tree      1e4be30e2794b80015074db7f3753054a11940f8 /kernel/bpf/verifier.c
parent    84085f877295b6b12d95262ec4082447214b09be
bpf, verifier: Remove redundant var_off.value ops in scalar known reg cases
In BPF_AND and BPF_OR alu cases we have this pattern when the src and dst
tnum is a constant:

 1 dst_reg->var_off = tnum_[op](dst_reg->var_off, src_reg.var_off)
 2 scalar32_min_max_[op]
 3   if (known) return
 4 scalar_min_max_[op]
 5   if (known)
 6     __mark_reg_known(dst_reg,
                        dst_reg->var_off.value [op] src_reg.var_off.value)

The result is that in (1) we calculate the var_off value and store it in
dst_reg. Then in (6) we duplicate this logic, doing the op again on the
value. The duplication comes from the tnum_[op] handlers because they have
already done the value calculation. For example, this is tnum_and():

 struct tnum tnum_and(struct tnum a, struct tnum b)
 {
	u64 alpha, beta, v;

	alpha = a.value | a.mask;
	beta = b.value | b.mask;
	v = a.value & b.value;
	return TNUM(v, alpha & beta & ~v);
 }

So let's remove the redundant op calculation. It's confusing for readers
and unnecessary. It's also not harmful because those ops have the property
r1 & r1 = r1 and r1 | r1 = r1.

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
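For reference, a minimal user-space sketch of the point above (not kernel
code; struct tnum and the TNUM() helper are re-declared locally just for
the demo): when both inputs are constants (mask == 0), tnum_and() already
stores the AND of the two constants in the result's value field, so the
dropped recomputation in __mark_reg_known() only produced the same number.

 /* Standalone demo, not kernel code: model the tnum value/mask pair and
  * check that for two constants (mask == 0) tnum_and() already yields
  * value == a.value & b.value.
  */
 #include <assert.h>
 #include <stdint.h>
 #include <stdio.h>

 struct tnum { uint64_t value; uint64_t mask; };
 #define TNUM(_v, _m) ((struct tnum){ .value = (_v), .mask = (_m) })

 /* Mirrors the kernel's tnum_and() quoted in the commit message. */
 static struct tnum tnum_and(struct tnum a, struct tnum b)
 {
	uint64_t alpha = a.value | a.mask;
	uint64_t beta = b.value | b.mask;
	uint64_t v = a.value & b.value;

	return TNUM(v, alpha & beta & ~v);
 }

 int main(void)
 {
	struct tnum dst = TNUM(0xf0f0, 0);	/* known constant */
	struct tnum src = TNUM(0x0ff0, 0);	/* known constant */
	struct tnum res = tnum_and(dst, src);

	assert(res.mask == 0);				/* result is still a constant */
	assert(res.value == (dst.value & src.value));	/* AND already folded in */
	printf("0x%llx\n", (unsigned long long)res.value);
	return 0;
 }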
Diffstat (limited to 'kernel/bpf/verifier.c')
 kernel/bpf/verifier.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d4ba29fb17a6..b25ba989c2dc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5849,8 +5849,7 @@ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
 	u64 umax_val = src_reg->umax_value;
 
 	if (src_known && dst_known) {
-		__mark_reg_known(dst_reg, dst_reg->var_off.value &
-					  src_reg->var_off.value);
+		__mark_reg_known(dst_reg, dst_reg->var_off.value);
 		return;
 	}
 
@@ -5920,8 +5919,7 @@ static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
 	u64 umin_val = src_reg->umin_value;
 
 	if (src_known && dst_known) {
-		__mark_reg_known(dst_reg, dst_reg->var_off.value |
-					  src_reg->var_off.value);
+		__mark_reg_known(dst_reg, dst_reg->var_off.value);
 		return;
 	}
 
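The BPF_OR hunk rests on the same property in tnum_or(). That helper is not
quoted in the commit, so the version below is a reconstruction sketched in
the style of the demo above (uint64_t instead of u64, reusing its struct
tnum and TNUM()), and may differ cosmetically from kernel/bpf/tnum.c: with
both masks zero, mu == 0 and v == a.value | b.value, so the surviving
__mark_reg_known(dst_reg, dst_reg->var_off.value) call already receives the
OR of the two constants.

 /* Reconstructed sketch of tnum_or(), not part of this diff: for two
  * constants (mask == 0), mu == 0 and v == a.value | b.value, so the
  * value written in step (1) is already the final OR result.
  */
 static struct tnum tnum_or(struct tnum a, struct tnum b)
 {
	uint64_t v = a.value | b.value;
	uint64_t mu = a.mask | b.mask;

	return TNUM(v, mu & ~v);
 }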