summaryrefslogtreecommitdiff
path: root/net
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2026-04-09 14:56:24 +0000
committerJakub Kicinski <kuba@kernel.org>2026-04-12 14:30:25 -0700
commitfb37aea2a00e67ef5264ea39371d350a1d19b24f (patch)
treeb32c696c4b12e594ca801c925f0382abc5cc4242 /net
parent97449a5f1a586d2befde5297b0fcb0bfdade774e (diff)
downloadlwn-fb37aea2a00e67ef5264ea39371d350a1d19b24f.tar.gz
lwn-fb37aea2a00e67ef5264ea39371d350a1d19b24f.zip
net: change sk_filter_trim_cap() to return a drop_reason by value
Current return value can be replaced with the drop_reason, reducing kernel bloat:

$ scripts/bloat-o-meter -t vmlinux.old vmlinux.new
add/remove: 0/2 grow/shrink: 1/11 up/down: 32/-603 (-571)
Function                       old     new   delta
tcp_v6_rcv                    3135    3167     +32
unix_dgram_sendmsg            1731    1726      -5
netlink_unicast                957     945     -12
netlink_dump                  1372    1359     -13
sk_filter_trim_cap             882     858     -24
tcp_v4_rcv                    3143    3111     -32
__pfx_tcp_filter                32       -     -32
netlink_broadcast_filtered    1633    1595     -38
sock_queue_rcv_skb_reason      126      76     -50
tun_net_xmit                  1127    1074     -53
__sk_receive_skb               690     632     -58
udpv6_queue_rcv_one_skb        935     869     -66
udp_queue_rcv_one_skb          919     853     -66
tcp_filter                     154       -    -154
Total: Before=29722783, After=29722212, chg -0.00%

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260409145625.2306224-6-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--net/core/filter.c31
-rw-r--r--net/core/sock.c5
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv6/udp.c3
-rw-r--r--net/rose/rose_in.c3
5 files changed, 22 insertions, 23 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 5569d83b8be0..bf9c37b27646 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -121,20 +121,20 @@ EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
* @cap: limit on how short the eBPF program may trim the packet
- * @reason: record drop reason
*
* Run the eBPF program and then cut skb->data to correct size returned by
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
* than pkt_len we keep whole skb->data. This is the socket level
* wrapper to bpf_prog_run. It returns 0 if the packet should
- * be accepted or -EPERM if the packet should be tossed.
+ * be accepted or a drop_reason if the packet should be tossed.
*
*/
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
- unsigned int cap, enum skb_drop_reason *reason)
+enum skb_drop_reason
+sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
- int err;
+ enum skb_drop_reason drop_reason;
struct sk_filter *filter;
+ int err;
/*
* If the skb was allocated from pfmemalloc reserves, only
@@ -143,21 +143,17 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
*/
if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
- *reason = SKB_DROP_REASON_PFMEMALLOC;
- return -ENOMEM;
+ return SKB_DROP_REASON_PFMEMALLOC;
}
err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
- if (err) {
- *reason = SKB_DROP_REASON_SOCKET_FILTER;
- return err;
- }
+ if (err)
+ return SKB_DROP_REASON_SOCKET_FILTER;
err = security_sock_rcv_skb(sk, skb);
- if (err) {
- *reason = SKB_DROP_REASON_SECURITY_HOOK;
- return err;
- }
+ if (err)
+ return SKB_DROP_REASON_SECURITY_HOOK;
+ drop_reason = 0;
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter) {
@@ -168,11 +164,12 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
skb->sk = save_sk;
err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
+ if (err)
+ drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
}
rcu_read_unlock();
- *reason = err ? SKB_DROP_REASON_SOCKET_FILTER : 0;
- return err;
+ return drop_reason;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
diff --git a/net/core/sock.c b/net/core/sock.c
index 1ffcb15d0fc5..367fd7bad4ac 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -544,11 +544,12 @@ EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested, unsigned int trim_cap, bool refcounted)
{
- enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ enum skb_drop_reason reason;
int rc = NET_RX_SUCCESS;
int err;
- if (sk_filter_trim_cap(sk, skb, trim_cap, &reason))
+ reason = sk_filter_trim_cap(sk, skb, trim_cap);
+ if (reason)
goto discard_and_relse;
skb->dev = NULL;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ab415de32443..2fddc7b6b717 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2392,7 +2392,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
+ drop_reason = sk_filter_trim_cap(sk, skb, sizeof(struct udphdr));
+ if (drop_reason)
goto drop;
udp_csum_pull_header(skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d7cf4c9508b2..3fac9cb47ae0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -853,7 +853,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
+ drop_reason = sk_filter_trim_cap(sk, skb, sizeof(struct udphdr));
+ if (drop_reason)
goto drop;
udp_csum_pull_header(skb);
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 0276b393f0e5..3aff3c2d45a9 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -101,7 +101,6 @@ static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int framety
*/
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
- enum skb_drop_reason dr; /* ignored */
struct rose_sock *rose = rose_sk(sk);
int queued = 0;
@@ -163,7 +162,7 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (!sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN, &dr) &&
+ if (!sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) &&
__sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;