author     David S. Miller <davem@sunset.davemloft.net>   2007-01-24 15:21:02 -0800
committer  David S. Miller <davem@sunset.davemloft.net>   2007-01-24 15:21:02 -0800
commit     dbcb5855d108b7fa20ab42567a5412ce9dcd776a (patch)
tree       b1c165e43ef28d9e22c118e8cbb983431ace3c7a
parent     6640e69731b42fd5e3d2b26201c8b34fc897a0ee (diff)
[AF_PACKET]: Fix BPF handling.
This fixes a bug introduced by:
commit fda9ef5d679b07c9d9097aaf6ef7f069d794a8f9
Author: Dmitry Mishin <dim@openvz.org>
Date: Thu Aug 31 15:28:39 2006 -0700
[NET]: Fix sk->sk_filter field access
sk_run_filter() returns either 0 or an unsigned 32-bit
length which says how much of the packet to retain.
If that 32-bit unsigned integer is larger than the packet,
this is fine; we just leave the packet unchanged.
The above commit made every filter return value that is
negative when interpreted as a signed integer indicate a
packet drop, which is wrong.
Based upon a report and initial patch by Raivis Bucis.
Signed-off-by: David S. Miller <davem@davemloft.net>
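
To make the failure mode concrete, here is a minimal standalone C sketch (not part of the patch; the 0xFFFFFFFF verdict is an illustrative assumption) of how an unsigned filter verdict with the high bit set turns into a bogus negative value once it is stored in a signed int, which is what the pre-fix run_filter()/caller pair did:

    /* sketch.c -- illustrative only, not kernel code */
    #include <stdio.h>

    /* Stand-in for sk_run_filter(): a filter may legitimately return a
     * length larger than the packet, e.g. (u32)-1 for "keep everything". */
    static unsigned int filter_verdict(void)
    {
            return 0xFFFFFFFFu;
    }

    int main(void)
    {
            int old_res = filter_verdict();          /* pre-fix: signed int err */
            unsigned int new_res = filter_verdict(); /* post-fix: unsigned res  */

            /* On the usual two's-complement ABI the unsigned verdict wraps
             * to -1, so the caller's old "< 0 means drop" test discards
             * the packet. */
            if (old_res < 0)
                    printf("old logic: %d < 0 -> drop\n", old_res);

            /* With the fix the verdict stays unsigned: 0 means drop,
             * anything else is a snap length to clamp against. */
            if (new_res != 0)
                    printf("new logic: keep, snaplen = min(snaplen, %u)\n", new_res);
            return 0;
    }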
-rw-r--r--  net/packet/af_packet.c | 30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index da73e8a8c18d..594c078c5ebc 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -428,24 +428,18 @@ out_unlock:
 }
 #endif
 
-static inline int run_filter(struct sk_buff *skb, struct sock *sk,
-                             unsigned *snaplen)
+static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
+                                      unsigned int res)
 {
         struct sk_filter *filter;
-        int err = 0;
 
         rcu_read_lock_bh();
         filter = rcu_dereference(sk->sk_filter);
-        if (filter != NULL) {
-                err = sk_run_filter(skb, filter->insns, filter->len);
-                if (!err)
-                        err = -EPERM;
-                else if (*snaplen > err)
-                        *snaplen = err;
-        }
+        if (filter != NULL)
+                res = sk_run_filter(skb, filter->insns, filter->len);
         rcu_read_unlock_bh();
 
-        return err;
+        return res;
 }
 
 /*
@@ -467,7 +461,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
         struct packet_sock *po;
         u8 * skb_head = skb->data;
         int skb_len = skb->len;
-        unsigned snaplen;
+        unsigned int snaplen, res;
 
         if (skb->pkt_type == PACKET_LOOPBACK)
                 goto drop;
@@ -495,8 +489,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
 
         snaplen = skb->len;
 
-        if (run_filter(skb, sk, &snaplen) < 0)
+        res = run_filter(skb, sk, snaplen);
+        if (!res)
                 goto drop_n_restore;
+        if (snaplen > res)
+                snaplen = res;
 
         if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
             (unsigned)sk->sk_rcvbuf)
@@ -568,7 +565,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
         struct tpacket_hdr *h;
         u8 * skb_head = skb->data;
         int skb_len = skb->len;
-        unsigned snaplen;
+        unsigned int snaplen, res;
         unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
         unsigned short macoff, netoff;
         struct sk_buff *copy_skb = NULL;
@@ -592,8 +589,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
 
         snaplen = skb->len;
 
-        if (run_filter(skb, sk, &snaplen) < 0)
+        res = run_filter(skb, sk, snaplen);
+        if (!res)
                 goto drop_n_restore;
+        if (snaplen > res)
+                snaplen = res;
 
         if (sk->sk_type == SOCK_DGRAM) {
                 macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
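
For context, a hypothetical userspace sketch of the receive path these hunks fix: a classic BPF program attached with SO_ATTACH_FILTER whose accept verdict is 0xFFFFFFFF. Before this patch, every frame on such a packet socket was dropped by packet_rcv()/tpacket_rcv(); with it, the verdict stays unsigned, only 0 means drop, and snaplen is clamped when the verdict is smaller. The program below is an assumption-laden illustration (not the reporter's actual filter) and needs CAP_NET_RAW to run:

    /* attach_filter.c -- hypothetical example, requires CAP_NET_RAW */
    #include <stdio.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <linux/if_ether.h>
    #include <linux/filter.h>

    int main(void)
    {
            /* One-instruction classic BPF program: accept, asking to keep
             * 0xFFFFFFFF bytes, i.e. the whole packet. */
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_RET | BPF_K, 0xFFFFFFFFu),
            };
            struct sock_fprog prog = {
                    .len    = sizeof(insns) / sizeof(insns[0]),
                    .filter = insns,
            };
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

            if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                                     &prog, sizeof(prog)) < 0) {
                    perror("packet socket / SO_ATTACH_FILTER");
                    return 1;
            }
            /* recvfrom(fd, ...) now sees every frame; before the fix the
             * in-kernel verdict looked negative and nothing was delivered. */
            return 0;
    }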