author    Paolo Abeni <pabeni@redhat.com>        2017-09-19 12:11:43 +0200
committer David S. Miller <davem@davemloft.net>  2017-09-20 14:28:52 -0700
commit    0d4a6608f68c7532dcbfec2ea1150c9761767d03 (patch)
tree      26373be5bf2c3173f3eae9b0cbe69d5a083c1c0a /net/ipv4/udp.c
parent    186b3c998c50fc241b51b905081c748455d16b4a (diff)
udp: do rmem bulk free even if the rx sk queue is empty
Commit 6b229cf77d68 ("udp: add batching to udp_rmem_release()") greatly
reduced the cacheline contention between the BH and the user-space reader
by batching the rmem updates in most scenarios.

Such optimization is explicitly avoided if the user-space reader is faster
than BH processing. My fault: I initially suggested this kind of behavior
due to concerns of possible regressions with small sk_rcvbuf values.

Tests showed such concerns are misplaced, so this commit relaxes the
condition for rmem bulk updates, obtaining a small but measurable
performance gain in the scenario described above.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--  net/ipv4/udp.c | 3 +-
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ef29df8648e4..784ced0b9150 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1212,8 +1212,7 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (likely(partial)) {
 		up->forward_deficit += size;
 		size = up->forward_deficit;
-		if (size < (sk->sk_rcvbuf >> 2) &&
-		    !skb_queue_empty(&up->reader_queue))
+		if (size < (sk->sk_rcvbuf >> 2))
 			return;
 	} else {
 		size += up->forward_deficit;
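
For context, here is a minimal, self-contained user-space sketch of the
batching scheme this patch relaxes. The struct fake_sock type, its plain
int fields, and the rmem_release() helper are simplified stand-ins
invented for illustration; the real udp_rmem_release() operates on
struct sock / struct udp_sock with atomic memory accounting.

/* sketch.c: illustrative model of UDP rmem bulk-free batching */
#include <stdio.h>

struct fake_sock {
	int sk_rcvbuf;		/* receive buffer limit */
	int sk_rmem_alloc;	/* receive memory currently charged */
	int forward_deficit;	/* freed bytes not yet uncharged */
};

/* Release "size" bytes of rmem. On the partial (fast) path, defer the
 * expensive uncharge until at least sk_rcvbuf/4 bytes have accumulated,
 * so the contended counter is touched once per batch instead of once
 * per packet. */
static void rmem_release(struct fake_sock *sk, int size, int partial)
{
	if (partial) {
		sk->forward_deficit += size;
		size = sk->forward_deficit;
		/* After this patch: batch whenever the deficit is small,
		 * regardless of whether the reader queue is empty. */
		if (size < (sk->sk_rcvbuf >> 2))
			return;
	} else {
		size += sk->forward_deficit;
	}
	sk->sk_rmem_alloc -= size;	/* the costly, contended update */
	sk->forward_deficit = 0;
}

int main(void)
{
	struct fake_sock sk = { .sk_rcvbuf = 4096, .sk_rmem_alloc = 3000 };

	rmem_release(&sk, 500, 1);	/* deferred: 500 < 1024 */
	printf("deficit=%d rmem=%d\n", sk.forward_deficit, sk.sk_rmem_alloc);
	rmem_release(&sk, 600, 1);	/* 1100 >= 1024: bulk release */
	printf("deficit=%d rmem=%d\n", sk.forward_deficit, sk.sk_rmem_alloc);
	return 0;
}

Before this patch, the early return additionally required
!skb_queue_empty(&up->reader_queue), so a reader that kept its queue
drained paid the uncharge cost on nearly every dequeue; dropping that
check lets the deficit accumulate to sk_rcvbuf/4 even when the reader
outpaces BH processing.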