author     David S. Miller <davem@davemloft.net>   2023-06-07 10:09:05 +0100
committer  David S. Miller <davem@davemloft.net>   2023-06-07 10:09:05 +0100
commit     e3144ff52f7d2c884eef352cec9b9ff9acd2eb2f
tree       a5898632fd2ce3dd1f7dfca691a0a1f6dfdf1231
parent     ab39b113e74751958aac1b125a14ee42bd7d3efd
parent     5c3b74a92aa285a3df722bf6329ba7ccf70346d6
Merge branch 'rfs-lockless-annotate'
Eric Dumazet says:
====================
rfs: annotate lockless accesses
rfs runs without locks held, so we should annotate
reads and writes to shared variables.
It should prevent compilers from forcing writes
in the following situation:
	if (var != val)
		var = val;
A compiler could indeed simply avoid the conditional:
var = val;
This matters if var is shared between many CPUs
(a minimal standalone sketch of the annotated pattern follows the sign-off below).
v2: aligns one closing bracket (Simon)
adds Fixes: tags (Jakub)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
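For readers following along outside the kernel tree, here is a minimal standalone sketch of the annotated check-then-write pattern. The READ_ONCE()/WRITE_ONCE() definitions below are simplified volatile-cast stand-ins, not the kernel's real macros (which add type checks and handle more cases), and shared_ent/record_hint are invented names playing the roles of table->ents[index] and rps_record_sock_flow().

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel macros: a volatile access is
	 * enough to keep the compiler from eliding the read or inventing
	 * an unconditional store. (typeof is a GCC/Clang extension.)
	 */
	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

	static uint32_t shared_ent;	/* plays the role of table->ents[index] */

	static void record_hint(uint32_t val)
	{
		/* Annotated check-then-write: the store only happens when
		 * the value actually changes, so an unchanged entry does
		 * not dirty a cache line shared between many CPUs.
		 */
		if (READ_ONCE(shared_ent) != val)
			WRITE_ONCE(shared_ent, val);
	}

	int main(void)
	{
		record_hint(0x1234);
		printf("shared_ent = 0x%x\n", (unsigned)shared_ent);
		return 0;
	}

Without the annotations, the plain form "if (var != val) var = val;" may legally be collapsed by the compiler into an unconditional store, which is exactly the behaviour the series guards against.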
-rw-r--r--  include/linux/netdevice.h |  7
-rw-r--r--  include/net/sock.h        | 18
-rw-r--r--  net/core/dev.c            |  6
3 files changed, 22 insertions, 9 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 08fbd4622ccf..e6f22b7403d0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -768,8 +768,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
 		/* We only give a hint, preemption can change CPU under us */
 		val |= raw_smp_processor_id();
 
-		if (table->ents[index] != val)
-			table->ents[index] = val;
+		/* The following WRITE_ONCE() is paired with the READ_ONCE()
+		 * here, and another one in get_rps_cpu().
+		 */
+		if (READ_ONCE(table->ents[index]) != val)
+			WRITE_ONCE(table->ents[index], val);
 	}
 }
 
diff --git a/include/net/sock.h b/include/net/sock.h
index b418425d7230..6f428a7f3567 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1152,8 +1152,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
 		 * OR	an additional socket flag
 		 * [1] : sk_state and sk_prot are in the same cache line.
 		 */
-		if (sk->sk_state == TCP_ESTABLISHED)
-			sock_rps_record_flow_hash(sk->sk_rxhash);
+		if (sk->sk_state == TCP_ESTABLISHED) {
+			/* This READ_ONCE() is paired with the WRITE_ONCE()
+			 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+			 */
+			sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+		}
 	}
 #endif
 }
@@ -1162,15 +1166,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
 					const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-	if (unlikely(sk->sk_rxhash != skb->hash))
-		sk->sk_rxhash = skb->hash;
+	/* The following WRITE_ONCE() is paired with the READ_ONCE()
+	 * here, and another one in sock_rps_record_flow().
+	 */
+	if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+		WRITE_ONCE(sk->sk_rxhash, skb->hash);
 #endif
 }
 
 static inline void sock_rps_reset_rxhash(struct sock *sk)
 {
 #ifdef CONFIG_RPS
-	sk->sk_rxhash = 0;
+	/* Paired with READ_ONCE() in sock_rps_record_flow() */
+	WRITE_ONCE(sk->sk_rxhash, 0);
 #endif
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index b3c13e041935..1495f8aff288 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4471,8 +4471,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		u32 next_cpu;
 		u32 ident;
 
-		/* First check into global flow table if there is a match */
-		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+		/* First check into global flow table if there is a match.
+		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+		 */
+		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
 		if ((ident ^ hash) & ~rps_cpu_mask)
 			goto try_rps;
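For comparison only, and not part of this series: outside the kernel the same lockless check-then-write idiom is usually expressed with C11 relaxed atomics, which give roughly the same guarantee (no torn accesses, no compiler-invented stores, no ordering implied). The names flow_ent and record_flow below are invented for the example.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint32_t flow_ent;	/* one shared flow-table slot */

	static void record_flow(uint32_t val)
	{
		/* Relaxed ordering: only atomicity of the individual accesses
		 * is needed, not ordering against other memory operations.
		 */
		if (atomic_load_explicit(&flow_ent, memory_order_relaxed) != val)
			atomic_store_explicit(&flow_ent, val, memory_order_relaxed);
	}

	int main(void)
	{
		record_flow(0xabcdu);
		printf("flow_ent = 0x%x\n",
		       (unsigned)atomic_load_explicit(&flow_ent, memory_order_relaxed));
		return 0;
	}

The kernel keeps READ_ONCE()/WRITE_ONCE() rather than relaxed atomics here because the hint is advisory: a stale or racy value only costs a suboptimal CPU choice, never correctness.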