author     Stanislaw Gruszka <sgruszka@redhat.com>    2011-03-25 01:21:51 +0000
committer  David S. Miller <davem@davemloft.net>      2011-03-27 23:35:03 -0700
commit     b3cd965739b8677f6e04913aa535fa156b09e73d (patch)
tree       1cb6bb7af7f81fc5ace5ad520408007590336dc5
parent     edf947f10074fea27fdb1730524dca59355a1c40 (diff)
myri10ge: small rx_done refactoring
Avoid a theoretical race condition when accessing the NETIF_F_LRO flag in
dev->features, illustrated below:

CPU1                                    CPU2

myri10ge_clean_rx_done():               myri10ge_set_flags():
                                        or myri10ge_set_rx_csum():
if (dev->features & NETIF_F_LRO)
        setup lro
                                        dev->features |= NETIF_F_LRO
                                        or
                                        dev->features &= ~NETIF_F_LRO;
if (dev->features & NETIF_F_LRO)
        flush lro

On the way, reduce the number of myri10ge_rx_done() arguments and call sites
by moving the mgp->small_bytes check into that function. That reduces the
code size from:

   text    data     bss     dec     hex  filename
  36644     248     100   36992    9080  drivers/net/myri10ge/myri10ge.o

to:

   text    data     bss     dec     hex  filename
  36037     247     100   36384    8e20  drivers/net/myri10ge/myri10ge.o

on my i686 system, which should also make myri10ge_clean_rx_done() faster.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 37
1 file changed, 23 insertions(+), 14 deletions(-)
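The race described in the commit message above comes from reading dev->features
more than once per poll. The following is a minimal user-space C sketch, not
driver code: fake_features, poll_racy() and poll_snapshot() are hypothetical
stand-ins for dev->features and myri10ge_clean_rx_done(), used only to contrast
the racy double read with the single-snapshot read the patch introduces.

/*
 * Illustrative sketch only -- not myri10ge driver code.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_LRO_FLAG 0x1u

/* Shared flag word that an ethtool-style writer may change at runtime. */
static volatile unsigned int fake_features = FAKE_LRO_FLAG;

/* Racy pattern: two independent loads of the shared flag word.  If the
 * writer clears the flag between them, LRO is set up but never flushed. */
static void poll_racy(void)
{
        if (fake_features & FAKE_LRO_FLAG)
                printf("setup lro\n");
        /* ... receive processing; writer may flip the flag here ... */
        if (fake_features & FAKE_LRO_FLAG)
                printf("flush lro\n");
}

/* Patched pattern: take one snapshot, so both checks agree. */
static void poll_snapshot(void)
{
        bool lro_enabled = fake_features & FAKE_LRO_FLAG;

        if (lro_enabled)
                printf("setup lro\n");
        /* ... receive processing ... */
        if (lro_enabled)
                printf("flush lro\n");
}

int main(void)
{
        poll_racy();
        poll_snapshot();
        return 0;
}

In the actual patch below, the snapshot is taken once per call as
ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO, so the compiler cannot emit a
second load of ->features inside myri10ge_clean_rx_done().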
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 1f4e8680a96a..673dc600c891 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1312,17 +1312,26 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
  * page into an skb */
 
 static inline int
-myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
-                 int bytes, int len, __wsum csum)
+myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
+                 int lro_enabled)
 {
         struct myri10ge_priv *mgp = ss->mgp;
         struct sk_buff *skb;
         struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
-        int i, idx, hlen, remainder;
+        struct myri10ge_rx_buf *rx;
+        int i, idx, hlen, remainder, bytes;
         struct pci_dev *pdev = mgp->pdev;
         struct net_device *dev = mgp->dev;
         u8 *va;
 
+        if (len <= mgp->small_bytes) {
+                rx = &ss->rx_small;
+                bytes = mgp->small_bytes;
+        } else {
+                rx = &ss->rx_big;
+                bytes = mgp->big_bytes;
+        }
+
         len += MXGEFW_PAD;
         idx = rx->cnt & rx->mask;
         va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
@@ -1341,7 +1350,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
                 remainder -= MYRI10GE_ALLOC_SIZE;
         }
 
-        if (dev->features & NETIF_F_LRO) {
+        if (lro_enabled) {
                 rx_frags[0].page_offset += MXGEFW_PAD;
                 rx_frags[0].size -= MXGEFW_PAD;
                 len -= MXGEFW_PAD;
@@ -1463,7 +1472,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
 {
         struct myri10ge_rx_done *rx_done = &ss->rx_done;
         struct myri10ge_priv *mgp = ss->mgp;
-        struct net_device *netdev = mgp->dev;
+
         unsigned long rx_bytes = 0;
         unsigned long rx_packets = 0;
         unsigned long rx_ok;
@@ -1474,18 +1483,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
         u16 length;
         __wsum checksum;
 
+        /*
+         * Prevent compiler from generating more than one ->features memory
+         * access to avoid theoretical race condition with functions that
+         * change NETIF_F_LRO flag at runtime.
+         */
+        bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+
         while (rx_done->entry[idx].length != 0 && work_done < budget) {
                 length = ntohs(rx_done->entry[idx].length);
                 rx_done->entry[idx].length = 0;
                 checksum = csum_unfold(rx_done->entry[idx].checksum);
-                if (length <= mgp->small_bytes)
-                        rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
-                                                 mgp->small_bytes,
-                                                 length, checksum);
-                else
-                        rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
-                                                 mgp->big_bytes,
-                                                 length, checksum);
+                rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
                 rx_packets += rx_ok;
                 rx_bytes += rx_ok * (unsigned long)length;
                 cnt++;
@@ -1497,7 +1506,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
         ss->stats.rx_packets += rx_packets;
         ss->stats.rx_bytes += rx_bytes;
 
-        if (netdev->features & NETIF_F_LRO)
+        if (lro_enabled)
                 lro_flush_all(&rx_done->lro_mgr);
 
         /* restock receive rings if needed */