author		Zhu Yanjun <yanjun.zhu@oracle.com>	2017-11-10 21:10:00 -0500
committer	David S. Miller <davem@davemloft.net>	2017-11-13 10:40:19 +0900
commit		0d728b844c2dd8dd3875ed304eee43967c5d14f6 (patch)
tree		37f4a74f34fc52147e5b855c21a1985138c122e0 /drivers/net/ethernet
parent		6afce196236c353819526a1c41fffb6d660eb2ef (diff)
forcedeth: remove redundant assignments in xmit
In the xmit path, prev_tx and prev_tx_ctx are assigned on every iteration of the descriptor-filling loops, although only the values from the final iteration are ever used. It is enough to set them once, after the loops, by deriving the previous ring entry from the final put pointer. In long-duration testing, throughput is better than before.

CC: Srinivas Eeda <srinivas.eeda@oracle.com>
CC: Joe Jin <joe.jin@oracle.com>
CC: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
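The idea, as a minimal standalone C sketch (toy types and names, not the driver's DMA-mapped ring state): derive the previous ring entry once from the final put pointer, wrapping to the tail when the pointer sits at the head, instead of recording it on every loop iteration.

	/* toy descriptor ring; hypothetical stand-in for the driver's tx ring */
	struct desc { unsigned int flaglen; };

	#define RING_SIZE 8
	static struct desc ring[RING_SIZE];
	static struct desc *const first_tx = &ring[0];
	static struct desc *const last_tx  = &ring[RING_SIZE - 1];

	/* put points one past the most recently filled descriptor */
	static struct desc *prev_entry(struct desc *put)
	{
		if (put == first_tx)
			return last_tx;	/* wrapped: previous entry is the ring tail */
		return put - 1;
	}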
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/nvidia/forcedeth.c	28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 31a943860f32..ac8439ceea10 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2226,8 +2226,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* setup the header buffer */
 	do {
-		prev_tx = put_tx;
-		prev_tx_ctx = np->put_tx_ctx;
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 						     skb->data + offset, bcnt,
@@ -2262,8 +2260,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		offset = 0;
 		do {
-			prev_tx = put_tx;
-			prev_tx_ctx = np->put_tx_ctx;
 			if (!start_tx_ctx)
 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
@@ -2304,6 +2300,16 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		} while (frag_size);
 	}
 
+	if (unlikely(put_tx == np->first_tx.orig))
+		prev_tx = np->last_tx.orig;
+	else
+		prev_tx = put_tx - 1;
+
+	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+		prev_tx_ctx = np->last_tx_ctx;
+	else
+		prev_tx_ctx = np->put_tx_ctx - 1;
+
 	/* set last fragment flag */
 	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
@@ -2377,8 +2383,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	/* setup the header buffer */
 	do {
-		prev_tx = put_tx;
-		prev_tx_ctx = np->put_tx_ctx;
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 						     skb->data + offset, bcnt,
@@ -2414,8 +2418,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		offset = 0;
 		do {
-			prev_tx = put_tx;
-			prev_tx_ctx = np->put_tx_ctx;
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			if (!start_tx_ctx)
 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
@@ -2456,6 +2458,16 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		} while (frag_size);
 	}
 
+	if (unlikely(put_tx == np->first_tx.ex))
+		prev_tx = np->last_tx.ex;
+	else
+		prev_tx = put_tx - 1;
+
+	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+		prev_tx_ctx = np->last_tx_ctx;
+	else
+		prev_tx_ctx = np->put_tx_ctx - 1;
+
 	/* set last fragment flag */
 	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
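For context, a rough before/after of the loop shape under the same toy-ring assumptions as in the sketch above (fill() is a hypothetical stand-in for the per-descriptor setup the driver does each iteration), runnable as a plain C program:

	#include <stdio.h>

	struct desc { unsigned int flaglen; };

	#define RING_SIZE 8
	static struct desc ring[RING_SIZE];
	static struct desc *const first_tx = &ring[0];
	static struct desc *const last_tx  = &ring[RING_SIZE - 1];

	static void fill(struct desc *d) { d->flaglen = 1; }

	int main(void)
	{
		struct desc *put = &ring[6];	/* start near the end to force a wrap */
		struct desc *prev;
		int n = 3;			/* fills ring[6], ring[7], ring[0] */

		/* before the patch, the loop body also did: prev = put; every pass */
		do {
			fill(put);
			put = (put == last_tx) ? first_tx : put + 1;
		} while (--n);

		/* after the patch: derive the last-filled entry once, post-loop */
		prev = (put == first_tx) ? last_tx : put - 1;

		printf("last filled: index %td\n", prev - ring);	/* prints 0 */
		return 0;
	}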