author     Ben Hutchings <bhutchings@solarflare.com>   2011-01-10 21:18:20 +0000
committer  Ben Hutchings <bhutchings@solarflare.com>   2011-02-15 19:45:35 +0000
commit     94b274bf5fba6c75b922c8a23ad4b5639a168780
tree       48f3bb2629ee14ba620a08098da1908d16bbe22f   /drivers/net/sfc/tx.c
parent     525da9072c28df815bff64bf00f3b11ab88face8
sfc: Add TX queues for high-priority traffic
Implement the ndo_setup_tc() operation with 2 traffic classes.

Current Solarstorm controllers do not implement TX queue priority, but
they do allow queues to be 'paced' with an enforced delay between
packets.  Paced and unpaced queues are scheduled in round-robin within
two separate hardware bins (paced queues with a large delay may be
placed into a third bin temporarily, but we won't use that).  If there
are queues in both bins, the TX scheduler will alternate between them.

If we make high-priority queues unpaced and best-effort queues paced,
and high-priority queues are mostly empty, a single high-priority queue
can then instantly take 50% of the packet rate regardless of how many
of the best-effort queues have descriptors outstanding.
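
To see where the 50% figure comes from: the TX scheduler alternates
between the two bins, not between individual queues, so a single busy
unpaced queue receives every other transmit slot while all paced queues
share the remaining slots.  A minimal stand-alone simulation of that
bin-level round-robin (illustrative only, not driver code; the queue
counts are assumed values):

/* Sketch: round-robin between an unpaced bin holding one high-priority
 * queue and a paced bin holding N_BE best-effort queues, assuming both
 * bins always have packets ready.  Not driver code.
 */
#include <stdio.h>

int main(void)
{
	enum { N_BE = 4 };		/* assumed best-effort queue count */
	int hp_pkts = 0, be_pkts[N_BE] = {0};
	int be_next = 0;

	for (int slot = 0; slot < 1000; slot++) {
		if (slot % 2 == 0) {
			hp_pkts++;		/* unpaced bin: the HP queue */
		} else {
			be_pkts[be_next]++;	/* paced bin: round-robin */
			be_next = (be_next + 1) % N_BE;
		}
	}
	/* Prints 500 vs ~125: 50% for the HP queue, regardless of N_BE */
	printf("high-priority: %d pkts, each best-effort: ~%d pkts\n",
	       hp_pkts, be_pkts[0]);
	return 0;
}
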
We do not actually want an enforced delay between packets on best-
effort queues, so we set the pace value to a reserved value that
actually results in a delay of 0.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
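
As an aside, the queue numbering that implements this policy can be
mirrored in a stand-alone sketch.  It assumes the driver constants of
this era (EFX_TXQ_TYPE_OFFLOAD = 1, EFX_TXQ_TYPE_HIGHPRI = 2,
EFX_TXQ_TYPES = 4) and an example n_tx_channels of 4; the forward
mapping follows efx_hard_start_xmit() in the diff below, the inverse
follows efx_init_tx_queue_core_txq():

/* Stand-alone illustration of the netdev-queue <-> hardware-queue
 * mapping; constants and channel count are assumptions, not a copy of
 * driver state.
 */
#include <assert.h>
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1	/* checksum offload requested */
#define EFX_TXQ_TYPE_HIGHPRI 2	/* high-priority traffic class */
#define EFX_TXQ_TYPES        4

static const unsigned n_tx_channels = 4;	/* assumed channel count */

/* Forward map, as in efx_hard_start_xmit(): netdev queue index ->
 * hardware queue number (channel * EFX_TXQ_TYPES + type bits).
 */
static unsigned hw_queue(unsigned core_idx, int csum_offload)
{
	unsigned index = core_idx;
	unsigned type = csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0;

	if (index >= n_tx_channels) {
		index -= n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	return index * EFX_TXQ_TYPES + type;
}

/* Inverse map, as in efx_init_tx_queue_core_txq() */
static unsigned core_index(unsigned queue)
{
	return queue / EFX_TXQ_TYPES +
	       ((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
}

int main(void)
{
	for (unsigned i = 0; i < 2 * n_tx_channels; i++) {
		assert(core_index(hw_queue(i, 0)) == i);
		assert(core_index(hw_queue(i, 1)) == i);
		printf("netdev queue %u -> hw queue %u (csum) / %u (none)\n",
		       i, hw_queue(i, 1), hw_queue(i, 0));
	}
	return 0;
}

The round trip checked in main() is the "must be inverse" property the
patch comments on: the netdev layout puts all high-priority queues
after the best-effort ones, while the hardware numbering keeps both
types belonging to one channel adjacent.
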
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--  drivers/net/sfc/tx.c  87
1 file changed, 82 insertions(+), 5 deletions(-)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 7e463fb19fb9..1a51653bb92b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,22 +336,89 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
 
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 {
+	struct efx_nic *efx = tx_queue->efx;
+
 	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
-	tx_queue->core_txq = netdev_get_tx_queue(
-		tx_queue->efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES +
+				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused.  We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues.  Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -437,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -459,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
 
@@ -473,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 
 	efx_nic_remove_tx(tx_queue);
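
For reference, a small stand-alone sketch of the netdev queue layout
that efx_setup_tc() establishes for num_tc = 2 (n_tx_channels = 4 is an
assumed example value, not read from hardware):

/* Sketch of the tc_to_txq table efx_setup_tc() fills in; values are
 * illustrative only.
 */
#include <stdio.h>

struct txq_range { unsigned offset, count; };

int main(void)
{
	const unsigned n_tx_channels = 4, num_tc = 2;
	struct txq_range tc_to_txq[2];

	for (unsigned tc = 0; tc < num_tc; tc++) {
		tc_to_txq[tc].offset = tc * n_tx_channels;
		tc_to_txq[tc].count = n_tx_channels;
		printf("tc %u -> netdev queues %u..%u\n", tc,
		       tc_to_txq[tc].offset,
		       tc_to_txq[tc].offset + tc_to_txq[tc].count - 1);
	}
	/* real_num_tx_queues grows to num_tc * n_tx_channels = 8 */
	printf("real_num_tx_queues = %u\n", num_tc * n_tx_channels);
	return 0;
}

In practice ndo_setup_tc() would be reached from userspace through a
multiqueue-aware qdisc such as mqprio (merged in the same development
cycle), with something like "tc qdisc add dev eth0 root mqprio num_tc 2";
the exact invocation depends on the iproute2 version.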