author:    Alexandre Rames <arames@solarflare.com>    2014-07-22 14:03:25 +0100
committer: David S. Miller <davem@davemloft.net>      2014-07-22 19:55:20 -0700
commit:    36763266bbe8a2e93a7639b99bac2fee2c42bc5b (patch)
tree:      5fdf487376e5f0f8dedb6fa395eb7c150e00b789
parent:    2dbe82d0d11836279703dcf2679e711e18c23eec (diff)
sfc: Add support for busy polling
This patch adds the sfc driver code for implementing busy polling.
It adds an ndo_busy_poll method and the locking between it and NAPI polling.
It also adds each NAPI instance to the napi_hash right after netif_napi_add().
The self tests now use efx_start_eventq() and efx_stop_eventq(), so that
NAPI and busy polling are quiesced through the same paths.
Signed-off-by: Shradha Shah <sshah@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
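
For context: busy polling (CONFIG_NET_RX_BUSY_POLL) lets a socket spin in the driver's ndo_busy_poll hook instead of sleeping until the next interrupt, trading CPU for latency. It is opt-in from user space, either globally via the net.core.busy_read/busy_poll sysctls or per socket with SO_BUSY_POLL. The sketch below is illustrative only and not part of this patch; enable_busy_poll() is an invented helper name, while SO_BUSY_POLL is the real socket option (Linux 3.11+, and on kernels of this era setting it required CAP_NET_ADMIN).

/* Illustrative sketch, not part of this patch: opt one socket into busy
 * polling. While a read on this socket waits for data, the kernel will spin
 * calling the device's ndo_busy_poll hook (efx_busy_poll() below for sfc).
 */
#include <stdio.h>
#include <sys/socket.h>

static int enable_busy_poll(int sockfd)
{
	int busy_poll_usecs = 50;	/* spin up to 50us per blocking read */

	if (setsockopt(sockfd, SOL_SOCKET, SO_BUSY_POLL,
		       &busy_poll_usecs, sizeof(busy_poll_usecs)) < 0) {
		perror("setsockopt(SO_BUSY_POLL)");
		return -1;
	}
	return 0;
}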
-rw-r--r--   drivers/net/ethernet/sfc/efx.c          |  54
-rw-r--r--   drivers/net/ethernet/sfc/efx.h          |   2
-rw-r--r--   drivers/net/ethernet/sfc/net_driver.h   | 148
-rw-r--r--   drivers/net/ethernet/sfc/rx.c           |   6
-rw-r--r--   drivers/net/ethernet/sfc/selftest.c     |   5
5 files changed, 208 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4b80c0be6e57..4cebe9d37816 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -272,6 +272,9 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	struct efx_nic *efx = channel->efx;
 	int spent;
 
+	if (!efx_channel_lock_napi(channel))
+		return budget;
+
 	netif_vdbg(efx, intr, efx->net_dev,
 		   "channel %d NAPI poll executing on CPU %d\n",
 		   channel->channel, raw_smp_processor_id());
@@ -311,6 +314,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 		efx_nic_eventq_read_ack(channel);
 	}
 
+	efx_channel_unlock_napi(channel);
 	return spent;
 }
 
@@ -357,7 +361,7 @@ static int efx_init_eventq(struct efx_channel *channel)
 }
 
 /* Enable event queue processing and NAPI */
-static void efx_start_eventq(struct efx_channel *channel)
+void efx_start_eventq(struct efx_channel *channel)
 {
 	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
 		  "chan %d start event queue\n", channel->channel);
@@ -366,17 +370,20 @@ static void efx_start_eventq(struct efx_channel *channel)
 	channel->enabled = true;
 	smp_wmb();
 
+	efx_channel_enable(channel);
 	napi_enable(&channel->napi_str);
 	efx_nic_eventq_read_ack(channel);
 }
 
 /* Disable event queue processing and NAPI */
-static void efx_stop_eventq(struct efx_channel *channel)
+void efx_stop_eventq(struct efx_channel *channel)
 {
 	if (!channel->enabled)
 		return;
 
 	napi_disable(&channel->napi_str);
+	while (!efx_channel_disable(channel))
+		usleep_range(1000, 20000);
 	channel->enabled = false;
 }
 
@@ -1960,6 +1967,8 @@ static void efx_init_napi_channel(struct efx_channel *channel)
 	channel->napi_dev = efx->net_dev;
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
+	napi_hash_add(&channel->napi_str);
+	efx_channel_init_lock(channel);
 }
 
 static void efx_init_napi(struct efx_nic *efx)
@@ -1972,8 +1981,10 @@ static void efx_init_napi(struct efx_nic *efx)
 
 static void efx_fini_napi_channel(struct efx_channel *channel)
 {
-	if (channel->napi_dev)
+	if (channel->napi_dev) {
 		netif_napi_del(&channel->napi_str);
+		napi_hash_del(&channel->napi_str);
+	}
 	channel->napi_dev = NULL;
 }
 
@@ -2008,6 +2019,37 @@ static void efx_netpoll(struct net_device *net_dev)
 
 #endif
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int efx_busy_poll(struct napi_struct *napi)
+{
+	struct efx_channel *channel =
+		container_of(napi, struct efx_channel, napi_str);
+	struct efx_nic *efx = channel->efx;
+	int budget = 4;
+	int old_rx_packets, rx_packets;
+
+	if (!netif_running(efx->net_dev))
+		return LL_FLUSH_FAILED;
+
+	if (!efx_channel_lock_poll(channel))
+		return LL_FLUSH_BUSY;
+
+	old_rx_packets = channel->rx_queue.rx_packets;
+	efx_process_channel(channel, budget);
+
+	rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
+
+	/* There is no race condition with NAPI here.
+	 * NAPI will automatically be rescheduled if it yielded during busy
+	 * polling, because it was not able to take the lock and thus returned
+	 * the full budget.
+	 */
+	efx_channel_unlock_poll(channel);
+
+	return rx_packets;
+}
+#endif
+
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2177,6 +2219,9 @@ static const struct net_device_ops efx_farch_netdev_ops = {
 	.ndo_poll_controller	= efx_netpoll,
 #endif
 	.ndo_setup_tc		= efx_setup_tc,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= efx_busy_poll,
+#endif
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= efx_filter_rfs,
 #endif
@@ -2197,6 +2242,9 @@ static const struct net_device_ops efx_ef10_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= efx_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= efx_busy_poll,
+#endif
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= efx_filter_rfs,
 #endif
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b41601e052d6..2587c582a821 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -194,6 +194,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 			    bool rx_may_override_tx);
 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
 			    unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
 
 /* Dummy PHY ops for PHY drivers */
 int efx_port_dummy_op_int(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index fb2e3bfeb2c2..9ede32064685 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
 
 #include "enum.h"
 #include "bitfield.h"
@@ -387,6 +388,8 @@ enum efx_sync_events_state {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
+ * @state: state for NAPI vs busy polling
+ * @state_lock: lock protecting @state
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
@@ -424,6 +427,22 @@ struct efx_channel {
 	unsigned int irq_moderation;
 	struct net_device *napi_dev;
 	struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int state;
+	spinlock_t state_lock;
+#define EFX_CHANNEL_STATE_IDLE		0
+#define EFX_CHANNEL_STATE_NAPI		(1 << 0)  /* NAPI owns this channel */
+#define EFX_CHANNEL_STATE_POLL		(1 << 1)  /* poll owns this channel */
+#define EFX_CHANNEL_STATE_DISABLED	(1 << 2)  /* channel is disabled */
+#define EFX_CHANNEL_STATE_NAPI_YIELD	(1 << 3)  /* NAPI yielded this channel */
+#define EFX_CHANNEL_STATE_POLL_YIELD	(1 << 4)  /* poll yielded this channel */
+#define EFX_CHANNEL_OWNED \
+	(EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
+#define EFX_CHANNEL_LOCKED \
+	(EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
+#define EFX_CHANNEL_USER_PEND \
+	(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 	struct efx_special_buffer eventq;
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
@@ -457,6 +476,135 @@ struct efx_channel {
 	u32 sync_timestamp_minor;
 };
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+	spin_lock_init(&channel->state_lock);
+}
+
+/* Called from the device poll routine to get ownership of a channel.
+ */
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+	bool rc = true;
+
+	spin_lock_bh(&channel->state_lock);
+	if (channel->state & EFX_CHANNEL_LOCKED) {
+		WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+		channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
+		rc = false;
+	} else {
+		/* we don't care if someone yielded */
+		channel->state = EFX_CHANNEL_STATE_NAPI;
+	}
+	spin_unlock_bh(&channel->state_lock);
+	return rc;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+	spin_lock_bh(&channel->state_lock);
+	WARN_ON(channel->state &
+		(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
+
+	channel->state &= EFX_CHANNEL_STATE_DISABLED;
+	spin_unlock_bh(&channel->state_lock);
+}
+
+/* Called from efx_busy_poll(). */
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+	bool rc = true;
+
+	spin_lock_bh(&channel->state_lock);
+	if ((channel->state & EFX_CHANNEL_LOCKED)) {
+		channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
+		rc = false;
+	} else {
+		/* preserve yield marks */
+		channel->state |= EFX_CHANNEL_STATE_POLL;
+	}
+	spin_unlock_bh(&channel->state_lock);
+	return rc;
+}
+
+/* Returns true if NAPI tried to get the channel while it was locked. */
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+	spin_lock_bh(&channel->state_lock);
+	WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+
+	/* will reset state to idle, unless channel is disabled */
+	channel->state &= EFX_CHANNEL_STATE_DISABLED;
+	spin_unlock_bh(&channel->state_lock);
+}
+
+/* True if a socket is polling, even if it did not get the lock. */
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+	WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
+	return channel->state & EFX_CHANNEL_USER_PEND;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+	spin_lock_bh(&channel->state_lock);
+	channel->state = EFX_CHANNEL_STATE_IDLE;
+	spin_unlock_bh(&channel->state_lock);
+}
+
+/* False if the channel is currently owned.
+ */
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+	bool rc = true;
+
+	spin_lock_bh(&channel->state_lock);
+	if (channel->state & EFX_CHANNEL_OWNED)
+		rc = false;
+	channel->state |= EFX_CHANNEL_STATE_DISABLED;
+	spin_unlock_bh(&channel->state_lock);
+
+	return rc;
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+	return true;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+	return false;
+}
+
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+	return false;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+	return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * struct efx_msi_context - Context for each MSI
  * @efx: The associated NIC
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a7bb63a7a521..c0ad95d2f63d 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -462,6 +462,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 
 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
 
+	skb_mark_napi_id(skb, &channel->napi_str);
 	gro_result = napi_gro_frags(napi);
 	if (gro_result != GRO_DROP)
 		channel->irq_mod_score += 2;
@@ -520,6 +521,8 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
 	/* Move past the ethernet header */
 	skb->protocol = eth_type_trans(skb, efx->net_dev);
 
+	skb_mark_napi_id(skb, &channel->napi_str);
+
 	return skb;
 }
 
@@ -666,7 +669,8 @@ void __efx_rx_packet(struct efx_channel *channel)
 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
 		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
+	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
+	    !efx_channel_busy_polling(channel))
 		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
 	else
 		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 0fc5baef45b1..10b6173d557d 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -188,7 +188,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx,
 		schedule_timeout_uninterruptible(wait);
 
 		efx_for_each_channel(channel, efx) {
-			napi_disable(&channel->napi_str);
+			efx_stop_eventq(channel);
 			if (channel->eventq_read_ptr !=
 			    read_ptr[channel->channel]) {
 				set_bit(channel->channel, &napi_ran);
@@ -200,8 +200,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx,
 				if (efx_nic_event_test_irq_cpu(channel) >= 0)
 					clear_bit(channel->channel, &int_pend);
 			}
-			napi_enable(&channel->napi_str);
-			efx_nic_eventq_read_ack(channel);
+			efx_start_eventq(channel);
 		}
 
 		wait *= 2;
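
The channel state machine added to net_driver.h can be hard to follow inside the diff. Below is a simplified userspace model of the same ownership protocol, using a pthread mutex in place of spin_lock_bh and omitting the NAPI_YIELD bookkeeping and WARN_ONs; struct chan and the main() harness are invented for illustration. The property it demonstrates is the one the in-diff comment relies on: whichever side loses the race returns immediately (NAPI reports a full budget so it is rescheduled; busy poll returns LL_FLUSH_BUSY), so neither ever blocks on the other.

/* Simplified userspace sketch of the efx channel locking protocol.
 * Not the driver code: names mirror the patch, harness is invented.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define STATE_IDLE		0
#define STATE_NAPI		(1 << 0)	/* NAPI owns the channel */
#define STATE_POLL		(1 << 1)	/* busy poll owns the channel */
#define STATE_DISABLED		(1 << 2)	/* channel is being disabled */
#define STATE_POLL_YIELD	(1 << 4)	/* poll lost a race and yielded */
#define STATE_OWNED		(STATE_NAPI | STATE_POLL)
#define STATE_LOCKED		(STATE_OWNED | STATE_DISABLED)

struct chan {
	pthread_mutex_t lock;	/* stands in for spin_lock_bh(&state_lock) */
	unsigned int state;
};

/* NAPI path: on failure the caller returns the full budget, which makes
 * the kernel reschedule NAPI later -- it never spins here.
 */
static bool chan_lock_napi(struct chan *c)
{
	bool rc = true;

	pthread_mutex_lock(&c->lock);
	if (c->state & STATE_LOCKED)
		rc = false;		/* a poller owns it: yield */
	else
		c->state = STATE_NAPI;
	pthread_mutex_unlock(&c->lock);
	return rc;
}

/* Busy-poll path: on failure the caller returns LL_FLUSH_BUSY. */
static bool chan_lock_poll(struct chan *c)
{
	bool rc = true;

	pthread_mutex_lock(&c->lock);
	if (c->state & STATE_LOCKED) {
		c->state |= STATE_POLL_YIELD;	/* remember we tried */
		rc = false;
	} else {
		c->state |= STATE_POLL;
	}
	pthread_mutex_unlock(&c->lock);
	return rc;
}

static void chan_unlock(struct chan *c)
{
	pthread_mutex_lock(&c->lock);
	/* Drop ownership but preserve a DISABLED mark, as the driver does. */
	c->state &= STATE_DISABLED;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chan c = { PTHREAD_MUTEX_INITIALIZER, STATE_IDLE };

	printf("poll lock: %d\n", chan_lock_poll(&c));	/* 1: got it */
	printf("napi lock: %d\n", chan_lock_napi(&c));	/* 0: must yield */
	chan_unlock(&c);
	printf("napi lock: %d\n", chan_lock_napi(&c));	/* 1: got it */
	return 0;
}

This also shows why efx_stop_eventq() above loops on efx_channel_disable(): setting the DISABLED bit only succeeds in quiescing the channel once no owner bit is set, so the driver sleeps and retries until any in-flight busy poller has unlocked.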