author    Eric W. Biederman <ebiederm@xmission.com>  2014-03-14 20:47:49 -0700
committer David S. Miller <davem@davemloft.net>      2014-03-17 15:47:22 -0400
commit    ff6076314339e079806d9d2f3de9c9b768e94db1 (patch)
tree      1bb96205d18e5b400d342b8acf67386814835a00 /include/linux/netpoll.h
parent    e97dc3fcf98a32a5eda1e942a36044b95bc58099 (diff)
netpoll: Add netpoll_rx_processing
Add a helper netpoll_rx_processing that reports when netpoll has receive
side processing to perform.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
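The helper follows a common kernel idiom: a real inline implementation when
CONFIG_NETPOLL_TRAP is enabled, and a constant-false stub when it is not, so
callers such as netpoll_rx_on() need no #ifdef of their own and the compiler
can drop the guarded receive path entirely. Below is a minimal, self-contained
sketch of that idiom in plain C; FEATURE_TRAP, feature_has_work() and
pending_count are made up for illustration, only the pattern mirrors the patch.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical feature switch standing in for CONFIG_NETPOLL_TRAP. */
	#define FEATURE_TRAP 1

	#ifdef FEATURE_TRAP
	/* Real check when the feature is configured in. */
	static bool feature_has_work(const int *pending_count)
	{
		return *pending_count != 0;
	}
	#else
	/* Constant-false stub when the feature is compiled out; callers
	 * need no #ifdef and the compiler can discard the guarded path.
	 */
	static bool feature_has_work(const int *pending_count)
	{
		(void)pending_count;
		return false;
	}
	#endif

	int main(void)
	{
		int pending_count = 3;

		/* The caller stays free of configuration checks. */
		if (feature_has_work(&pending_count))
			printf("receive-side work to do\n");
		else
			printf("nothing queued\n");
		return 0;
	}

With the stub returning a compile-time false, a check like
if (feature_has_work(...)) becomes dead code in the disabled configuration,
which is the effect the patch sets up for netpoll's receive path.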
Diffstat (limited to 'include/linux/netpoll.h')
-rw-r--r--  include/linux/netpoll.h | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index fbfdb9d8d3a7..479d15c97770 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -82,14 +82,24 @@ static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	local_irq_restore(flags);
 }
 
-
+#ifdef CONFIG_NETPOLL_TRAP
+static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
+{
+	return !list_empty(&npinfo->rx_np);
+}
+#else
+static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
+{
+	return false;
+}
+#endif
 
 #ifdef CONFIG_NETPOLL
 static inline bool netpoll_rx_on(struct sk_buff *skb)
 {
 	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
-	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
+	return npinfo && (netpoll_rx_processing(npinfo) || npinfo->rx_flags);
 }
 
 static inline bool netpoll_rx(struct sk_buff *skb)
@@ -105,8 +115,8 @@ static inline bool netpoll_rx(struct sk_buff *skb)
 	npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
 	spin_lock(&npinfo->rx_lock);
-	/* check rx_flags again with the lock held */
-	if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
+	/* check rx_processing again with the lock held */
+	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
 		ret = true;
 	spin_unlock(&npinfo->rx_lock);