Diffstat (limited to 'drivers/net/ethernet/qlogic/qede')
-rw-r--r-- | drivers/net/ethernet/qlogic/qede/Makefile | 1
-rw-r--r-- | drivers/net/ethernet/qlogic/qede/qede.h | 9
-rw-r--r-- | drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 348
-rw-r--r-- | drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 104
-rw-r--r-- | drivers/net/ethernet/qlogic/qede/qede_main.c | 279
5 files changed, 606 insertions, 135 deletions
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 06ff90d87572..74a49850d74d 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_QEDE) := qede.o
 
 qede-y := qede_main.o qede_ethtool.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 47d6b22252f6..02b06d4e40ae 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,7 +24,7 @@
 #include <linux/qed/qed_eth_if.h>
 
 #define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 7
+#define QEDE_MINOR_VERSION 10
 #define QEDE_REVISION_VERSION 1
 #define QEDE_ENGINEERING_VERSION 20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -143,6 +143,8 @@ struct qede_dev {
 	struct mutex qede_lock;
 	u32 state; /* Protected by qede_lock */
 	u16 rx_buf_size;
+	u32 rx_copybreak;
+
 	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
 	/* Max supported alignment is 256 (8 shift)
@@ -235,6 +237,7 @@ struct qede_rx_queue {
 
 	u64 rx_hw_errors;
 	u64 rx_alloc_errors;
+	u64 rx_ip_frags;
 };
 
 union db_prod {
@@ -304,6 +307,9 @@ union qede_reload_args {
 	u16 mtu;
 };
 
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
 void qede_set_ethtool_ops(struct net_device *netdev);
 void qede_reload(struct qede_dev *edev,
@@ -329,6 +335,7 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
 #define NUM_TX_BDS_MIN 128
 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
 
+#define QEDE_MIN_PKT_LEN 64
 #define QEDE_RX_HDR_SIZE 256
 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
 
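A note on the qede.h hunk above: qede_set_dcbnl_ops() is declared only under CONFIG_DCB, so every caller has to repeat the same #ifdef (as __qede_probe() does later in this diff). A common alternative, sketched below purely as an illustration and not part of this commit, is to provide a no-op stub when the feature is compiled out, so call sites stay unconditional:

/* Illustration only, not from this commit: the compile-time stub
 * pattern that avoids #ifdef at call sites. Build with and without
 * -DCONFIG_DCB to see both behaviors.
 */
#include <stdio.h>

#ifdef CONFIG_DCB
static void set_dcbnl_ops(void)
{
	printf("DCB ops installed\n");
}
#else
static inline void set_dcbnl_ops(void)
{
	/* DCB compiled out: nothing to do */
}
#endif

int main(void)
{
	set_dcbnl_ops();	/* no #ifdef needed here */
	return 0;
}

Keeping the guard at the single call site, as the commit does, is equally valid while there is only one caller.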
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 000000000000..03e8c0212433
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,348 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+				     u8 *perm_addr)
+{
+	memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+				    u8 *prio_type, u8 *pgid, u8 *bw_pct,
+				    u8 *up_map)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+				     pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+				     int pgid, u8 *bw_pct)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+				    u8 *prio_type, u8 *pgid, u8 *bw_pct,
+				    u8 *up_map)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+				     up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+				     int pgid, u8 *bw_pct)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+				 u8 *setting)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+				 u8 setting)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+				    u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+					    bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+				    u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+					    bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+				     u8 bw_pct)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+				     u8 bw_pct)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+			     u8 up)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+				u8 *flags)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+				      struct dcb_peer_app_info *info,
+				      u16 *count)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+				       struct dcb_app *app)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+				      struct cee_pfc *pfc)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+				     struct cee_pg *pg)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+				  struct ieee_pfc *pfc)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+				  struct ieee_pfc *pfc)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+				  struct ieee_ets *ets)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+				  struct ieee_ets *ets)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+				  struct dcb_app *app)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+				  struct dcb_app *app)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+				       struct ieee_pfc *pfc)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+				       struct ieee_ets *ets)
+{
+	struct qede_dev *edev = netdev_priv(netdev);
+
+	return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+	.ieee_getpfc = qede_dcbnl_ieee_getpfc,
+	.ieee_setpfc = qede_dcbnl_ieee_setpfc,
+	.ieee_getets = qede_dcbnl_ieee_getets,
+	.ieee_setets = qede_dcbnl_ieee_setets,
+	.ieee_getapp = qede_dcbnl_ieee_getapp,
+	.ieee_setapp = qede_dcbnl_ieee_setapp,
+	.getdcbx = qede_dcbnl_getdcbx,
+	.ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+	.ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+	.getstate = qede_dcbnl_getstate,
+	.setstate = qede_dcbnl_setstate,
+	.getpermhwaddr = qede_dcbnl_getpermhwaddr,
+	.getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+	.getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+	.getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+	.getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+	.getpfccfg = qede_dcbnl_getpfccfg,
+	.setpfccfg = qede_dcbnl_setpfccfg,
+	.getcap = qede_dcbnl_getcap,
+	.getnumtcs = qede_dcbnl_getnumtcs,
+	.getpfcstate = qede_dcbnl_getpfcstate,
+	.getapp = qede_dcbnl_getapp,
+	.getdcbx = qede_dcbnl_getdcbx,
+	.setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+	.setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+	.setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+	.setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+	.setall = qede_dcbnl_setall,
+	.setnumtcs = qede_dcbnl_setnumtcs,
+	.setpfcstate = qede_dcbnl_setpfcstate,
+	.setapp = qede_dcbnl_setapp,
+	.setdcbx = qede_dcbnl_setdcbx,
+	.setfeatcfg = qede_dcbnl_setfeatcfg,
+	.getfeatcfg = qede_dcbnl_getfeatcfg,
+	.peer_getappinfo = qede_dcbnl_peer_getappinfo,
+	.peer_getapptable = qede_dcbnl_peer_getapptable,
+	.cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+	.cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+	dev->dcbnl_ops = &qede_dcbnl_ops;
+}
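qede_dcbnl.c is a pure adapter: every dcbnl_rtnl_ops callback recovers the driver's private structure with netdev_priv() and forwards to the qed core's dcb ops table. One side note: .getdcbx appears twice in qede_dcbnl_ops; C designated initializers allow duplicates (the last occurrence wins, and both name the same handler here), though compilers warn under -Woverride-init. A standalone model of the delegation shape, with invented names, purely for illustration:

/* Standalone model (plain C, invented names) of the thin-wrapper
 * pattern used by qede_dcbnl.c: an ops table populated with designated
 * initializers, and front-end functions that only resolve private
 * state and delegate.
 */
#include <stdio.h>

struct core_dcb_ops {
	unsigned char (*getstate)(void *cdev);
	unsigned char (*setstate)(void *cdev, unsigned char state);
};

static unsigned char core_getstate(void *cdev)
{
	return 1;	/* pretend DCB is enabled */
}

static unsigned char core_setstate(void *cdev, unsigned char state)
{
	return state;
}

static const struct core_dcb_ops core_ops = {
	.getstate = core_getstate,
	.setstate = core_setstate,
};

struct frontend {
	void *cdev;			/* core device handle */
	const struct core_dcb_ops *dcb;	/* like edev->ops->dcb */
};

/* Wrapper in the style of qede_dcbnl_getstate(): no logic of its own */
static unsigned char frontend_getstate(struct frontend *fe)
{
	return fe->dcb->getstate(fe->cdev);
}

int main(void)
{
	struct frontend fe = { .cdev = NULL, .dcb = &core_ops };

	printf("dcb state: %u\n", frontend_getstate(&fe));
	return 0;
}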
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ad3cae3b7243..f8492cac9290 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -37,6 +37,7 @@ static const struct {
 } qede_rqstats_arr[] = {
 	QEDE_RQSTAT(rx_hw_errors),
 	QEDE_RQSTAT(rx_alloc_errors),
+	QEDE_RQSTAT(rx_ip_frags),
 };
 
 #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
@@ -426,6 +427,59 @@ static u32 qede_get_link(struct net_device *dev)
 	return current_link.link_up;
 }
 
+static int qede_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 rxc, txc;
+
+	memset(coal, 0, sizeof(struct ethtool_coalesce));
+	edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
+
+	coal->rx_coalesce_usecs = rxc;
+	coal->tx_coalesce_usecs = txc;
+
+	return 0;
+}
+
+static int qede_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int i, rc = 0;
+	u16 rxc, txc;
+	u8 sb_id;
+
+	if (!netif_running(dev)) {
+		DP_INFO(edev, "Interface is down\n");
+		return -EINVAL;
+	}
+
+	if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+	    coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+		DP_INFO(edev,
+			"Can't support requested %s coalesce value [max supported value %d]\n",
+			coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+			: "tx",
+			QED_COALESCE_MAX);
+		return -EINVAL;
+	}
+
+	rxc = (u16)coal->rx_coalesce_usecs;
+	txc = (u16)coal->tx_coalesce_usecs;
+	for_each_rss(i) {
+		sb_id = edev->fp_array[i].sb_info->igu_sb_id;
+		rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
+						     (u8)i, sb_id);
+		if (rc) {
+			DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 static void qede_get_ringparam(struct net_device *dev,
 			       struct ethtool_ringparam *ering)
 {
@@ -910,6 +964,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	memset(first_bd, 0, sizeof(*first_bd));
 	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 	first_bd->data.bd_flags.bitfields = val;
+	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+	first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
 
 	/* Map skb linear data for DMA and set in the first BD */
 	mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1129,6 +1185,48 @@ static void qede_self_test(struct net_device *dev,
 	}
 }
 
+static int qede_set_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna,
+			    const void *data)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u32 val;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "Invalid rx copy break value, range is [%u, %u]",
+				   QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+			return -EINVAL;
+		}
+
+		edev->rx_copybreak = *(u32 *)data;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna, void *data)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		*(u32 *)data = edev->rx_copybreak;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops qede_ethtool_ops = {
 	.get_settings = qede_get_settings,
 	.set_settings = qede_set_settings,
@@ -1137,6 +1235,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
 	.set_msglevel = qede_set_msglevel,
 	.nway_reset = qede_nway_reset,
 	.get_link = qede_get_link,
+	.get_coalesce = qede_get_coalesce,
+	.set_coalesce = qede_set_coalesce,
 	.get_ringparam = qede_get_ringparam,
 	.set_ringparam = qede_set_ringparam,
 	.get_pauseparam = qede_get_pauseparam,
@@ -1155,6 +1255,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
 	.get_channels = qede_get_channels,
 	.set_channels = qede_set_channels,
 	.self_test = qede_self_test,
+	.get_tunable = qede_get_tunable,
+	.set_tunable = qede_set_tunable,
 };
 
 static const struct ethtool_ops qede_vf_ethtool_ops = {
@@ -1177,6 +1279,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
 	.set_rxfh = qede_set_rxfh,
 	.get_channels = qede_get_channels,
 	.set_channels = qede_set_channels,
+	.get_tunable = qede_get_tunable,
+	.set_tunable = qede_set_tunable,
 };
 
 void qede_set_ethtool_ops(struct net_device *dev)
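The new tunables tie into the datapath change in qede_main.c below: received frames whose length plus pad fit under edev->rx_copybreak are memcpy'd into a freshly allocated skb (larger frames keep the page-based path), and the threshold is now user-adjustable within [QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE]; probe installs QEDE_RX_HDR_SIZE as the default, preserving the old behavior. Recent ethtool binaries should be able to drive this through the generic tunable interface (e.g. `ethtool --set-tunable <dev> rx-copybreak <n>`). A standalone sketch of the same bounds check:

/* Standalone sketch (plain C) of the ETHTOOL_RX_COPYBREAK validation
 * above. The constants mirror QEDE_MIN_PKT_LEN and QEDE_RX_HDR_SIZE
 * from qede.h.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_PKT_LEN	64	/* QEDE_MIN_PKT_LEN */
#define RX_HDR_SIZE	256	/* QEDE_RX_HDR_SIZE */

static int set_rx_copybreak(uint32_t *copybreak, uint32_t val)
{
	if (val < MIN_PKT_LEN || val > RX_HDR_SIZE)
		return -EINVAL;		/* out of the supported range */

	*copybreak = val;
	return 0;
}

int main(void)
{
	uint32_t cb = RX_HDR_SIZE;	/* default installed at probe time */

	printf("set 128 -> %d (copybreak=%u)\n",
	       set_rx_copybreak(&cb, 128), cb);
	printf("set 512 -> %d (copybreak=%u)\n",
	       set_rx_copybreak(&cb, 512), cb);
	return 0;
}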
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index f8e11f953acb..91e7bb0b85c8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,12 +24,7 @@
 #include <linux/netdev_features.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
-#ifdef CONFIG_QEDE_VXLAN
-#include <net/vxlan.h>
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-#include <net/geneve.h>
-#endif
+#include <net/udp_tunnel.h>
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -490,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
 }
 #endif
 
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+	/* wmb makes sure that the BDs data is updated before updating the
+	 * producer, otherwise FW may read old data from the BDs.
+	 */
+	wmb();
+	barrier();
+	writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives to the device before
+	 * the queue lock is released and another start_xmit is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this doorbell. This is applicable to IA64/Altix systems.
+	 */
+	mmiowb();
+}
+
 /* Main transmit function */
 static
 netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -548,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
 		DP_NOTICE(edev, "SKB mapping failed\n");
 		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+		qede_update_tx_producer(txq);
 		return NETDEV_TX_OK;
 	}
 	nbd++;
@@ -579,8 +593,6 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
 	/* Fill the parsing flags & params according to the requested offload */
 	if (xmit_type & XMIT_L4_CSUM) {
-		u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
-
 		/* We don't re-calculate IP checksum as it is already done by
 		 * the upper stack
 		 */
@@ -590,14 +602,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		if (xmit_type & XMIT_ENC) {
 			first_bd->data.bd_flags.bitfields |=
 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-		} else {
-			/* In cases when OS doesn't indicate for inner offloads
-			 * when packet is tunnelled, we need to override the HW
-			 * tunnel configuration so that packets are treated as
-			 * regular non tunnelled packets and no inner offloads
-			 * are done by the hardware.
-			 */
-			first_bd->data.bitfields |= cpu_to_le16(temp);
+			first_bd->data.bitfields |=
+				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
 		}
 
 		/* If the packet is IPv6 with extension header, indicate that
@@ -655,6 +661,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 			tx_data_bd = (struct eth_tx_bd *)third_bd;
 			data_split = true;
 		}
+	} else {
+		first_bd->data.bitfields |=
+			(skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 	}
 
 	/* Handle fragmented skb */
@@ -666,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		if (rc) {
 			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
 						data_split);
+			qede_update_tx_producer(txq);
 			return NETDEV_TX_OK;
 		}
 
@@ -690,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		if (rc) {
 			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
 						data_split);
+			qede_update_tx_producer(txq);
 			return NETDEV_TX_OK;
 		}
 	}
@@ -710,20 +722,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	txq->tx_db.data.bd_prod =
 		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 
-	/* wmb makes sure that the BDs data is updated before updating the
-	 * producer, otherwise FW may read old data from the BDs.
-	 */
-	wmb();
-	barrier();
-	writel(txq->tx_db.raw, txq->doorbell_addr);
-
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the queue lock is released and another start_xmit is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
+	if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+		qede_update_tx_producer(txq);
 
 	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
 		      < (MAX_SKB_FRAGS + 1))) {
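The hunks above introduce doorbell batching: the expensive wmb()/writel()/mmiowb() sequence moves into qede_update_tx_producer(), and on the fast path it now runs only when the stack signals the end of a burst (!skb->xmit_more) or the queue is being stopped. The error paths gain explicit calls because a doorbell deferred by an earlier xmit_more packet must still be rung. A standalone model of the batching idea:

/* Standalone model (plain C) of xmit_more doorbell batching: producer
 * updates accumulate and a single "doorbell" (printf standing in for
 * the wmb()/writel()/mmiowb() sequence) covers the whole burst.
 */
#include <stdbool.h>
#include <stdio.h>

struct txq {
	unsigned int prod;	/* shadow of the producer index */
};

static void update_tx_producer(struct txq *q)
{
	/* stands in for: wmb(); writel(q->prod, doorbell); mmiowb(); */
	printf("doorbell: prod=%u\n", q->prod);
}

static void xmit(struct txq *q, bool xmit_more)
{
	q->prod++;		/* post one packet's BDs */
	if (!xmit_more)		/* defer MMIO while a burst continues */
		update_tx_producer(q);
}

int main(void)
{
	struct txq q = { 0 };

	xmit(&q, true);		/* burst of three: */
	xmit(&q, true);		/* ...no MMIO yet... */
	xmit(&q, false);	/* one doorbell covers all three */
	return 0;
}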
@@ -1357,6 +1357,20 @@ static u8 qede_check_csum(u16 flag)
 	return qede_check_tunn_csum(flag);
 }
 
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+				      u16 flag)
+{
+	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+		return true;
+
+	return false;
+}
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
 	struct qede_dev *edev = fp->edev;
@@ -1435,6 +1449,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 
 			csum_flag = qede_check_csum(parse_flag);
 			if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+				if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+							      parse_flag)) {
+					rxq->rx_ip_frags++;
+					goto alloc_skb;
+				}
+
 				DP_NOTICE(edev,
 					  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 					  sw_comp_cons, parse_flag);
@@ -1443,6 +1463,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				goto next_cqe;
 			}
 
+alloc_skb:
 			skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 			if (unlikely(!skb)) {
 				DP_NOTICE(edev,
@@ -1453,7 +1474,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			}
 
 			/* Copy data into SKB */
-			if (len + pad <= QEDE_RX_HDR_SIZE) {
+			if (len + pad <= edev->rx_copybreak) {
 				memcpy(skb_put(skb, len),
 				       page_address(data) + pad +
 				       sw_rx_data->page_offset, len);
@@ -1585,56 +1606,49 @@ next_cqe: /* don't consume bd rx buffer */
 
 static int qede_poll(struct napi_struct *napi, int budget)
 {
-	int work_done = 0;
 	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
-						 napi);
+						napi);
 	struct qede_dev *edev = fp->edev;
+	int rx_work_done = 0;
+	u8 tc;
 
-	while (1) {
-		u8 tc;
-
-		for (tc = 0; tc < edev->num_tc; tc++)
-			if (qede_txq_has_work(&fp->txqs[tc]))
-				qede_tx_int(edev, &fp->txqs[tc]);
-
-		if (qede_has_rx_work(fp->rxq)) {
-			work_done += qede_rx_int(fp, budget - work_done);
-
-			/* must not complete if we consumed full budget */
-			if (work_done >= budget)
-				break;
-		}
+	for (tc = 0; tc < edev->num_tc; tc++)
+		if (qede_txq_has_work(&fp->txqs[tc]))
+			qede_tx_int(edev, &fp->txqs[tc]);
+
+	rx_work_done = qede_has_rx_work(fp->rxq) ?
+			qede_rx_int(fp, budget) : 0;
+	if (rx_work_done < budget) {
+		qed_sb_update_sb_idx(fp->sb_info);
+		/* *_has_*_work() reads the status block,
+		 * thus we need to ensure that status block indices
+		 * have been actually read (qed_sb_update_sb_idx)
+		 * prior to this check (*_has_*_work) so that
+		 * we won't write the "newer" value of the status block
+		 * to HW (if there was a DMA right after
+		 * qede_has_rx_work and if there is no rmb, the memory
+		 * reading (qed_sb_update_sb_idx) may be postponed
+		 * to right before *_ack_sb). In this case there
+		 * will never be another interrupt until there is
+		 * another update of the status block, while there
+		 * is still unhandled work.
+		 */
+		rmb();
 
 		/* Fall out from the NAPI loop if needed */
-		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
-			qed_sb_update_sb_idx(fp->sb_info);
-			/* *_has_*_work() reads the status block,
-			 * thus we need to ensure that status block indices
-			 * have been actually read (qed_sb_update_sb_idx)
-			 * prior to this check (*_has_*_work) so that
-			 * we won't write the "newer" value of the status block
-			 * to HW (if there was a DMA right after
-			 * qede_has_rx_work and if there is no rmb, the memory
-			 * reading (qed_sb_update_sb_idx) may be postponed
-			 * to right before *_ack_sb). In this case there
-			 * will never be another interrupt until there is
-			 * another update of the status block, while there
-			 * is still unhandled work.
-			 */
-			rmb();
-
-			if (!(qede_has_rx_work(fp->rxq) ||
-			      qede_has_tx_work(fp))) {
-				napi_complete(napi);
-				/* Update and reenable interrupts */
-				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
-					   1 /*update*/);
-				break;
-			}
+		if (!(qede_has_rx_work(fp->rxq) ||
+		      qede_has_tx_work(fp))) {
+			napi_complete(napi);
+
+			/* Update and reenable interrupts */
+			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+				   1 /*update*/);
+		} else {
+			rx_work_done = budget;
 		}
 	}
 
-	return work_done;
+	return rx_work_done;
 }
 
 static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
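The qede_poll() rewrite drops the driver's private while(1) loop and leans on the standard NAPI contract instead: consume at most budget packets per call, then either finish below budget and napi_complete() (re-arming the interrupt), or return the full budget so the core calls poll again. Returning budget is also how the function now reports "Tx or Rx work still pending" after the rmb()-protected recheck. A standalone model of that contract:

/* Standalone model (plain C) of the NAPI contract followed by the
 * rewritten qede_poll(): below-budget completion re-enables the IRQ,
 * a full-budget return keeps the core polling.
 */
#include <stdio.h>

static int pending = 10;	/* packets waiting in the ring */

static int poll(int budget)
{
	int done = pending < budget ? pending : budget;

	pending -= done;
	if (done < budget)
		printf("napi_complete: irq re-enabled\n");
	return done;
}

int main(void)
{
	int budget = 4, work;

	do {	/* stands in for the core NAPI softirq loop */
		work = poll(budget);
		printf("poll returned %d\n", work);
	} while (work == budget);

	return 0;
}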
@@ -2116,75 +2130,75 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
 	return 0;
 }
 
-#ifdef CONFIG_QEDE_VXLAN
-static void qede_add_vxlan_port(struct net_device *dev,
-				sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_add(struct net_device *dev,
+				struct udp_tunnel_info *ti)
 {
 	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	if (edev->vxlan_dst_port)
-		return;
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (edev->vxlan_dst_port)
+			return;
 
-	edev->vxlan_dst_port = t_port;
+		edev->vxlan_dst_port = t_port;
 
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+			   t_port);
 
-	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
-	schedule_delayed_work(&edev->sp_task, 0);
-}
+		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (edev->geneve_dst_port)
+			return;
 
-static void qede_del_vxlan_port(struct net_device *dev,
-				sa_family_t sa_family, __be16 port)
-{
-	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+		edev->geneve_dst_port = t_port;
 
-	if (t_port != edev->vxlan_dst_port)
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+			   t_port);
+		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+		break;
+	default:
 		return;
+	}
 
-	edev->vxlan_dst_port = 0;
-
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
-
-	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
 	schedule_delayed_work(&edev->sp_task, 0);
 }
-#endif
 
-#ifdef CONFIG_QEDE_GENEVE
-static void qede_add_geneve_port(struct net_device *dev,
-				 sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_del(struct net_device *dev,
+				struct udp_tunnel_info *ti)
 {
 	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	if (edev->geneve_dst_port)
-		return;
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (t_port != edev->vxlan_dst_port)
+			return;
 
-	edev->geneve_dst_port = t_port;
+		edev->vxlan_dst_port = 0;
 
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
-	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
-	schedule_delayed_work(&edev->sp_task, 0);
-}
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+			   t_port);
 
-static void qede_del_geneve_port(struct net_device *dev,
-				 sa_family_t sa_family, __be16 port)
-{
-	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (t_port != edev->geneve_dst_port)
+			return;
 
-	if (t_port != edev->geneve_dst_port)
-		return;
+		edev->geneve_dst_port = 0;
 
-	edev->geneve_dst_port = 0;
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+			   t_port);
+		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+		break;
+	default:
+		return;
+	}
 
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
-	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
 	schedule_delayed_work(&edev->sp_task, 0);
 }
-#endif
 
 static const struct net_device_ops qede_netdev_ops = {
 	.ndo_open = qede_open,
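This hunk replaces the four protocol-specific NDOs (and their CONFIG_QEDE_VXLAN/CONFIG_QEDE_GENEVE guards) with the generic udp_tunnel pair: one add and one del callback receive a struct udp_tunnel_info and switch on ti->type, and qede_open()'s udp_tunnel_get_rx_info() call in the final hunk below asks the stack to replay every currently offloaded port after an ifup. A standalone sketch of the dispatch shape, with types simplified:

/* Standalone sketch (plain C, simplified types) of the single-entry
 * dispatch used by qede_udp_tunnel_add(): switch on the tunnel type,
 * track one offloaded port per type, then kick deferred work.
 */
#include <stdint.h>
#include <stdio.h>

enum tunnel_type { TUNNEL_VXLAN, TUNNEL_GENEVE };

struct tunnel_info {
	enum tunnel_type type;
	uint16_t port;		/* host order here for simplicity */
};

static uint16_t vxlan_port, geneve_port;

static void tunnel_add(const struct tunnel_info *ti)
{
	switch (ti->type) {
	case TUNNEL_VXLAN:
		if (vxlan_port)
			return;		/* one offloaded port per type */
		vxlan_port = ti->port;
		break;
	case TUNNEL_GENEVE:
		if (geneve_port)
			return;
		geneve_port = ti->port;
		break;
	}
	printf("schedule config work for port %u\n", ti->port);
}

int main(void)
{
	struct tunnel_info vx = { TUNNEL_VXLAN, 4789 };	/* IANA VXLAN port */

	tunnel_add(&vx);
	tunnel_add(&vx);	/* duplicate add is silently ignored */
	return 0;
}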
@@ -2208,14 +2222,8 @@ static const struct net_device_ops qede_netdev_ops = {
 	.ndo_get_vf_config = qede_get_vf_config,
 	.ndo_set_vf_rate = qede_set_vf_rate,
 #endif
-#ifdef CONFIG_QEDE_VXLAN
-	.ndo_add_vxlan_port = qede_add_vxlan_port,
-	.ndo_del_vxlan_port = qede_del_vxlan_port,
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-	.ndo_add_geneve_port = qede_add_geneve_port,
-	.ndo_del_geneve_port = qede_del_geneve_port,
-#endif
+	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
+	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
 };
 
 /* -------------------------------------------------------------------------
@@ -2505,8 +2513,13 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
 	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
+#ifdef CONFIG_DCB
+	qede_set_dcbnl_ops(edev->ndev);
+#endif
+
 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
 	mutex_init(&edev->qede_lock);
+	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
 
 	DP_INFO(edev, "Ending successfully qede probe\n");
@@ -2823,6 +2836,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 					    QED_CHAIN_MODE_NEXT_PTR,
+					    QED_CHAIN_CNT_TYPE_U16,
 					    RX_RING_SIZE,
 					    sizeof(struct eth_rx_bd),
 					    &rxq->rx_bd_ring);
@@ -2834,6 +2848,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME,
 					    QED_CHAIN_MODE_PBL,
+					    QED_CHAIN_CNT_TYPE_U16,
 					    RX_RING_SIZE,
 					    sizeof(union eth_rx_cqe),
 					    &rxq->rx_comp_ring);
@@ -2885,9 +2900,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 					    QED_CHAIN_MODE_PBL,
+					    QED_CHAIN_CNT_TYPE_U16,
 					    NUM_TX_BDS_MAX,
-					    sizeof(*p_virt),
-					    &txq->tx_pbl);
+					    sizeof(*p_virt), &txq->tx_pbl);
 	if (rc)
 		goto err;
@@ -3578,12 +3593,8 @@ static int qede_open(struct net_device *ndev)
 	if (rc)
 		return rc;
 
-#ifdef CONFIG_QEDE_VXLAN
-	vxlan_get_rx_port(ndev);
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-	geneve_get_rx_port(ndev);
-#endif
+	udp_tunnel_get_rx_info(ndev);
+
 	return 0;
 }