author     Jakub Kicinski <kuba@kernel.org>  2024-09-05 20:27:09 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2024-09-12 17:11:24 -0700
commit     46ae4d0a489741565520195bddebc3414781e603 (patch)
tree       b364bb2c6808d4d1ae4e00c46c123caf7fffe618 /drivers/net
parent     3cfb5aa10cb78571e214e48a3a6e42c11d5288a1 (diff)
parent     5abfdfd402699ce7c1e81d1a25bc37f60f7741ff (diff)
download   lwn-46ae4d0a489741565520195bddebc3414781e603.tar.gz
           lwn-46ae4d0a489741565520195bddebc3414781e603.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.
No conflicts (sort of) and no adjacent changes.
This merge reverts commit b3c9e65eb227 ("net: hsr: remove seqnr_lock")
from net, as it was superseded by
commit 430d67bdcb04 ("net: hsr: Use the seqnr lock for frames received via interlink port.")
in net-next.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net')
 drivers/net/dsa/ocelot/felix_vsc9959.c               | 11
 drivers/net/ethernet/faraday/ftgmac100.h             |  2
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c       |  9
 drivers/net/ethernet/intel/ice/ice_lib.c             | 15
 drivers/net/ethernet/intel/ice/ice_main.c            |  2
 drivers/net/ethernet/intel/ice/ice_switch.c          |  4
 drivers/net/ethernet/intel/igb/igb_main.c            | 17
 drivers/net/ethernet/marvell/octeontx2/af/rvu.h      |  3
 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c  | 59
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 10
 drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c |  4
 drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c    | 51
 drivers/net/ethernet/mellanox/mlx5/core/main.c       |  1
 drivers/net/ethernet/mellanox/mlx5/core/qos.c        |  7
 drivers/net/ethernet/wangxun/libwx/wx_type.h         |  6
 drivers/net/phy/dp83822.c                            | 35
 drivers/net/virtio_net.c                             | 95
 17 files changed, 209 insertions(+), 122 deletions(-)
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 73d053501fb1..0102a82e88cc 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1474,10 +1474,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
     /* Hardware errata - Admin config could not be overwritten if
      * config is pending, need reset the TAS module
      */
-    val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
-    if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
-        ret = -EBUSY;
-        goto err_reset_tc;
+    val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+    if (val & QSYS_TAG_CONFIG_ENABLE) {
+        val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+        if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+            ret = -EBUSY;
+            goto err_reset_tc;
+        }
     }
 
     ocelot_rmw_rix(ocelot,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 63b3e02fab16..4968f6f0bdbc 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -84,7 +84,7 @@
                  FTGMAC100_INT_RPKT_BUF)
 
 /* All the interrupts we care about */
-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
                  FTGMAC100_INT_BAD)
 
 /*
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 1d0208b5db7d..e15dd3d858df 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2273,12 +2273,12 @@ static netdev_tx_t
 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
     const int queue_mapping = skb_get_queue_mapping(skb);
-    bool nonlinear = skb_is_nonlinear(skb);
     struct rtnl_link_stats64 *percpu_stats;
     struct dpaa_percpu_priv *percpu_priv;
     struct netdev_queue *txq;
     struct dpaa_priv *priv;
     struct qm_fd fd;
+    bool nonlinear;
     int offset = 0;
     int err = 0;
 
@@ -2288,6 +2288,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 
     qm_fd_clear_fd(&fd);
 
+    /* Packet data is always read as 32-bit words, so zero out any part of
+     * the skb which might be sent if we have to pad the packet
+     */
+    if (__skb_put_padto(skb, ETH_ZLEN, false))
+        goto enomem;
+
+    nonlinear = skb_is_nonlinear(skb);
     if (!nonlinear) {
         /* We're going to store the skb backpointer at the beginning
          * of the data buffer, so we need a privately owned skb
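The dpaa_eth hunk above is an instance of a common transmit-path rule: frames shorter than the 60-byte Ethernet minimum must be zero-padded before the hardware can DMA them, or stale bytes past skb->len may leak onto the wire. A minimal sketch of the pattern (the wrapper name is hypothetical; __skb_put_padto() and ETH_ZLEN are the real kernel API):

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    /* Pad a runt frame to ETH_ZLEN (60 bytes), zero-filling the tail.
     * free_on_error == false leaves the skb owned by the caller, which
     * can then account the drop on its normal error path.
     */
    static int example_pad_runt_frame(struct sk_buff *skb)
    {
        return __skb_put_padto(skb, ETH_ZLEN, false);
    }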
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 570425772b63..06e712cdc3d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2433,13 +2433,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
     struct ice_pf *pf = vsi->back;
     int err;
 
-    /* The Rx rule will only exist to remove if the LLDP FW
-     * engine is currently stopped
-     */
-    if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
-        !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
-        ice_cfg_sw_lldp(vsi, false, false);
-
     ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
     err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
     if (err)
@@ -2806,6 +2799,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
         ice_rss_clean(vsi);
 
     ice_vsi_close(vsi);
+
+    /* The Rx rule will only exist to remove if the LLDP FW
+     * engine is currently stopped
+     */
+    if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+        !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+        ice_cfg_sw_lldp(vsi, false, false);
+
     ice_vsi_decfg(vsi);
 
     /* retain SW VSI data structure since it is needed to unregister and
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 59c0e88153c0..eeb48cc48e08 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5351,7 +5351,6 @@ err_load:
     ice_deinit(pf);
 err_init:
     ice_adapter_put(pdev);
-    pci_disable_device(pdev);
     return err;
 }
 
@@ -5459,7 +5458,6 @@ static void ice_remove(struct pci_dev *pdev)
 
     ice_set_wake(pf);
     ice_adapter_put(pdev);
-    pci_disable_device(pdev);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index fe8847184cb1..79d91e95358c 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3194,7 +3194,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
 
         /* A rule already exists with the new VSI being added */
         if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
-            return 0;
+            return -EEXIST;
 
         /* Update the previously created VSI list set with
          * the new VSI ID passed in
@@ -3264,7 +3264,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
 
     list_head = &sw->recp_list[recp_id].filt_rules;
     list_for_each_entry(list_itr, list_head, list_entry) {
-        if (list_itr->vsi_list_info) {
+        if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
             map_info = list_itr->vsi_list_info;
             if (test_bit(vsi_handle, map_info->vsi_map)) {
                 *vsi_list_id = map_info->vsi_list_id;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9dc7c60838ed..1ef4cb871452 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,6 +33,7 @@
 #include <linux/bpf_trace.h>
 #include <linux/pm_runtime.h>
 #include <linux/etherdevice.h>
+#include <linux/lockdep.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
     }
 }
 
+/* This function assumes __netif_tx_lock is held by the caller. */
 static void igb_xdp_ring_update_tail(struct igb_ring *ring)
 {
+    lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
     /* Force memory writes to complete before letting h/w know there
      * are new descriptors to fetch.
      */
@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
         nxmit++;
     }
 
-    __netif_tx_unlock(nq);
-
     if (unlikely(flags & XDP_XMIT_FLUSH))
         igb_xdp_ring_update_tail(tx_ring);
 
+    __netif_tx_unlock(nq);
+
     return nxmit;
 }
 
@@ -8864,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
 
 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
+    unsigned int total_bytes = 0, total_packets = 0;
     struct igb_adapter *adapter = q_vector->adapter;
     struct igb_ring *rx_ring = q_vector->rx.ring;
-    struct sk_buff *skb = rx_ring->skb;
-    unsigned int total_bytes = 0, total_packets = 0;
     u16 cleaned_count = igb_desc_unused(rx_ring);
+    struct sk_buff *skb = rx_ring->skb;
+    int cpu = smp_processor_id();
     unsigned int xdp_xmit = 0;
+    struct netdev_queue *nq;
     struct xdp_buff xdp;
     u32 frame_sz = 0;
     int rx_buf_pgcnt;
@@ -8997,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
     if (xdp_xmit & IGB_XDP_TX) {
         struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
 
+        nq = txring_txq(tx_ring);
+        __netif_tx_lock(nq, cpu);
         igb_xdp_ring_update_tail(tx_ring);
+        __netif_tx_unlock(nq);
     }
 
     u64_stats_update_begin(&rx_ring->rx_syncp);
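The igb hunks above show a locking idiom worth copying: when a helper may only be called with the TX queue's xmit lock held, state the contract in a comment and enforce it with lockdep_assert_held(), which compiles away on non-lockdep builds. A generic sketch (the example_* function names are hypothetical; the locking calls are the real netdev API):

    #include <linux/lockdep.h>
    #include <linux/netdevice.h>
    #include <linux/smp.h>

    /* Must be called with the queue's xmit lock held: the tail-pointer
     * write must not race with another producer on the same ring.
     */
    static void example_ring_update_tail(struct netdev_queue *nq)
    {
        lockdep_assert_held(&nq->_xmit_lock);
        /* wmb() + tail register write would go here */
    }

    static void example_flush_ring(struct netdev_queue *nq)
    {
        __netif_tx_lock(nq, smp_processor_id());
        example_ring_update_tail(nq);   /* assertion is satisfied */
        __netif_tx_unlock(nq);
    }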
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 43b1d83686d1..5016ba82e142 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -319,6 +319,7 @@ struct nix_mark_format {
 
 /* smq(flush) to tl1 cir/pir info */
 struct nix_smq_tree_ctx {
+    u16 schq;
     u64 cir_off;
     u64 cir_val;
     u64 pir_off;
@@ -328,8 +329,6 @@ struct nix_smq_tree_ctx {
 
 /* smq flush context */
 struct nix_smq_flush_ctx {
     int smq;
-    u16 tl1_schq;
-    u16 tl2_schq;
     struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
 };
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 222f9e00b836..82832a24fbd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
     schq = smq;
     for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
         smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+        smq_tree_ctx->schq = schq;
         if (lvl == NIX_TXSCH_LVL_TL1) {
-            smq_flush_ctx->tl1_schq = schq;
             smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
             smq_tree_ctx->pir_off = 0;
             smq_tree_ctx->pir_val = 0;
             parent_off = 0;
         } else if (lvl == NIX_TXSCH_LVL_TL2) {
-            smq_flush_ctx->tl2_schq = schq;
             smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
             smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
             parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2301,8 +2300,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
 {
     struct nix_txsch *txsch;
     struct nix_hw *nix_hw;
+    int tl2, tl2_schq;
     u64 regoff;
-    int tl2;
 
     nix_hw = get_nix_hw(rvu->hw, blkaddr);
     if (!nix_hw)
@@ -2310,16 +2309,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
 
     /* loop through all TL2s with matching PF_FUNC */
     txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+    tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
     for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
         /* skip the smq(flush) TL2 */
-        if (tl2 == smq_flush_ctx->tl2_schq)
+        if (tl2 == tl2_schq)
             continue;
         /* skip unused TL2s */
         if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
             continue;
         /* skip if PF_FUNC doesn't match */
         if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
-            (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+            (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
                             ~RVU_PFVF_FUNC_MASK)))
             continue;
         /* enable/disable XOFF */
@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
              int smq, u16 pcifunc, int nixlf)
 {
     struct nix_smq_flush_ctx *smq_flush_ctx;
+    int err, restore_tx_en = 0, i;
     int pf = rvu_get_pf(pcifunc);
     u8 cgx_id = 0, lmac_id = 0;
-    int err, restore_tx_en = 0;
-    u64 cfg;
+    u16 tl2_tl3_link_schq;
+    u8 link, link_level;
+    u64 cfg, bmap = 0;
 
     if (!is_rvu_otx2(rvu)) {
         /* Skip SMQ flush if pkt count is zero */
@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
     nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
     nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
 
-    cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
-    /* Do SMQ flush and set enqueue xoff */
-    cfg |= BIT_ULL(50) | BIT_ULL(49);
-    rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
-
     /* Disable backpressure from physical link,
      * otherwise SMQ flush may stall.
      */
     rvu_cgx_enadis_rx_bp(rvu, pf, false);
 
+    link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+            NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+    tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
+    link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
+
+    /* SMQ set enqueue xoff */
+    cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+    cfg |= BIT_ULL(50);
+    rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+    /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
+    for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+        cfg = rvu_read64(rvu, blkaddr,
+                         NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+        if (!(cfg & BIT_ULL(12)))
+            continue;
+        bmap |= (1 << i);
+        cfg &= ~BIT_ULL(12);
+        rvu_write64(rvu, blkaddr,
+                    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+    }
+
+    /* Do SMQ flush and set enqueue xoff */
+    cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+    cfg |= BIT_ULL(50) | BIT_ULL(49);
+    rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
     /* Wait for flush to complete */
     err = rvu_poll_reg(rvu, blkaddr,
                        NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
              "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
              nixlf, smq);
 
+    /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+    for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+        if (!(bmap & (1 << i)))
+            continue;
+        cfg = rvu_read64(rvu, blkaddr,
+                         NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+        cfg |= BIT_ULL(12);
+        rvu_write64(rvu, blkaddr,
+                    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+    }
+
     /* clear XOFF on TL2s */
     nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
     nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
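The reworked SMQ flush above follows a save/disable/restore discipline: the driver records which link-enable bits were set in a bitmap, clears them, performs the flush, and then re-enables only what it disabled. A condensed sketch of that idiom, with hypothetical reg_read()/reg_write()/do_flush() accessors standing in for the rvu_read64()/rvu_write64() register helpers:

    #include <linux/bits.h>
    #include <linux/types.h>

    u64 reg_read(int idx);              /* hypothetical accessors */
    void reg_write(int idx, u64 val);
    void do_flush(void);

    #define EXAMPLE_ENA BIT_ULL(12)     /* link-enable bit, as in the hunk */

    void example_flush_quiesced(int nlinks)
    {
        u64 bmap = 0, cfg;
        int i;

        for (i = 0; i < nlinks; i++) {  /* save + disable */
            cfg = reg_read(i);
            if (!(cfg & EXAMPLE_ENA))
                continue;               /* already off: leave it off */
            bmap |= BIT_ULL(i);         /* remember it was on */
            reg_write(i, cfg & ~EXAMPLE_ENA);
        }

        do_flush();                     /* runs with links quiesced */

        for (i = 0; i < nlinks; i++)    /* restore exactly what we took */
            if (bmap & BIT_ULL(i))
                reg_write(i, reg_read(i) | EXAMPLE_ENA);
    }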
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 4d123dae912c..1966736f98b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -137,6 +137,10 @@ void mlx5e_build_ptys2ethtool_map(void)
                     ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
     MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
                     ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+    MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
+                    ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+    MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
+                    ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
     MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
                     ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
     MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
@@ -202,6 +206,12 @@ void mlx5e_build_ptys2ethtool_map(void)
                     ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
                     ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
                     ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
+    MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext,
+                    ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+                    ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+                    ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+                    ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+                    ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT);
     MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
                     ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
                     ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 255bc8b749f9..8587cd572da5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -319,7 +319,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
         return -EPERM;
 
     mutex_lock(&esw->state_lock);
-    if (esw->mode != MLX5_ESWITCH_LEGACY) {
+    if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
         err = -EOPNOTSUPP;
         goto out;
     }
@@ -339,7 +339,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
     if (!mlx5_esw_allowed(esw))
         return -EPERM;
 
-    if (esw->mode != MLX5_ESWITCH_LEGACY)
+    if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
         return -EOPNOTSUPP;
 
     *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 20146a2dc7f4..02a3563f51ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -312,6 +312,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
     return err;
 }
 
+static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+{
+    switch (type) {
+    case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+        return MLX5_CAP_QOS(dev, esw_element_type) &
+               ELEMENT_TYPE_CAP_MASK_TSAR;
+    case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+        return MLX5_CAP_QOS(dev, esw_element_type) &
+               ELEMENT_TYPE_CAP_MASK_VPORT;
+    case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+        return MLX5_CAP_QOS(dev, esw_element_type) &
+               ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+    case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+        return MLX5_CAP_QOS(dev, esw_element_type) &
+               ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+    }
+    return false;
+}
+
 static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
                                               struct mlx5_vport *vport,
                                               u32 max_rate, u32 bw_share)
@@ -323,6 +342,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
     void *vport_elem;
     int err;
 
+    if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
+        return -EOPNOTSUPP;
+
     parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
     MLX5_SET(scheduling_context, sched_ctx, element_type,
              SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
@@ -421,6 +443,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
 {
     u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
     struct mlx5_esw_rate_group *group;
+    __be32 *attr;
     u32 divider;
     int err;
 
@@ -428,6 +451,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
     if (!group)
         return ERR_PTR(-ENOMEM);
 
+    MLX5_SET(scheduling_context, tsar_ctx, element_type,
+             SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+    attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+    *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
+
     MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
              esw->qos.root_tsar_ix);
     err = mlx5_create_scheduling_element_cmd(esw->dev,
@@ -526,25 +555,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
     return err;
 }
 
-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
-{
-    switch (type) {
-    case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
-        return MLX5_CAP_QOS(dev, esw_element_type) &
-               ELEMENT_TYPE_CAP_MASK_TSAR;
-    case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
-        return MLX5_CAP_QOS(dev, esw_element_type) &
-               ELEMENT_TYPE_CAP_MASK_VPORT;
-    case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
-        return MLX5_CAP_QOS(dev, esw_element_type) &
-               ELEMENT_TYPE_CAP_MASK_VPORT_TC;
-    case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
-        return MLX5_CAP_QOS(dev, esw_element_type) &
-               ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
-    }
-    return false;
-}
-
 static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
 {
     u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
@@ -555,7 +565,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
     if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
         return -EOPNOTSUPP;
 
-    if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+    if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
+        !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
         return -EOPNOTSUPP;
 
     MLX5_SET(scheduling_context, tsar_ctx, element_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4b88d969a66c..c6e951b8ebdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -2223,6 +2223,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
     { PCI_VDEVICE(MELLANOX, 0x101f) },  /* ConnectX-6 LX */
     { PCI_VDEVICE(MELLANOX, 0x1021) },  /* ConnectX-7 */
     { PCI_VDEVICE(MELLANOX, 0x1023) },  /* ConnectX-8 */
+    { PCI_VDEVICE(MELLANOX, 0x1025) },  /* ConnectX-9 */
     { PCI_VDEVICE(MELLANOX, 0xa2d2) },  /* BlueField integrated ConnectX-5 network controller */
     { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},  /* BlueField integrated ConnectX-5 network controller VF */
     { PCI_VDEVICE(MELLANOX, 0xa2d6) },  /* BlueField-2 integrated ConnectX-6 Dx network controller */
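The mlx5 QoS hunks in esw/qos.c above and core/qos.c below apply the same defensive pattern: before building a scheduling context and issuing the firmware command, test the relevant capability bit and return -EOPNOTSUPP early. A stripped-down sketch (the EXAMPLE_CAP_* masks are hypothetical stand-ins for the MLX5_CAP_QOS() fields):

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define EXAMPLE_CAP_TSAR   BIT(0)  /* stand-in for ELEMENT_TYPE_CAP_MASK_TSAR */
    #define EXAMPLE_CAP_VPORT  BIT(1)  /* stand-in for ELEMENT_TYPE_CAP_MASK_VPORT */

    int example_create_vport_element(u32 element_type_caps)
    {
        /* Fail fast with a meaningful errno rather than letting the
         * device firmware reject the command later.
         */
        if (!(element_type_caps & EXAMPLE_CAP_VPORT))
            return -EOPNOTSUPP;

        /* ... build the scheduling context and issue the command ... */
        return 0;
    }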
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
index 8bce730b5c5b..db2bd3ad63ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
 {
     u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
 
+    if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
+        return -EOPNOTSUPP;
+
     MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
     MLX5_SET(scheduling_context, sched_ctx, element_type,
              SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
     u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
     void *attr;
 
+    if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
+        !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+        return -EOPNOTSUPP;
+
     MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
     MLX5_SET(scheduling_context, sched_ctx, element_type,
              SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1d57b047817b..b54bffda027b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -426,9 +426,9 @@ enum WX_MSCA_CMD_value {
 #define WX_MIN_RXD                   128
 #define WX_MIN_TXD                   128
 
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define WX_REQ_RX_DESCRIPTOR_MULTIPLE   8
-#define WX_REQ_TX_DESCRIPTOR_MULTIPLE   8
+/* Number of Transmit and Receive Descriptors must be a multiple of 128 */
+#define WX_REQ_RX_DESCRIPTOR_MULTIPLE   128
+#define WX_REQ_TX_DESCRIPTOR_MULTIPLE   128
 
 #define WX_MAX_JUMBO_FRAME_SIZE      9432 /* max payload 9414 */
 #define VMDQ_P(p)                    p
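A descriptor-multiple constant like the one the wangxun header raises from 8 to 128 is normally consumed by the driver's ethtool ring-size handler, which clamps the requested count and rounds it to the hardware's granularity. A sketch of that calculation (the EXAMPLE_* limits are hypothetical; ALIGN() and clamp_t() are the real kernel helpers):

    #include <linux/align.h>
    #include <linux/minmax.h>
    #include <linux/types.h>

    #define EXAMPLE_MIN_TXD       128
    #define EXAMPLE_MAX_TXD       8192
    #define EXAMPLE_TXD_MULTIPLE  128   /* hardware granularity */

    u32 example_ring_size(u32 requested)
    {
        u32 count = clamp_t(u32, requested,
                            EXAMPLE_MIN_TXD, EXAMPLE_MAX_TXD);

        return ALIGN(count, EXAMPLE_TXD_MULTIPLE);  /* round up */
    }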
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index efeb643c1373..fc247f479257 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -271,8 +271,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
             DP83822_ENERGY_DET_INT_EN |
             DP83822_LINK_QUAL_INT_EN);
 
-        /* Private data pointer is NULL on DP83825 */
-        if (!dp83822 || !dp83822->fx_enabled)
+        if (!dp83822->fx_enabled)
             misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
                            DP83822_DUP_MODE_CHANGE_INT_EN |
                            DP83822_SPEED_CHANGED_INT_EN;
@@ -292,8 +291,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
             DP83822_PAGE_RX_INT_EN |
             DP83822_EEE_ERROR_CHANGE_INT_EN);
 
-        /* Private data pointer is NULL on DP83825 */
-        if (!dp83822 || !dp83822->fx_enabled)
+        if (!dp83822->fx_enabled)
             misr_status |= DP83822_ANEG_ERR_INT_EN |
                            DP83822_WOL_PKT_INT_EN;
 
@@ -691,10 +689,9 @@ static int dp83822_read_straps(struct phy_device *phydev)
     return 0;
 }
 
-static int dp83822_probe(struct phy_device *phydev)
+static int dp8382x_probe(struct phy_device *phydev)
 {
     struct dp83822_private *dp83822;
-    int ret;
 
     dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
                            GFP_KERNEL);
@@ -703,6 +700,20 @@ static int dp83822_probe(struct phy_device *phydev)
 
     phydev->priv = dp83822;
 
+    return 0;
+}
+
+static int dp83822_probe(struct phy_device *phydev)
+{
+    struct dp83822_private *dp83822;
+    int ret;
+
+    ret = dp8382x_probe(phydev);
+    if (ret)
+        return ret;
+
+    dp83822 = phydev->priv;
+
     ret = dp83822_read_straps(phydev);
     if (ret)
         return ret;
@@ -717,14 +728,11 @@ static int dp83822_probe(struct phy_device *phydev)
 
 static int dp83826_probe(struct phy_device *phydev)
 {
-    struct dp83822_private *dp83822;
-
-    dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
-                           GFP_KERNEL);
-    if (!dp83822)
-        return -ENOMEM;
+    int ret;
 
-    phydev->priv = dp83822;
+    ret = dp8382x_probe(phydev);
+    if (ret)
+        return ret;
 
     dp83826_of_init(phydev);
 
@@ -795,6 +803,7 @@ static int dp83822_resume(struct phy_device *phydev)
         PHY_ID_MATCH_MODEL(_id), \
         .name = (_name), \
         /* PHY_BASIC_FEATURES */ \
+        .probe = dp8382x_probe, \
         .soft_reset = dp83822_phy_reset, \
         .config_init = dp8382x_config_init, \
         .get_wol = dp83822_get_wol, \
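The dp83822 refactor is the classic "shared base probe" shape: one helper allocates the private structure for every supported model, and variant probes call it first, so phydev->priv can never be NULL in later callbacks (which is why the NULL checks in dp83822_config_intr() could be dropped). A generic sketch with hypothetical names:

    #include <linux/device.h>
    #include <linux/phy.h>

    struct example_priv {
        bool fx_enabled;    /* model-specific state */
    };

    /* Base probe shared by all variants: guarantees a non-NULL priv. */
    static int example_base_probe(struct phy_device *phydev)
    {
        struct example_priv *priv;

        priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;

        phydev->priv = priv;
        return 0;
    }

    /* Variant probe: reuse the base, then do model-specific setup. */
    static int example_variant_probe(struct phy_device *phydev)
    {
        int ret = example_base_probe(phydev);

        if (ret)
            return ret;
        /* ... read straps, parse firmware nodes, etc. ... */
        return 0;
    }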
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 61ffa8851d7b..6f4781ec2b36 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -356,6 +356,9 @@ struct receive_queue {
     struct xdp_rxq_info xsk_rxq_info;
 
     struct xdp_buff **xsk_buffs;
+
+    /* Do dma by self */
+    bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
     void *buf;
 
     buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-    if (buf)
+    if (buf && rq->do_dma)
         virtnet_rq_unmap(rq, buf, *len);
 
     return buf;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
     u32 offset;
     void *head;
 
+    if (!rq->do_dma) {
+        sg_init_one(rq->sg, buf, len);
+        return;
+    }
+
     head = page_address(rq->alloc_frag.page);
 
     offset = buf - head;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
     head = page_address(alloc_frag->page);
 
-    dma = head;
+    if (rq->do_dma) {
+        dma = head;
+
+        /* new pages */
+        if (!alloc_frag->offset) {
+            if (rq->last_dma) {
+                /* Now, the new page is allocated, the last dma
+                 * will not be used. So the dma can be unmapped
+                 * if the ref is 0.
+                 */
+                virtnet_rq_unmap(rq, rq->last_dma, 0);
+                rq->last_dma = NULL;
+            }
 
-    /* new pages */
-    if (!alloc_frag->offset) {
-        if (rq->last_dma) {
-            /* Now, the new page is allocated, the last dma
-             * will not be used. So the dma can be unmapped
-             * if the ref is 0.
-             */
-            virtnet_rq_unmap(rq, rq->last_dma, 0);
-            rq->last_dma = NULL;
-        }
+            dma->len = alloc_frag->size - sizeof(*dma);
 
-        dma->len = alloc_frag->size - sizeof(*dma);
+            addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+                                                  dma->len, DMA_FROM_DEVICE, 0);
+            if (virtqueue_dma_mapping_error(rq->vq, addr))
+                return NULL;
 
-        addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-                                              dma->len, DMA_FROM_DEVICE, 0);
-        if (virtqueue_dma_mapping_error(rq->vq, addr))
-            return NULL;
+            dma->addr = addr;
+            dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-        dma->addr = addr;
-        dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+            /* Add a reference to dma to prevent the entire dma from
+             * being released during error handling. This reference
+             * will be freed after the pages are no longer used.
+             */
+            get_page(alloc_frag->page);
+            dma->ref = 1;
+            alloc_frag->offset = sizeof(*dma);
 
-        /* Add a reference to dma to prevent the entire dma from
-         * being released during error handling. This reference
-         * will be freed after the pages are no longer used.
-         */
-        get_page(alloc_frag->page);
-        dma->ref = 1;
-        alloc_frag->offset = sizeof(*dma);
+            rq->last_dma = dma;
+        }
 
-        rq->last_dma = dma;
+        ++dma->ref;
     }
 
-    ++dma->ref;
-
     buf = head + alloc_frag->offset;
 
     get_page(alloc_frag->page);
@@ -967,19 +977,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
     return buf;
 }
 
-static void virtnet_rq_set_premapped(struct virtnet_info *vi)
-{
-    int i;
-
-    /* disable for big mode */
-    if (!vi->mergeable_rx_bufs && vi->big_packets)
-        return;
-
-    for (i = 0; i < vi->max_queue_pairs; i++)
-        /* error should never happen */
-        BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
-}
-
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
 {
     struct virtnet_info *vi = vq->vdev->priv;
@@ -993,7 +990,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
         return;
     }
 
-    if (!vi->big_packets || vi->mergeable_rx_bufs)
+    if (rq->do_dma)
         virtnet_rq_unmap(rq, buf, 0);
 
     virtnet_rq_free_buf(vi, rq, buf);
@@ -2430,7 +2427,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
     err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
     if (err < 0) {
-        virtnet_rq_unmap(rq, buf, 0);
+        if (rq->do_dma)
+            virtnet_rq_unmap(rq, buf, 0);
         put_page(virt_to_head_page(buf));
     }
 
@@ -2544,7 +2542,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
     ctx = mergeable_len_to_ctx(len + room, headroom);
     err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
     if (err < 0) {
-        virtnet_rq_unmap(rq, buf, 0);
+        if (rq->do_dma)
+            virtnet_rq_unmap(rq, buf, 0);
         put_page(virt_to_head_page(buf));
     }
 
@@ -2701,7 +2700,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
         }
     } else {
         while (packets < budget &&
-               (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+               (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
             receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
             packets++;
         }
@@ -5911,7 +5910,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
     int i;
     for (i = 0; i < vi->max_queue_pairs; i++)
         if (vi->rq[i].alloc_frag.page) {
-            if (vi->rq[i].last_dma)
+            if (vi->rq[i].do_dma && vi->rq[i].last_dma)
                 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
             put_page(vi->rq[i].alloc_frag.page);
         }
@@ -6109,8 +6108,6 @@ static int init_vqs(struct virtnet_info *vi)
     if (ret)
         goto err_free;
 
-    virtnet_rq_set_premapped(vi);
-
     cpus_read_lock();
     virtnet_set_affinity(vi);
     cpus_read_unlock();
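The virtio_net revert restores the rq->do_dma flag so that buffers are unmapped only on queues where the driver performed its own (premapped) DMA; queues left to the virtio core must not be touched. The guard reduces to a simple ownership test, sketched here with hypothetical types and helpers:

    #include <linux/types.h>

    struct example_rq {
        bool do_dma;        /* driver manages DMA for this queue */
    };

    void example_unmap(struct example_rq *rq, void *buf, u32 len); /* hypothetical */
    void example_free_buf(void *buf);                              /* hypothetical */

    void example_recycle_buf(struct example_rq *rq, void *buf, u32 len)
    {
        if (rq->do_dma)             /* only unmap what we mapped */
            example_unmap(rq, buf, len);
        example_free_buf(buf);
    }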